Dataset columns:
  query: string (lengths 9 to 3.4k)
  document: string (lengths 9 to 87.4k)
  metadata: dict
  negatives: sequence (lengths 4 to 101)
  negative_scores: sequence (lengths 4 to 101)
  document_score: string (lengths 3 to 10)
  document_rank: string (102 distinct values)
Access the run at a given index. This is required by QtQuick
def data(self, index, role=Qt.DisplayRole):
    if not index.isValid():
        return QVariant()
    run = self._runs[index.row()]
    if role == Qt.DisplayRole:
        return run
    return QVariant()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, i):\n return self._runs[i]", "def run(self, run_idx):\n return self._h5['{}/{}'.format(RUNS, int(run_idx))]", "def run(self, run_number):\n return self[self.run_cache[run_number]]", "def __getitem__(self, index):\n # NOTE: this automatically supports slicing :-)\n return self._main._sequence[index]", "def __getitem__(self, index):\n self.wait()\n return self._results.__getitem__(index)", "def start(self):\n try:\n return self.index[0]\n except:\n pass", "def __getitem__(self, index):\n return self.worker_list[index]", "def get(self, index):\n return self.board[index]", "def next_run_idx(self):\n return self.num_runs", "def get_running_task_by_index(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlLoginResponse_GetRunningTaskByIndex', self.handle, nIndex))", "def unit_at(self, index):\n return self.child_at(index)", "def get(self, index):\n raise NotImplementedError() # pragma: no cover", "def __getitem__( self, stepNum ):\n assert isinstance( stepNum, int )\n\n assert isinstance( self._env, Env )\n assert isinstance( self._steps, list )\n\n return self._steps[ stepNum - 1 ]", "def get_run(self, run_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from runs\n WHERE run_id = ?;\n \"\"\",\n (run_id,),\n )\n results = c.fetchall()\n return results[0]", "def __getitem__(self, pos):\n try:\n return self.run_mem[pos]\n except KeyError:\n self.run_mem[pos] = self.program[pos]\n return self.run_mem[pos]", "def __getitem__(self, index):\n return self.dataset[index]", "def __getitem__(self, index):\n return self.seq[index]", "def __getitem__(self, index):\n return self.experimentalReplicates[index]", "def __getitem__(self, index: int) -> object:\n return self.get_at_index(index)", "def __getitem__(self, index):\n return self._games[index]", "def __getitem__(self, index):\n return self.data_source.get_sample(index)", "def __getitem__(self, index):\r\n\r\n if self._instance is not _unset and index not in self:\r\n self._instance[index]\r\n return self._contents[index]", "def index(self) -> int:", "def __getitem__(self, index):\n return self.position[index]", "def run_grp(self, run_idx):\n return self.runs[\"{}\".format(run_idx)]", "def __getitem__(self, index):\n item = self.data[index]\n return item", "def __getitem__(self, index):\n return self.points[index]", "def __getitem__( self, index ) :\n\n return( self.__entries[index] )", "def __getitem__(self, index):\r\n return self._items[index]", "def get_current_index(self):\r\n return self.contents_widget.currentRow()", "def __getitem__(self, index):\n pass", "def __getitem__(self, index):\n pass", "def test_run(index='3ban2y82', reload=False):\n wd = os.path.join(w_dir, index)\n start = run_lims[index][0]\n end = run_lims[index][1]\n run_kwargs = {'data_dir': wd,\n 'index': index,\n 'parallel': True,\n 'caching': True,\n 'cache_reload': reload,\n 'limits': (start, end)}\n r = PlotRun(run=index, run_kwargs=run_kwargs, t_width=400)\n return r", "def _evaluate(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, index):\n return self._value_at(index)", "def __getitem__(self, index):\n if index < 0 or index >= len(self.sequence):\n raise Exception(\"Index is out of bounds\")\n return self.sequence[index]", "def __getitem__ ( self , index ):\n\t\treturn self . 
data [ index ]", "def get_run(arn=None):\n pass", "def __getitem__(self, index: Any) -> Any:\n return self.contents[index]", "def get_element(self, index):\n original_index = index\n if index < 0:\n index = self.size + index\n if index >= self.size or index < 0:\n raise IndexError(\n 'index %i is out of range for SeriesAxis with size %i'\n % (original_index, self.size)\n )\n return self.start + self.step * index", "def subsection_at(self, index):\n return self.child_at(index)", "def __getitem__(self, index):\n return self.array[index]", "def by_index(cls, index, date=None):\n q = cls.query_started(date).\\\n limit(1).offset(index-1)\n return q.first()", "def get(self, index):\n return self._get_node(index)", "def __getitem__(self, idx):\n return self.batches[idx]", "def __getitem__(self, index):\n return (index, self.data_cube[0, index, :])", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\n return self.data[index]", "def get_item(self, index):\n\n demo_id = self._index_to_demo_id[index]\n demo_start_index = self._demo_id_to_start_indices[demo_id]\n demo_length = self._demo_id_to_demo_length[demo_id]\n\n # start at offset index if not padding for frame stacking\n demo_index_offset = 0 if self.pad_frame_stack else (self.n_frame_stack - 1)\n index_in_demo = index - demo_start_index + demo_index_offset\n\n # end at offset index if not padding for seq length\n demo_length_offset = 0 if self.pad_seq_length else (self.seq_length - 1)\n end_index_in_demo = demo_length - demo_length_offset\n\n meta = self.get_dataset_sequence_from_demo(\n demo_id,\n index_in_demo=index_in_demo,\n keys=self.dataset_keys,\n seq_length=self.seq_length\n )\n\n # determine goal index\n goal_index = None\n if self.goal_mode == \"last\":\n goal_index = end_index_in_demo - 1\n\n meta[\"obs\"] = self.get_obs_sequence_from_demo(\n demo_id,\n index_in_demo=index_in_demo,\n keys=self.obs_keys,\n num_frames_to_stack=self.n_frame_stack - 1,\n seq_length=self.seq_length,\n prefix=\"obs\"\n )\n if self.hdf5_normalize_obs:\n meta[\"obs\"] = ObsUtils.normalize_obs(meta[\"obs\"], obs_normalization_stats=self.obs_normalization_stats)\n\n if self.load_next_obs:\n meta[\"next_obs\"] = self.get_obs_sequence_from_demo(\n demo_id,\n index_in_demo=index_in_demo,\n keys=self.obs_keys,\n num_frames_to_stack=self.n_frame_stack - 1,\n seq_length=self.seq_length,\n prefix=\"next_obs\"\n )\n if self.hdf5_normalize_obs:\n meta[\"next_obs\"] = ObsUtils.normalize_obs(meta[\"next_obs\"], obs_normalization_stats=self.obs_normalization_stats)\n\n if goal_index is not None:\n goal = self.get_obs_sequence_from_demo(\n demo_id,\n index_in_demo=goal_index,\n keys=self.obs_keys,\n num_frames_to_stack=0,\n seq_length=1,\n prefix=\"next_obs\",\n )\n if self.hdf5_normalize_obs:\n goal = ObsUtils.normalize_obs(goal, obs_normalization_stats=self.obs_normalization_stats)\n meta[\"goal_obs\"] = {k: goal[k][0] for k in goal} # remove sequence dimension for goal\n\n return meta", "def row(self, index):\n return self.matrix_list[index - 1]", "def __getitem__(self, index):\n return getattr(self, self.__slots__[index])", "def __getitem__(self,index):\n return self._data[index[0]][index[1]]", "def __getitem__(self, index):\n if isinstance(index, types.SliceType):\n return [self._main[key] for key in self._main._sequence[index]]\n else:\n return self._main[self._main._sequence[index]]", "def __getitem__(self, idx):\n return self.items[idx]", "def index(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"index\")", "def 
__getitem__(self, index):\n return self.to_list()[index]", "def __getitem__(self, index):\n return self._nums[index]", "def __getitem__(self, index):\n if isinstance(index, (tuple, list)) and len(index) == 2:\n return self.cells[index[1]][index[0]]\n return self.cells[index]", "def __getitem__(self, index):\n x, y = index\n if 0 <= x < self.width and 0 <= y < self.height:\n return self.cells[x + y * self.width]\n else:\n return None", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def index(self):\n return self.frame.index", "def __getitem__(self, index):\n return self.data[index[0] - 1][index[1] - 1]", "def run_trajs(self, run_idx):\n return self._h5['{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES)]", "def __getitem__(self, idx: int):\n return self.deck[idx]", "def __getitem__(self, index):\n def _getTextByIndex(blockIndex):\n return self._doc.findBlockByNumber(blockIndex).text()\n\n if isinstance(index, int):\n index = self._checkAndConvertIndex(index)\n return _getTextByIndex(index)\n elif isinstance(index, slice):\n start, stop, step = index.indices(self._doc.blockCount())\n return [_getTextByIndex(blockIndex) \\\n for blockIndex in range(start, stop, step)]", "def get_at_index(self, index: int) -> object:\n return self.data[index]", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n return self._timeseriesData[index]", "def index(self) -> int:\r\n return self._index", "def get_step_class_at_index(self, index):\n return self[index][0]", "def __getitem__(self, idx):\n return self.getitem(idx)", "def __getitem__(self, index):\n return self.cellData[index]", "def __getitem__(self, i):\n return self.__tiers[i]", "def run_number(self):\n return self._runNumber", "def __getitem__(self, index):\n assert 0 <= index < len(self), \"Array subscript out of range\"\n return self._elements[index]", "def get(self, index):\n self.__validate_index(index)\n return self.__list[index]", "def __getitem__(self, index):\n return self.values[index]", "def next_run_traj_idx(self, run_idx):\n return self.num_run_trajs(run_idx)", "def _getExactlyOneRun(self):\n run_listing = json.loads(\n self.server.get(_ROUTE_PREFIX + \"/runs\").get_data()\n )\n self.assertLen(run_listing, 1)\n return list(run_listing.keys())[0]", "def __getitem__(self, index):\n return [self.points][index]", "def __getitem__(self, index):\n\t\treturn self.data[index]", "def __getitem__(self, idx):\n return self.samples[idx]", "def index(self):\n return self._index", "def get(self, index):\n if 0 <= index <= len(self.nums):\n return self.nums[index]\n return -1", "def get_run(self, _id):\n return Run.deserialize(self._get_single('runs', {'run': _id}))", "def get_piece(self, index):\n return self.squares[index]", "def row(self, index):\n return self.data[index]", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def __getitem__(self, index: int) -> AnnotatedData:\n # sample selection\n selected_sample = self.df.iloc[index]\n return self._make_return_tuple(selected_sample)", "def __getitem__(self, index):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # ask my tile do the rest\n value = self.data[self.tile.offset(index)]\n # otherwise\n else:\n # retrieve the 
item directly from my container\n value = self.data[index]\n # all done\n return value", "def __getitem__(self, idx):\n return self.data.iloc[idx]", "def __getitem__(self, idx):\n pass", "def __getitem__(self, idx):\n pass", "def __getitem__(self, index):\n return self.components[index]" ]
[ "0.72931045", "0.6952865", "0.6678343", "0.6282501", "0.61454105", "0.60703266", "0.60688543", "0.60501313", "0.60471225", "0.59921044", "0.5978085", "0.59680146", "0.59228444", "0.58857644", "0.5878358", "0.5874778", "0.5870897", "0.5781742", "0.5779231", "0.5774915", "0.5754918", "0.57541037", "0.5739877", "0.5729803", "0.5722136", "0.56853575", "0.56656015", "0.566387", "0.56561816", "0.5655188", "0.5630689", "0.5630689", "0.5628866", "0.5620045", "0.5614204", "0.5614204", "0.5605124", "0.5599784", "0.5571865", "0.55714905", "0.5566957", "0.55631727", "0.5549987", "0.5544565", "0.5535727", "0.55289286", "0.5524177", "0.55211234", "0.55211234", "0.54896116", "0.5487571", "0.54866046", "0.54857486", "0.54827714", "0.5482464", "0.54819673", "0.5480174", "0.5474677", "0.54745555", "0.54579633", "0.54576737", "0.544914", "0.5447255", "0.5445314", "0.54441637", "0.5441241", "0.54362065", "0.5433523", "0.5433523", "0.54325604", "0.54285616", "0.5427919", "0.54269433", "0.54215765", "0.5417663", "0.5416102", "0.54146975", "0.5413595", "0.5410914", "0.54027945", "0.5402633", "0.5396889", "0.53906155", "0.53903687", "0.53903425", "0.53875047", "0.5386314", "0.53730136", "0.53643584", "0.53633314", "0.53633314", "0.53633314", "0.53633314", "0.53633314", "0.53623956", "0.5361057", "0.5356476", "0.53549236", "0.53549236", "0.53548944" ]
0.6371707
3
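For context on the Qt side of this record: the positive document above is the data() accessor of a list model. A minimal sketch of how such an accessor typically sits inside a QAbstractListModel exposed to QtQuick follows; the class name, the _runs attribute, and the PyQt5 import are illustrative assumptions rather than part of this dataset.

# Hypothetical sketch of a list model around a data() accessor like the one
# above; class and attribute names are assumptions, not taken from the dataset.
from PyQt5.QtCore import QAbstractListModel, QModelIndex, QVariant, Qt


class RunListModel(QAbstractListModel):
    """Illustrative list model; names are assumptions, not the original API."""

    def __init__(self, runs=None, parent=None):
        super().__init__(parent)
        self._runs = list(runs or [])

    def rowCount(self, parent=QModelIndex()):
        # QtQuick calls this to decide how many delegates to create.
        return len(self._runs)

    def data(self, index, role=Qt.DisplayRole):
        # Mirror the positive document: return the run for the display role.
        if not index.isValid():
            return QVariant()
        if role == Qt.DisplayRole:
            return self._runs[index.row()]
        return QVariant()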
Update the data at a given index. This is required by QtQuick
def setData(self, index, value, role=Qt.EditRole):
    if not index.isValid():
        return False
    if role == Qt.EditRole:
        self._runs[index.row()] = value
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateRow(self, index: int) -> None:\n ...", "def set(self, index, data):\n self.data[index] = data", "def set_at_index(self, index: int, value: object) -> None:\n self.data[index] = value", "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n node.value = value", "def __setitem__(self, index, value):\n self._update_value_at(index, value)", "def _idx_changed(self, idx):\n self.refresh_memory()", "def __setitem__(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n self.data[nidx] = value", "def update_settings_at_index(self, settings, index):\n self[index][2].update(settings)", "def __setitem__(self, index, value):\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif (\n isinstance(index, tuple)\n and len(index) == 2\n and index[1] in self.data.columns\n ):\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value", "def update_list_view(self):\n self.model.dataChanged.emit(self.model.index(0, 1),\n self.model.index(len(self.model.data_list), 1))\n #self.pBar.setValue(localization.localizationProgress() * 100)", "def update_from_indexes(self, data, **kw):\n for i in data:\n self.update_from_index(i, **kw)", "def update_store(self, value, index):\n if index == 1:\n self.state[self.M] = value\n else:\n self.state[-1] = value", "def index(self, index):\n\n self._index = index", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, value, index):\n\n length = self.get_length()\n if type(index) is int:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n this_node = Node(data=value)\n if index == 0:\n this_node.next = self.head.next\n this_node.prev =None\n self.head = this_node\n else:\n cur = self.head\n while index - 1:\n cur = cur.next\n index -= 1\n this_node.next = cur.next.next\n this_node.prev = cur.next.prev\n cur.next = this_node\n return\n else:\n print(\"Index value is not int.\")\n return", "def __setitem__(self, index, value):\n self._timeseriesData[index] = value", "def index(self, new_index):\n old_index = self._index\n\n L = len(self.results)\n if L == 0:\n new_index = -1\n elif new_index < 0:\n new_index = 0\n elif L - 1 < new_index:\n new_index = L - 1\n\n self._index = new_index\n self._update_preview_content()\n\n # update results formatting\n self._update_selected_result(old_index, new_index)", "def set_custom_data(self, index_from, data):\r\n self.unif[index_from:(index_from + len(data))] = data", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def updateProject(self, index, data_role):\n row_index = index.row()\n value = self._dataModel.data(index, data_role)\n experiment_id = self._project.experimentsIds()[0] # only 1st measured datablock is currently taken into account\n keys = [\"experiments\", experiment_id, \"calculated\", \"calc\"]\n self._project.setByPathAndIndex(keys, row_index, value)", "def setData(self, index, value):\n \n self.state[index.row()][index.column()] = value\n return value", "def update_data():\n pass", "def update(self, data_test):\n self.data_array[-1].update(data_test)", "def 
updateValue(self,i,x):\n assert 0 <= i < len(self)\n self.__update_aux(0,0,len(self),i,x)", "def update(self, data):\n self.data.update(data)", "def _updateSlot(self, index):\r\n\r\n if index.isValid():\r\n self.propertiesTableView.selectionModel().setCurrentIndex(index, QtGui.QItemSelectionModel.ClearAndSelect)", "def _update_data(self, selected):\n if selected.row() != self.datasets.index:\n self.datasets.index = selected.row()\n self.datasets.update_current()\n self._update_main()", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def update_all_data(self):\n self.dataChanged.emit(qtc.QModelIndex(), qtc.QModelIndex())", "def update_by_index(df, col, indexs, data):\n for indx in indexs:\n df.loc[indx, col] = data", "def update_data(self):\n self._model.update()\n self.__refresh()", "def __setitem__(self, index: Any, value: Any) -> None:\n self.contents[index] = value\n return", "def setvalue(self, index, value):\n self._checkIndex(index)\n self._items[index].value = value", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def __setitem__(self, index, item):\n # TODO: complete this function!\n if index >= self.__len__():\n raise IndexError\n else:\n if index == 0:\n self._first = item\n else:\n self._rest.__setitem__(index-1, item)", "def __setitem__(self, index: int, value: object) -> None:\n self.set_at_index(index, value)", "def __setitem__(self, index, value):\n self.position[index] = value", "def update_view(self):\n for row in self.view.obj_list:\n for obj in row:\n obj._update(self.model)", "def __getitem__(self, index):\n\n if self._data_indices is not None:\n index = self._data_indices[index]\n data = self._dataset[index]\n return data", "def update_pit(self, value, pit_index, index):\n if index == 1:\n self.state[pit_index] = value\n else:\n self.state[pit_index + self.M + 1] = value", "def __getitem__(self, index):\n raise NotImplementedError", "def __getitem__(self, index):\n raise NotImplementedError", "def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val", "def __getitem__(self, index):\n self.update(index)\n return self.primes[index]", "def refresh_view_attrs(self, rv, index, data):\n self.index = index\n return super(SelectableButton, self).refresh_view_attrs(rv, index, data)", "def update_data(self, newData):\r\n self.AllData = newData", "def __setitem__(self, index, newItem):\r\n #if index < 0 or index >= self.size():\r\n # raise IndexError(\"Array index out of bounds\")\r\n self._items[index] = newItem", "def __setitem__(self, index, value):\n # attempt to\n try:\n # cast {index} to an integer\n index = int(index)\n # if this fails\n except TypeError:\n # let my tile do the rest\n self.data[self.tile.offset(index)] = value\n # otherwise\n else:\n # set the item directly in my container\n self.data[index] = value\n # all done\n return", "def _Dynamic_UpdateIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"UpdateIndex\", request_id)\n return", "def __setitem__(self, index, newItem):\r\n self._items[index] = newItem", "def __setitem__(self, index, value):\n self.points[index] = 
value", "def __setitem__(self, index, value):\n self.points[index] = value", "def __getitem__(self, index):\n item = self.data[index]\n return item", "def replace(self, index, value):\n index += self.n\n self.data[index] = value\n index //= 2\n while index > 0:\n self.data[index] = self.func(self.data[2*index], self.data[2*index+1])\n index //= 2", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def __setitem__(self, index, value):\n if self._list_like(index):\n len_var = len(index)\n if len_var==0:\n raise IndexError(\"Received empty index.\")\n elif len_var==1:\n self._points[index[0]] = value\n elif len_var==2:\n # safeguard against empty entries\n if index[0] not in self._points:\n self._points[index[0]] = StatePoint()\n self._points[index[0]][index[1]] = value\n else:\n raise IndexError(\"Received too long index.\")\n else:\n self._points[index] = value", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def _update_model(self, idx):\n self._wfield.update(self._choices[idx][0])", "def on_update_seatable(data, index, *args):\n row = convert_row(metadata, data)\n print(row)", "def refresh_view_attrs(self, rv, index, data):\r\n self.index = index\r\n return super(SelectableLabel, self).refresh_view_attrs(rv, index, data)", "def __setitem__(self, index: int, item: Any) -> None:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Set the first item\n elif index == 0:\n self._first = item\n # Recurse on the _rest\n else:\n if not self._rest:\n raise IndexError\n self._rest.__setitem__(index - 1, item)", "def __getitem__(self, index):\n pass", "def __getitem__(self, index):\n pass", "def __getitem__ ( self , index ):\n\t\treturn self . 
data [ index ]", "def __setitem__(self, index, value):\n self.elem[index] = value", "def __setitem__(self, index: int, value: float) -> None:\n self._previous_values[index] = value", "def updateItem(self, object):\n pass", "def update(self, idx, new_error):\n self.buffer.update(idx, self.priority(new_error))", "def refresh_view_attrs(self, rv, index, data):\n self.index = index\n return super(SelectableLabel, self).refresh_view_attrs(\n rv, index, data)", "def __setitem__(self, index, value):\n assert 0 <= index < len(self), \"Array subscript out of range\"\n self._elements[index] = value", "def set_index(self, index):\n self.index = index", "def update_settings_at_index(self, settings: dict, index):\n self.routine_template.update_settings_at_index(settings, index)", "def __setitem__(self, index, value):\n if not isinstance(index, numbers.Integral):\n raise TypeError(\"Input index must be integer\")\n if index >= len(self._fsm.get(self._id)):\n raise ValueError(\"Input index is out of boundary\")\n ts = self._fsm.get(self._id)\n ts[index] = value\n self._fsm.store(self._id, ts)", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def update_item(self, item):\n try:\n index = self.ui.listItemList.model().index_of(item)\n # TODO: missing a way to insert row, don't know how to add data with insertRows\n # see https://svn.enthought.com/svn/enthought/TraitsBackendQt/trunk/enthought/traits/ui/qt4/list_str_model.py\n #if item.isRead() and self.show_updated_only():\n # self.ui.listItemList.model().removeRow(index.row())\n #else:\n self.ui.listItemList.update(index)\n except:\n pass\n self.update_title()", "def _number_list_index_changed(self, *a):\r\n self.api.set_list_index(self.number_list_index.get_value())\r\n \r\n # Make sure.\r\n n = self.api.get_list_index()\r\n self.number_list_index.set_value(n, block_events=True)\r\n \r\n # Update the frequency and power in the safest possible way\r\n# fs = self.api.get_list_frequencies()\r\n# ps = self.api.get_list_powers()\r\n# self.number_dbm.set_value(ps[n])\r\n# self.number_frequency.set_value(fs[n])\r\n \r\n # Update the frequency and power using the graph if we have it.\r\n \r\n # If enabled, things are out of sync, get the list.\r\n if self.button_send_list._widget.isEnabled(): self.query_list()\r\n \r\n # Get the power and frequency from the plot\r\n self.number_dbm .set_value(self.plot_list['P_dBm'][n])\r\n self.number_frequency.set_value(self.plot_list['f_Hz'][n])", "def update(self, data):\n return self._data.update(data)", "def __setitem__(self, index, value):\n if isinstance(index, tuple):\n list.__getitem__(self, index[0])[index[1]] = value\n elif isinstance(index, int):\n self.pop(index)\n self.insert(index, value)\n else:\n raise TypeError, \"Table indices must be int or tuple\"", "def setData(self, index, value, role=QtCore.Qt.DisplayRole):\n if self.verbose: print('myPandasModel.setData()')\n print(' myPandasModel.setData() row:', index.row(), 'column:', index.column(), 'value:', value, type(value))\n #if index.column() == self.includeCol:\n\n # dataChanged is inherited from QAbstractItemModel\n #topLeftIndex = index\n #bottomRightIndex = index\n #self.dataChanged.emit(index, index)\n\n if 1:\n\n #print('value:', value, type(value))\n v = self._data.iloc[index.row(), index.column()]\n #print('before v:',v, type(v))\n #print('isinstance:', isinstance(v, np.float64))\n if isinstance(v, np.float64):\n try:\n value = float(value)\n except 
(ValueError) as e:\n print('please enter a number')\n return False\n\n # set\n self._data.iloc[index.row(), index.column()] = value\n\n v = self._data.iloc[index.row(), index.column()]\n print(' after v:',v, type(v))\n return True\n return True", "def update(self, data):\n return data", "def index(self):\n self.index_value(self.proxy_get())", "def __getitem__(self, index):\n\t\treturn self.data[index]", "def updateGraph(self):\n GraphItem.setData(self, **self.data)\n for i, item in enumerate(self.textItems):\n pos = self.data['pos'][i] + [-.3, .3]\n #item.setPos(*self.data['pos'][i])\n item.setPos(*pos)", "def __getitem__(self, index):\n return self.data[index]", "def __getitem__(self, index):\n return self.data[index]", "def index_update(tensor, indices, values):\n tensor[indices] = values\n return tensor", "def setData(self, index, value, role):\n if role == qc.Qt.EditRole and value.isnumeric():\n # convert keys to a list so that they they can be indexed\n keys = [x for x in self._data.keys()]\n key = keys[index.row()]\n self._data[key][index.column()-1] = value\n\n self.dataChanged.emit(index, index)\n return True\n\n return False", "def update(self):\n raise NotImplementedError", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def update_item(self, table, item):", "def update(self, index: int, x: int):\n index += self.n2\n self.tree[index] = self.binary(self.tree[index], x)\n while index > 1:\n # (index ^ 1) はiと1の排他的論理和(XOR)\n x = self.binary(x, self.tree[index ^ 1])\n index >>= 1 # 右ビットシフトで親ノードのインデックスへ移動\n self.tree[index] = self.binary(self.tree[index], x)", "def update(self, datain):\r\n self.arraydata = datain\r\n self.layoutChanged.emit()", "def __setitem__(self, item_index: Index, new_item: Item) -> None:\n raise NotImplementedError(\"__setitem__\")", "def update_order_index(self, index=None):\n if index is None:\n index = getattr(self, \"current_order_index\", 0)\n\n session = self.parent.session\n self.current_order_index = index\n self.current_order \\\n = session.input_spectra[self.current_order_index].copy()\n\n # Apply any RV correction.\n try:\n v = session.metadata[\"rv\"][\"rv_applied\"]\n except (AttributeError, KeyError):\n v = 0\n\n self.current_order._dispersion *= (1 - v/c)\n\n # Update the view if the input settings don't match the settings used\n # to normalize the current order.\n self.check_for_different_input_settings()\n\n return None", "def index(self, index):\n \"\"\"\n if index is None:\n raise ValueError(\"Invalid value for `index`, must not be `None`\")\n \"\"\"\n\n self.container['index'] = index", "def __setitem__(self, index, value):\n self.components[index] = value", "def _update(self, datapoints):\r\n if len(datapoints) == 1:\r\n timestamp, value = datapoints[0]\r\n whisper.update(self.path, value, timestamp)\r\n else:\r\n whisper.update_many(self.path, datapoints)", "def update(self, **kwargs):\n self.pending_update = True\n self.update_data(**kwargs)\n self.update_selection()\n if self.context is not None and self.context.doc is not None:\n self.context.doc.add_next_tick_callback(self.update_source)" ]
[ "0.7554738", "0.7046055", "0.70130014", "0.6715155", "0.6603428", "0.6596355", "0.6492101", "0.6436119", "0.63908285", "0.6352602", "0.63500535", "0.6314473", "0.6250634", "0.62338316", "0.62338316", "0.62338316", "0.62338316", "0.622466", "0.62043625", "0.6203889", "0.6196867", "0.61895776", "0.6184957", "0.61686337", "0.6160741", "0.6158195", "0.61154675", "0.61152345", "0.6086889", "0.6081074", "0.6079321", "0.6078983", "0.6062708", "0.6033217", "0.5999742", "0.5972229", "0.5971722", "0.5940731", "0.5922374", "0.59143233", "0.58883345", "0.58875406", "0.58807623", "0.58631635", "0.58631635", "0.58465546", "0.5839683", "0.5838372", "0.58305913", "0.58297205", "0.58252174", "0.5824735", "0.5819637", "0.58187324", "0.58187324", "0.58187014", "0.5814857", "0.58133644", "0.58110183", "0.58094525", "0.58036894", "0.5798483", "0.5797648", "0.57969505", "0.57955104", "0.57955104", "0.5792787", "0.5790627", "0.5782126", "0.5771969", "0.57703274", "0.5764921", "0.5763554", "0.57521826", "0.5744709", "0.57442313", "0.5741106", "0.57377964", "0.57278645", "0.5714831", "0.57103705", "0.571015", "0.5696438", "0.5696069", "0.56946", "0.5692982", "0.5687905", "0.5687905", "0.5680802", "0.56747234", "0.56690747", "0.5661308", "0.5660367", "0.5658756", "0.56578755", "0.56503135", "0.56493115", "0.56314147", "0.5628652", "0.5620891", "0.5607827" ]
0.0
-1
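The positive document for this record implements setData() but does not emit dataChanged, so attached QtQuick views would typically not repaint the edited value. The hedged sketch below extends the hypothetical RunListModel from the sketch after the first record; the subclass name and the roles argument passed to dataChanged follow common Qt convention and are assumptions here.

# Hypothetical continuation of the RunListModel sketch above; the dataChanged
# emission reflects the usual Qt convention, not necessarily the original code.
from PyQt5.QtCore import Qt


class EditableRunListModel(RunListModel):
    def setData(self, index, value, role=Qt.EditRole):
        if not index.isValid() or role != Qt.EditRole:
            return False
        self._runs[index.row()] = value
        # QtQuick delegates refresh only after dataChanged has been emitted.
        self.dataChanged.emit(index, index, [Qt.EditRole])
        return True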
A description of the model properties required by QtQuick
def flags(self, index):
    if not index.isValid():
        return Qt.ItemIsEditable
    return Qt.ItemIsEnabled | Qt.ItemIsEditable
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def properties(self):\n pass", "def properties(self):", "def properties(self):", "def properties(self):", "def properties(self):\n raise NotImplementedError", "def properties(self):\n return None", "def properties(self):\n return None", "def get_properties(self):\n return self.name, self.author, self.description, self.fmu_type, self.version, self.guid, self.tool, self.numStates", "def _showProperty(self):\n pass", "def __repr__(self):\n return misc.describe_layer(self, name=\"model\")", "def properties(self):\r\n return resources.Properties(self)", "def __repr__(self) -> str:\n return f\"Property(name={self.name}, property_type={self.property_type})\"", "def get_editable_properties(self):\n return (\n\t\t{'title':'Title', 'id':'title', 'type':'line', 'description':'The title of this instance', 'required':1,\n\t\t\t'value':getattr(self.aq_base, 'title', utilities.get_type_default('line'))},\n\t)", "def getModelDesc():\n\n return \"Example model template\"", "def properties(self) -> Any:\n return pulumi.get(self, \"properties\")", "def describe(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n property_info = {'child_properties': self.child_properties,\n 'descendant_properties': self.descendant_properties,\n 'parent_properties': self.parent_properties,\n 'domain': self.domain,\n 'range': self.range,\n 'uri': self.uri,\n 'label': self.label,\n 'description': self.description}\n return property_info", "def display_properties(self):\n return self._display_properties", "def properties(self):\n txt = '{} has rank {} and the following properties:'\n print(txt.format(self, self.rank()))\n s = \"\\t- {} {}\"\n print(s.format('irreducible: ', self.is_irreducible()))\n print(s.format('mutation finite: ', self.is_mutation_finite()))\n print(s.format('simply-laced: ', self.is_simply_laced()))\n print(s.format('skew-symmetric: ', self.is_skew_symmetric()))\n print(s.format('finite: ', self.is_finite()))\n if self.is_irreducible():\n print(s.format('affine: ', self.is_affine()))\n print(s.format('elliptic: ', self.is_elliptic()))", "def props(self):\r\n return ()", "def props(self):\r\n return ()", "def props(self):\r\n return ()", "def show_properties(self):\n print(\"L:\", self.L)\n print(\"d:\", self.d)\n print(\"D:\", self.D)\n print(\"dtype:\", self.dtype)\n print(\"R[-1]:\", self.R[-1])\n print(\"F[-1]:\", self.F[-1])\n print(\"Cummulated norm C:\", self.normC)", "def model_definition(self):\n pass", "def properties(self):\n return self._props", "def get_properties(self):\n return self.properties", "def properties(self) -> Optional[str]:\n return pulumi.get(self, \"properties\")", "def describe(self):\r\n return dict((field.name, dict(\r\n id=field.name,\r\n name=field.label,\r\n validators=ValidatorSerializer(\r\n field.requires if isSequenceType(field.requires) else [field.requires])(),\r\n comment=field.comment,\r\n readable=field.readable,\r\n writable=field.writable,\r\n type=getattr(field, 'wtype',\r\n field.type.type if isinstance(field.type, SQLCustomType) else field.type.split('(')[0]),\r\n # w2pwidget=field.widget,\r\n )) for field in self.descibe_columns)", "def advanced_properties(self):\n return self._advanced_properties", "def properties(self):\n return self._properties", "def properties(self):\n return self._properties", "def properties(self,prop):\r\n # The particulars of how they are stored and manipulated (e.g., do\r\n # we want an inventory internally) is not settled. 
I've used a\r\n # property dictionary for now.\r\n #\r\n # How these properties interact with a user defined style file is\r\n # even less clear.\r\n\r\n # Properties defined by plot\r\n self.xbox.set_text(r\"$%s$\" % prop[\"xlabel\"])\r\n self.ybox.set_text(r\"$%s$\" % prop[\"ylabel\"])\r\n self.tbox.set_text(r\"$%s$\" % prop[\"title\"])\r\n\r\n # Properties defined by user\r\n #self.axes.grid(True)\r", "def get_properties(self):\n return self.properties", "def __str__(self):\n return \"Description(values={},data_model={})\".format(\n self._values, self.data_model\n )", "def description(self):\n desc = self.title\n ops = []\n for attribute in self.attributes.all():\n value = attribute.value\n if isinstance(value, list):\n ops.append(\n \"%s = '%s'\" % (attribute.type, (\", \".join([str(v) for v in value])))\n )\n else:\n ops.append(\"%s = '%s'\" % (attribute.type, value))\n if ops:\n desc = \"%s (%s)\" % (desc, \", \".join(ops))\n return desc", "def propertyDetails(self):\n return (PROPERTY_DETAILS.get(aa, NONE) for aa in self.sequence)", "def view_props(self):\n camera = getattr(self.ui, \"camera\")\n _camera_props = ['camera.%s' % k for k in camera._controls.attrs.keys()]\n surface = getattr(self.ui, \"surface\")\n _subject = list(surface._folders.attrs.keys())[0]\n _surface = getattr(surface, _subject)\n _surface_props = ['surface.{subject}.%s'%k for k in _surface._controls.attrs.keys()]\n _curvature_props = ['surface.{subject}.curvature.brightness',\n 'surface.{subject}.curvature.contrast',\n 'surface.{subject}.curvature.smoothness']\n return _camera_props + _surface_props + _curvature_props", "def getProperties(self):\n return _libsbml.SBMLConverter_getProperties(self)", "def meta(self):\n title = 'Передача оборудования: {0}'.format(self.object.model)\n return {\n 'title': title\n }", "def describe(self) -> Text:\n return self.__repr__()", "def __str__(self):\n return self.properties.__str__()", "def attrs(self):\n return self.size, self.propSuffix, self.specified", "def details(self):\n raise NotImplementedError()", "def _print_properties(self):\n return NotImplemented", "def show_model_summary(self):\n\t\treturn self.model.summary()", "def describe(self):\n return str(self)", "def description(self):\n pass", "def description(self):\n pass", "def get_visual_properties(self) -> dict:\n return self._vis_properties", "def model_info():\n pass", "def __str__(self):\n return \"DataModel(name={},attributes={},description={})\".format(\n self.name, {a.name: str(a) for a in self.attributes}, self.description\n )", "def PropertyName(self) -> str:", "def model_description(self) -> Optional[str]:\n return self.profile_device.model_description", "def getProperties():", "def __str__(self):\n \n res = ['>>> Model %(model_name)s <<<']\n res.append('')\n res.append('Independent parameters:')\n res.append('-----------------------')\n res.append('')", "def description(self) -> ConfigNodePropertyString:\n return self._description", "def props(self):\n\n info = {'dtype': self.dtype, 'multivar': self.multivar, 'multiindex': self.multiindex}\n return info", "def properties(self):\n\n return self._properties", "def info_view(self):\n view_dict = dict(\n env_class=self.env_class,\n model_structure=None,\n model_kwargs={key: value for key, value in self.model_kwargs.items() \n if isinstance(value,(str,int,float,tuple,list,dict)) \n and len(str(value))<100},\n model_class=self.model_class,\n hyperparams=self.hyperparams)\n return view_dict", "def description(self) -> str:\n raise NotImplementedError", "def 
description(self) -> str:\n raise NotImplementedError", "def description(self) -> str:\n raise NotImplementedError", "def inspect_model_fields(self, model: ModelRepresentation) -> None:\n c = model.count()\n title(f\"{model.name} ({c})\")\n print(model.fields_info())", "def describe(self):\n raise NotImplementedError()", "def describe(self):\n raise NotImplementedError()", "def description(self):", "def getProperties(self):\n return self.properties", "def _disp_props(self):\n ret = list()\n if self.required:\n ret.append('required')\n if self.default:\n ret.append('default=%s' % self.default)\n return ret", "def get_properties():", "def readProperties(self):\r\n print('not yet implemented')", "def description(self) -> str:\r\n raise NotImplementedError", "def description(self) -> str:\r\n raise NotImplementedError", "def description(self) -> str:\r\n raise NotImplementedError", "def model_information(self):\n return self._read(MX_MODEL_INFORMATION)", "def describe(self):\n print(self.description)", "def describe(self):\n print(self.description)", "def attributes(self):", "def get_properties(self) -> List[ObserverPropertiesItem]:\n return [\n self._prop_builder.auto('Seed', type(self).seed),\n self._prop_builder.auto('Class filter', type(self).class_filter),\n self._prop_builder.auto('Random order', type(self).random_order),\n self._prop_builder.auto('Save gpu memory', type(self).save_gpu_memory),\n self._prop_builder.auto('Location filter ration', type(self).location_filter_ratio),\n self._prop_builder.auto('Dataset size', type(self).dataset_size),\n self._prop_builder.auto('Dataset config', type(self).dataset_config),\n self._prop_builder.auto('Switch training resets train pos ', type(self).switch_train_resets_train_pos),\n self._prop_builder.auto('Hide labels', type(self).is_hide_labels)\n ]", "def model_info(self) -> str:\n return self._model_info(self.model).decode(\"utf-8\")", "def description(self) -> str:\n pass", "def get_model_with_properties():\n \n m = ConcreteModel()\n\n # ------------------------------------------------------------------\n # Data\n # ------------------------------------------------------------------\n\n m.np = 25 # Number of possible tays\n m.c = 4 # Number of components\n m.lc = 1 # Light component\n m.hc = 4 # Heavy component\n\n #### Constant parameters\n m.Rgas = 8.314 # Ideal gas constant in J/mol K\n m.Tref = 298.15 # Reference temperature in K\n\n #### Product specifications\n m.xspec_lc = 0.99 # Final liquid composition for methanol (1)\n m.xspec_hc = 0.99 # Fnal liquid composition for butanol (4)\n m.xspec_inter2 = 0.99 # Final liquid composition for ethanol (2)\n m.xspec_inter3 = 0.99 # Final liquid composition for propanol (3)\n m.Ddes = 50 # Final flowrate in distillate in mol/s\n m.Bdes = 50 # Final flowrate in bottoms in mol/s\n m.Sdes = 50 # Final flowrate in side product streams in mol/s\n\n # #### Known initial values\n m.Fi = m.Ddes + m.Bdes + 2 * m.Sdes # Side feed flowrate in mol/s\n m.Vi = 400 # Initial value for vapor flowrate in mol/s\n m.Li = 400 # Initial value for liquid flowrate in mol/s\n\n m.Tf = 358 # Side feed temperature in K\n\n m.Preb = 1.2 # Reboiler pressure in bar\n m.Pbot = 1.12 # Bottom-most tray pressure in bar\n m.Ptop = 1.08 # Top-most tray pressure in bar\n m.Pcon = 1.05 # Condenser pressure in bar\n m.Pf = 1.02\n\n m.rr0 = 0.893 # Internal reflux ratio initial value\n m.bu0 = 0.871 # Internal reflux ratio initial value\n\n\n #### Scaling factors\n m.Hscale = 1e3 \n m.Qscale = 1e-3 \n\n \n #### Constants for the 
calculation of liquid heat capacity\n m.cpc = {} # Constant 1 for liquid heat capacity \n m.cpc2 = {} # Constant 2 for liquid heat capacity \n m.cpc[1] = m.Rgas \n m.cpc[2] = 1\n m.cpc2['A', 1] = 1 / 100\n m.cpc2['B', 1] = 1 / 1e4\n m.cpc2['A', 2] = 1\n m.cpc2['B', 2] = 1\n\n\n # ------------------------------------------------------------------\n # Physical Properties\n #\n # Notation:\n # MW ........................ molecular weight in g/gmol\n # TB ........................ boiling point temperature in K\n # TC ........................ critical temperature in K\n # PC ........................ critical pressure in bar\n # w ........................ acentric factor\n # lden ...................... liquid density g/m3,\n # dHvap ..................... heat of vaporization in J/mol.\n # vpA, vpB, vpC, and vpD .... vapor pressure constants\n # cpA, cpB, cpC, and cpD .... heat capacity constants J/mol:\n # 1 for liq and 2 for vapor phase\n #\n # Reference A: R.C. Reid, J.M. Prausnitz and B.E. Poling,\n # \"The Properties of gases and liquids\", 1987 and 2004 Eds.\n #\n # ------------------------------------------------------------------\n\n m.prop = {} # Properties of components:\n cpL = {} # Ruczika-D method for liquid heat capacity calculation\n # (Reference A, page 6.20)\n sumA = {}\n sumB = {}\n sumC = {}\n cpL['a', 'C(H3)(C)'] = 4.19845\n cpL['b', 'C(H3)(C)'] = -0.312709\n cpL['c', 'C(H3)(C)'] = 0.178609\n cpL['a', 'C(H2)(C2)'] = 2.7345\n cpL['b', 'C(H2)(C2)'] = 0.122732\n cpL['c', 'C(H2)(C2)'] = -0.123482\n cpL['a', 'C(H2)(C)(O)'] = 0.517007\n cpL['b', 'C(H2)(C)(O)'] = 1.26631\n cpL['c', 'C(H2)(C)(O)'] = -0.0939713\n cpL['a', 'O(H)(C)'] = 16.1555\n cpL['b', 'O(H)(C)'] = -11.938\n cpL['c', 'O(H)(C)'] = 2.85117\n cpL['a', 'C(H3)(O)'] = 3.70344\n cpL['b', 'C(H3)(O)'] = -1.12884\n cpL['c', 'C(H3)(O)'] = 0.51239\n sumA[1] = (cpL['a', 'C(H3)(O)']\n + cpL['a', 'O(H)(C)']) \n sumB[1] = (cpL['b', 'C(H3)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[1] = (cpL['c', 'C(H3)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[2] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[2] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[2] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[3] = (cpL['a', 'C(H3)(C)']\n + cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[3] = (cpL['b', 'C(H3)(C)']\n + cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[3] = (cpL['c', 'C(H3)(C)']\n + cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n sumA[4] = (cpL['a', 'C(H3)(C)']\n + 2 * cpL['a', 'C(H2)(C2)']\n + cpL['a', 'C(H2)(C)(O)']\n + cpL['a', 'O(H)(C)'])\n sumB[4] = (cpL['b', 'C(H3)(C)']\n + 2 * cpL['b', 'C(H2)(C2)']\n + cpL['b', 'C(H2)(C)(O)']\n + cpL['b', 'O(H)(C)'])\n sumC[4] = (cpL['c', 'C(H3)(C)']\n + 2 * cpL['c', 'C(H2)(C2)']\n + cpL['c', 'C(H2)(C)(O)']\n + cpL['c', 'O(H)(C)'])\n\n ## Methanol: component 1\n m.prop[1, 'MW'] = 32.042\n m.prop[1, 'TB'] = 337.7\n m.prop[1, 'TC'] = 512.6\n m.prop[1, 'PC'] = 80.9\n m.prop[1, 'w'] = 0.556\n m.prop[1, 'lden'] = 792e3\n m.prop[1, 'dHvap'] = 38.376e3\n m.prop[1, 'vpA'] = -8.54796\n m.prop[1, 'vpB'] = 0.76982\n m.prop[1, 'vpC'] = -3.10850\n m.prop[1, 'vpD'] = 1.54481\n m.prop[1, 'cpA', 1] = sumA[1]\n m.prop[1, 'cpB', 1] = sumB[1]\n m.prop[1, 'cpC', 1] = sumC[1]\n m.prop[1, 'cpD', 1] = 0\n m.prop[1, 'cpA', 2] = 2.115e1\n m.prop[1, 'cpB', 2] = 7.092e-2\n m.prop[1, 'cpC', 2] = 2.587e-5\n m.prop[1, 'cpD', 2] = 
-2.852e-8\n\n\n ## Ethanol: component 2\n m.prop[2, 'MW'] = 46.069\n m.prop[2, 'TB'] = 351.4\n m.prop[2, 'TC'] = 513.9\n m.prop[2, 'PC'] = 61.4\n m.prop[2, 'w'] = 0.644\n m.prop[2, 'lden'] = 789.3e3\n m.prop[2, 'dHvap'] = 42.698e3\n m.prop[2, 'vpA'] = -8.51838\n m.prop[2, 'vpB'] = 0.34163\n m.prop[2, 'vpC'] = -5.73683\n m.prop[2, 'vpD'] = 8.32581\n m.prop[2, 'cpA', 1] = sumA[2]\n m.prop[2, 'cpB', 1] = sumB[2]\n m.prop[2, 'cpC', 1] = sumC[2]\n m.prop[2, 'cpD', 1] = 0\n m.prop[2, 'cpA', 2] = 9.014\n m.prop[2, 'cpB', 2] = 2.141e-1\n m.prop[2, 'cpC', 2] = -8.390e-5\n m.prop[2, 'cpD', 2] = 1.373e-9\n\n\n ## Propanol: component 3\n m.prop[3, 'MW'] = 60.096\n m.prop[3, 'TB'] = 370.3\n m.prop[3, 'TC'] = 536.8\n m.prop[3, 'PC'] = 51.7\n m.prop[3, 'w'] = 0.623\n m.prop[3, 'lden'] = 804e3\n m.prop[3, 'dHvap'] = 47.763e3\n m.prop[3, 'vpA'] = -8.05594\n m.prop[3, 'vpB'] = 4.25183e-2\n m.prop[3, 'vpC'] = -7.51296\n m.prop[3, 'vpD'] = 6.89004\n m.prop[3, 'cpA', 1] = sumA[3]\n m.prop[3, 'cpB', 1] = sumB[3]\n m.prop[3, 'cpC', 1] = sumC[3]\n m.prop[3, 'cpD', 1] = 0\n m.prop[3, 'cpA', 2] = 2.47\n m.prop[3, 'cpB', 2] = 3.325e-1\n m.prop[3, 'cpC', 2] = -1.855e-4\n m.prop[3, 'cpD', 2] = 4.296e-8\n\n\n ## Butanol: component 4\n m.prop[4, 'MW'] = 74.123\n m.prop[4, 'TB'] = 390.9\n m.prop[4, 'TC'] = 563.1\n m.prop[4, 'PC'] = 44.2\n m.prop[4, 'w'] = 0.593\n m.prop[4, 'lden'] = 810e3\n m.prop[4, 'dHvap'] = 52.607e3\n m.prop[4, 'vpA'] = -8.00756\n m.prop[4, 'vpB'] = 0.53783\n m.prop[4, 'vpC'] = -9.34240\n m.prop[4, 'vpD'] = 6.68692\n m.prop[4, 'cpA', 1] = sumA[4]\n m.prop[4, 'cpB', 1] = sumB[4]\n m.prop[4, 'cpC', 1] = sumC[4]\n m.prop[4, 'cpD', 1] = 0\n m.prop[4, 'cpA', 2] = 3.266\n m.prop[4, 'cpB', 2] = 4.18e-1\n m.prop[4, 'cpC', 2] = -2.242e-4\n m.prop[4, 'cpD', 2] = 4.685e-8\n\n\n return m", "def properties(self) -> Optional[pulumi.Input['CosmosDBSpecPropertiesArgs']]:\n return pulumi.get(self, \"properties\")", "def __repr__(self):\n\n # info string\n info = self.model.__repr__()\n info += \"\\n=========================\\n\"\n info += f\"Train data length:\\t\\t{ len(self.train_dataset) }\\n\"\n info += f\"Eval sata length:\\t\\t{ len(self.eval_dataset) }\\n\"\n info += f\"Optimizer:\\t\\t\\t\\t{ str(self.optimizer).split('(')[0] }\\n\"\n info += f\"Criterion:\\t\\t\\t\\t{ str(self.criterion).split('(')[0] }\\n\"\n info += f\"Training Environment:\\t{ self.device.type }\\n\"\n info += f\"Show information:\\t\\t{ 'True' if self.info else 'False' }\\n\"\n info += \"=========================\\n\"\n\n return info", "def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"properties\")", "def attributes(self):\n raise NotImplementedError", "def __repr__(self):\n return self.description", "def props(self):\n return self._props", "def props(self):\n return self._props", "def license_model_description(self):\n return self._license_model_description", "def details(self):\n pass", "def properties(self,prop):\n # The particulars of how they are stored and manipulated (e.g., do \n # we want an inventory internally) is not settled. 
I've used a\n # property dictionary for now.\n #\n # How these properties interact with a user defined style file is\n # even less clear.\n\n # Properties defined by plot\n self.subplot.set_xlabel(r\"$%s$\" % prop[\"xlabel\"])\n self.subplot.set_ylabel(r\"$%s$\" % prop[\"ylabel\"])\n self.subplot.set_title(prop[\"title\"])\n\n # Properties defined by user\n #self.axes.grid(True)", "def extra_repr(self):\n return self.model.extra_repr()", "def printProperties(self):\n print \"Properties of: {0}\".format(self.getName())\n for prop in self._getPropName():\n if isinstance(prop, types.IntType):\n print \"\\t{0} => {1}\".format(prop, self.getPropertie(prop))\n else:\n print \"\\t'{0}' => {1}\".format(prop, self.getPropertie(prop))", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def properties(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"properties\")", "def generate_property_template(self):\n template = {\n \"@id\": \"url or curie of the property\",\n \"@type\": \"rdf:Property\",\n \"rdfs:comment\": \"description of the property\",\n \"rdfs:label\": \"carmel case, should match @id\",\n \"schema:domainIncludes\": {\n \"@id\": \"class which use it as a property, could be list\"\n },\n \"schema:isPartOf\": {\n \"@id\": \"http://schema.biothings.io\"\n },\n \"schema:rangeIncludes\": {\n \"@id\": \"relates a property to a class that constitutes (one of) the expected type(s) for values of the property\"\n }\n }\n return template", "def properties(self) -> List[TaskPropertyModel]:\n return self._properties", "def printModel(self):\n print(self.model)", "def description(self):\n if \"description\" in self._prop_dict:\n return self._prop_dict[\"description\"]\n else:\n return None", "def get_model_properties(self):\n properties = {}\n\n filename = self._get_data_filename(\"modelargs.json\")\n with open(filename, \"r\") as f:\n results = json.loads(f.read())\n properties[\"image_size\"] = results.get(\"image_size\")\n properties[\"num_classes\"] = results.get(\"num_classes\")\n properties[\"model\"] = results.get(\"model\")\n properties[\"name\"] = results.get(\"name\")\n properties[\"filter_size\"] = results.get(\"filter_size\", 3)\n properties[\"increase_factor\"] = results.get(\"increase_factor\", 0)\n self.model = properties[\"name\"] # regardless of the name of the folder, this will get the proper model name (i.e. <modelname>.cntk)\n\n # optional property\n properties[\"trainer\"] = results.get(\"trainer\", \"CNTK 2.2\")\n\n self._ensure_model_file()\n properties[\"size_mb\"] = round(os.path.getsize(self.model_file) / (1000 * 1000))\n\n return properties", "def attributes_to_save(self):\r\n return ['initial_pos', 'height', 'width']" ]
[ "0.69124717", "0.6807334", "0.6807334", "0.6807334", "0.67613196", "0.66567373", "0.66567373", "0.66386944", "0.6318433", "0.6283687", "0.6269547", "0.625697", "0.62384415", "0.6229266", "0.620358", "0.61727464", "0.61452335", "0.6096329", "0.6087369", "0.6087369", "0.6087369", "0.60716015", "0.6059512", "0.60478616", "0.6042809", "0.6029266", "0.5998953", "0.5993945", "0.59903127", "0.59903127", "0.59768504", "0.5973463", "0.5970088", "0.59456295", "0.5924363", "0.5911386", "0.59071314", "0.5896432", "0.5881707", "0.5872583", "0.5857686", "0.5843375", "0.58419216", "0.5839013", "0.5835876", "0.58355695", "0.58355695", "0.58350986", "0.5832331", "0.5828074", "0.5827515", "0.58082116", "0.58050245", "0.58038", "0.57864213", "0.5786268", "0.57817835", "0.5781212", "0.5781163", "0.5781163", "0.5781163", "0.5779805", "0.5775486", "0.5775486", "0.5772853", "0.5765446", "0.57537675", "0.5739862", "0.5739169", "0.57117087", "0.57117087", "0.57117087", "0.57077366", "0.57024795", "0.57024795", "0.56899077", "0.5688466", "0.5686032", "0.5677525", "0.5656123", "0.5646295", "0.564447", "0.56374925", "0.56374925", "0.56206006", "0.5619644", "0.56142014", "0.56142014", "0.5610783", "0.5597729", "0.559579", "0.55848694", "0.557356", "0.5572378", "0.5572378", "0.55718917", "0.5567288", "0.5566524", "0.555898", "0.5557913", "0.55567884" ]
0.0
-1
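The query for this record asks about the model properties QtQuick needs, while the positive document only overrides flags(). In QML, delegates usually reach model data through role names, so a roleNames() override is the piece that most directly exposes such properties; the sketch below adds one to the hypothetical model family from the earlier sketches. The custom role value and the QML-visible name "run" are assumptions for illustration.

# Hypothetical continuation of the earlier sketches; the custom role and its
# QML-visible name "run" are assumptions, not the original project's API.
from PyQt5.QtCore import QVariant, Qt


class NamedRoleRunModel(EditableRunListModel):
    RunRole = Qt.UserRole + 1

    def data(self, index, role=Qt.DisplayRole):
        if index.isValid() and role in (Qt.DisplayRole, self.RunRole):
            return self._runs[index.row()]
        return QVariant()

    def roleNames(self):
        # QML delegates read this mapping, e.g. model.run inside a ListView.
        roles = dict(super().roleNames())
        roles[self.RunRole] = b"run"
        return roles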
Create a new run starting at the given coordinates
def append(self, startx, starty):
    if self._runs:
        angles = self._runs[-1]._angles
        pos = self._runs[-1]._position
    else:
        angles = [0]
        pos = None
    run = SingleRun(self, startx, starty, angles=angles, position=pos)
    self.beginInsertRows(QModelIndex(), len(self._runs), len(self._runs))
    self._runs.append(run)
    self.endInsertRows()
    self.scriptChanged.emit()
    self.validChanged.emit()
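The append() above brackets the list mutation with beginInsertRows()/endInsertRows(), which is what lets an already attached QtQuick view insert a delegate for the new run rather than rebuilding the whole list. For completeness, a hedged sketch of how such a model is typically handed to QtQuick follows; the context-property name, the main.qml file, and the sample run values are assumptions.

# Hypothetical wiring that exposes a model like the ones sketched above to a
# QML scene; names, file paths, and sample values are assumptions.
import sys

from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine

app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()
model = RunListModel(runs=["run 0", "run 1"])
# Make the model visible to QML, e.g. as the `model` of a ListView.
engine.rootContext().setContextProperty("runModel", model)
engine.load(QUrl("main.qml"))  # assumed QML file containing a ListView
sys.exit(app.exec_())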
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build(self, coordinates = None):\n\n # start- and endpoints of lines are nodes, but they do not need to have a point object associated to them\n # in this case, point coordinates should be set\n if (self.geo):\n coordinates = rs.PointCoordinates(self.geo)\n\n self.x = round(+ coordinates[0], 5)\n self.y = round(+ coordinates[1], 5)\n self.z = round(+ coordinates[2], 5)", "def _set_start(self, coordinates):\n self._start = coordinates", "def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(QShipCell(position))\n self._update_battle_position(self._cells)", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def move_stage_to_xy(self, coordinates):\n raise NotImplementedError", "def create_position(self):\n area = utils.AreaCreator(\n self._width, self._height, starts_at=self._starts_at,\n is_battle_area=False)\n for coordinate in area.get_coordinates():\n position = coordinate.get_position()\n self._cells.append(PShipCell(position))\n self._update_battle_position(self._cells)", "def __init__(self, coordinates):\n self.coordinates = coordinates", "def make_move(self, playername, coordinates, direction):\n\n pass", "def spawn(self, y, x, h, w):\n self.pos = (np.random.randint(y, y + h), np.random.randint(x, x + w))", "def new_from_runid(cls, runid, offset=1):\n return cls(*cls.get_exoid(runid), offset=offset)", "def GridBoard_create(markersX, markersY, markerLength, markerSeparation, dictionary, firstMarker=None):\n pass", "def NewRun(ss):\n run = ss.TrainEnv.Run.Cur\n ss.TrainEnv.Init(run)\n ss.TestEnv.Init(run)\n ss.Time.Reset()\n ss.Net.InitWts()\n ss.InitStats()\n ss.TrnEpcLog.SetNumRows(0)\n ss.TstEpcLog.SetNumRows(0)\n ss.NeedsNewRun = False", "def set_start_coords(self, x:int, y:int) -> None:\r\n self.start_x = x\r\n self.start_y = y", "def spawn(self):\n (x_coord, y_coord) = (0, 0)\n grid_x = SCREEN_X // self.size\n grid_y = SCREEN_Y // self.size\n while x_coord < EDGE + 5 or x_coord > SCREEN_X - self.size - EDGE - 5:\n x_coord = random.randrange(grid_x) * self.size\n while y_coord < EDGE + 5 or y_coord > SCREEN_Y - self.size - EDGE - 5:\n y_coord = random.randrange(grid_y) * self.size\n return (x_coord, y_coord)", "def __init__(self, num_points = 5000):\n self.num_points = num_points\n\n #all walks start at 0.0\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, coordinates):\n\n if len(coordinates) != 5:\n raise ValueError(\n \"Point coordinates are wrong defined. 
They must be \"\n \"defined as (x, y, z, azimuth, elevation).\"\n f\"Provided coordinates: {coordinates}.\"\n )\n\n # Cast and store input coordinates.\n self._coordinates = np.asarray(coordinates, dtype=np.float64).squeeze()\n\n # Provide center to `Electrode`.\n super().__init__(coordinates[:3])", "def __init__(self, coordinates): \n\t\tself.coordinates = coordinates\n\t\tself.start = coordinates[0]\n\t\tself.end = coordinates [1]\n\t\tself.line = LineString([tuple(self.start), tuple(self.end)])\n\t\tself.normal = -grad.grad_line(self.start, self.end)\n\t\tself.n_collisions = 0\n\t\tself.length = np.sqrt((self.start[0]-self.end[0])**2+(self.start[1]-self.end[1])**2)", "def create_snake(self):\n for position in SNAKE_STARTING_POSITIONS:\n self.add_segment(position)", "def create(self, pos):\n self.pos = pos", "def create_object(cls: Type[\"Object\"],\n start_point: Tuple[int, int],\n end_point: Tuple[int, int],\n program: \"Program\",\n canvas: tk.Canvas) -> Type[\"Object\"]:\n pass", "def start(self, x, y):\n self.last_x = x\n self.last_y = y\n self.aperture_id = None", "def test_execute_start_new(self):\n # Setup params\n n_slots = 10\n start_new_cmd = \"create_parking_lot\"\n start_new_args = (n_slots,)\n\n # Verify command is able to execute start new parking lot\n _, output = self.controller.execute(start_new_cmd, *start_new_args)\n self.assertEqual(output, n_slots)", "def __init__(self, time_feed:TimeFeed, *coordinates, coord_map=None, num_parallel_calls=10):\n self.num_parallel_calls = tf.convert_to_tensor(num_parallel_calls, tf.int32)\n self.coord_map = coord_map\n self.N = np.prod([tf.shape(c)[0] for c in coordinates])\n self.coordinates = [tf.cast(c, float_type) if c.dtype is not float_type else c for c in coordinates]\n self.time_feed = time_feed\n self.slice_size = self.time_feed.slice_size\n self.dims = tf.stack([self.slice_size]+[tf.shape(c)[0] for c in coordinates], axis=0)\n self.coord_feed = self.time_feed.feed.map(self.get_coord_block, num_parallel_calls=self.num_parallel_calls)\n self.feed = self.coord_feed", "def new_run(self, init_walkers, continue_run=None, **kwargs):\n\n # check to see if the continue_run is actually in this file\n if continue_run is not None:\n if continue_run not in self.run_idxs:\n raise ValueError(\"The continue_run idx given, {}, is not present in this file\".format(\n continue_run))\n\n # get the index for this run\n new_run_idx = self.next_run_idx()\n\n # create a new group named the next integer in the counter\n run_grp = self._h5.create_group('{}/{}'.format(RUNS, new_run_idx))\n\n\n # set the initial walkers group\n init_walkers_grp = run_grp.create_group(INIT_WALKERS)\n\n self._add_init_walkers(init_walkers_grp, init_walkers)\n\n # initialize the walkers group\n traj_grp = run_grp.create_group(TRAJECTORIES)\n\n\n # run the initialization routines for adding a run\n self._add_run_init(new_run_idx, continue_run=continue_run)\n\n\n # add metadata if given\n for key, val in kwargs.items():\n if key != RUN_IDX:\n run_grp.attrs[key] = val\n else:\n warn('run_idx metadata is set by wepy and cannot be used', RuntimeWarning)\n\n return run_grp", "def __init__( self, seed=(1, 0, 0) ):\n x, y, z = seed\n self._coords = matrix( [[x], [y], [z], [1.]], 'd' )", "def __init__(self, coords, direction):\n self.y, self.x = coords\n self.direction = direction", "def main():\r\n location = gen_Location(\"0.856901647439813,14.08447265625\")\r\n add_location(location)", "def _spawn_runways() -> pd.DataFrame:\n\n n = NUMBER_OF_RUNWAYS\n runway_data = np.empty((n, 
5))\n\n if not n % 2:\n for i, N in enumerate(range(1, n, 2)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH) / 2\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_data[i + n // 2, 0] = - x\n runway_data[i + n // 2, 1] = y_base\n runway_data[i + n // 2, 2] = - x\n runway_data[i + n // 2, 3] = y_top\n runway_data[i + n // 2, 4] = 0\n\n else:\n for i, N in enumerate(range(- n // 2 + 1, n // 2 + 1)):\n\n x = N * (RUNWAY_SEPARATION + RUNWAY_WIDTH)\n y_base, y_top = - RUNWAY_LENGTH / 2, RUNWAY_LENGTH / 2\n\n runway_data[i, 0] = x\n runway_data[i, 1] = y_base\n runway_data[i, 2] = x\n runway_data[i, 3] = y_top\n runway_data[i, 4] = 0\n\n runway_info = pd.DataFrame(runway_data)\n return runway_info", "def __init__(self, x1, y1):\n self.x = x1\n self.y = y1", "def new_task(self):\n self.true_trajectory = self.simulate()\n self.x0 = self.true_trajectory[0]\n self.xT = self.true_trajectory[-1]\n return self.reset()", "def __init__(self, grid, location):\n self.grid = grid\n self.location = location # Tuple containing (x, y) coordinates.", "def _create_petition_(self):\n self.__weather = create(self.__latitude, self.__longitude)", "def spawn_orb(self):\n x_pos = random.randint(0, self.config.arena_size[0] - 1)\n y_pos = random.randint(0, self.config.arena_size[1] - 1)\n self.arena[x_pos][y_pos] = Tile.ORB", "def start_points(n, world):\n world[0, 0] = 1\n world[n-1, n-1] = 1\n world[0, n-1] = 1\n world[n-1, 0] = 1\n world[np.round(n/2).astype(int)][np.round(n/2).astype(int)] = 1\n return world", "def new_stop(self, coordinates, name): # TODO Add check for being in the map range\n name = name.replace('’', \"'\")\n if ((self._data['bounds'][0] < coordinates[1] < self._data['bounds'][2]) and (self._data['bounds'][1] < coordinates[0] < self._data['bounds'][3])) or ((self._data['bounds'][2] < coordinates[1] < self._data['bounds'][0]) and (self._data['bounds'][3] < coordinates[0] < self._data['bounds'][1])):\n self.add_stop(properties={'marker-size': 'medium', 'marker-symbol': '', 'marker-color': '#808080', 'Stop Name': name, 'Task': '', 'Reward': '',\n 'Last Edit': int(self.now().strftime(\"%j\")), 'Nicknames': []\n },\n geometry={\"type\": \"Point\", \"coordinates\": coordinates, \"bbox\": [coordinates[0], coordinates[1], coordinates[0], coordinates[1]]})\n else:\n raise StopOutsideBoundary()", "def __init__(self, x, y, n_points):\n\n self.x = x\n self.y = y\n self.n_points = n_points", "def __init__(self, origin_x=-2.5, origin_y=-2.5, resolution=.1,\n width=50, height=50):\n self.origin_x = origin_x\n self.origin_y = origin_y\n self.resolution = resolution\n self.width = width\n self.height = height\n self.grid = np.zeros((height, width))", "def create_position(self):\n raise NotImplementedError", "def run(self):\n self.timestamp['start'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n\n for point in self._prepare_grid():\n graph = self._prepare_graph(**point)\n env = self._prepare_env(graph, **point)\n log = self._prepare_logger(graph, env, **point)\n\n try:\n env.run(until=self.runtime)\n except Exception as e:\n print(e)\n log.close()\n\n # self.timestamp[grid.hash_grid_point(point)].append(datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))\n\n self.timestamp['end'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')", "def from_tuple(cls, coords):\n return cls(*coords)", "def create_ship(length, *args):\n coord = (randint(0, 10 - length), 
randint(0, 10 - length))\n check = True\n while check:\n check = False\n if args:\n for arg in args:\n check = point_in_ship(arg, coord) if check == False else True\n while point_in_ship(arg, coord):\n coord = (randint(0, 10 - length), randint(0, 10 - length))\n else:\n break\n return Ship(length=length, horizontal=randint(0, 1), bow=coord)", "def __init__(self, position, is_horizontal, map_state):\n\n self.position = position\n self.spawn_position = position[:]\n self.in_spawn_area = True\n self.is_horizontal = is_horizontal\n self.map_state = map_state\n self.previous_direction = (0, 0)", "def create_prediction_run(\n session: Session,\n prediction_model_id: int,\n prediction_run_timestamp: datetime.datetime,\n complete: bool,\n interpolated: bool) -> PredictionModelRunTimestamp:\n prediction_run = PredictionModelRunTimestamp(\n prediction_model_id=prediction_model_id,\n prediction_run_timestamp=prediction_run_timestamp,\n complete=complete,\n interpolated=interpolated)\n session.add(prediction_run)\n session.commit()\n return prediction_run", "def __init__(self, data, startLat, startLon, delta, numX, numY):\n self.data = data\n self.startLat = startLat\n self.startLon = startLon\n self.delta = delta\n self.xCells = numX\n self.yCells = numY", "def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)", "def run():\n import argparse\n parser = argparse.ArgumentParser(description=\"Create and solve mazes\")\n parser.add_argument(\"-c\", \"--cli\", help=\"Switch to CLI mode\", action='store_true')\n parser.add_argument(\"-f\", \"--file\", help=\"File to import map from\")\n parser.add_argument(\"-s\", \"--start\", help=\"Starting position in the maze\")\n parser.add_argument(\"-e\", \"--end\", help=\"Ending position in the maze\")\n args = parser.parse_args()\n if args.file:\n myfile = args.file\n else:\n myfile = 'map1.txt'\n with open(myfile, 'r') as mapfile:\n maze_str = mapfile.read()\n maze = Maze(maze_str, cli=args.cli, start=parse_seq(args.start), finish=parse_seq(args.end))\n maze.game_loop()", "def createNew(cls, x0, y0, z0, a1, b1, c1, a2, b2, c2):\n p0 = Point(x0, y0, z0)\n d1 = Vector(a1, b1, c1)\n d2 = Vector(a2, b2, c2)\n return cls(p0, d1, d2)", "def spawn_start_goal(grid, spawn_seed=None):\n\n xs, ys = np.where(grid == 0)\n free_positions = list(zip(xs, ys))\n\n start, goal = random.Random(spawn_seed).sample(free_positions, 2)\n\n return start, goal", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All motion starts at (0,0).\n self.x_values = [0]\n self.y_values = [0]", "def test_create_new_placements(self):\n subv = SimpleMachineVertex(None, \"\")\n pl = Placement(subv, 0, 0, 1)\n Placements([pl])", "def make_snapshot(particle_types=['A'], a=1, n=7, r=0):\n s = Snapshot(device.communicator)\n\n if s.communicator.rank == 0:\n # make one unit cell\n s.configuration.box = [a, a, a, 0, 0, 0]\n s.particles.N = 4\n s.particles.types = particle_types\n s.particles.position[:] = [\n [0, 0, 0],\n [0, a / 2, a / 2],\n [a / 2, 0, a / 2],\n [a / 2, a / 2, 0],\n ]\n # and replicate it\n s.replicate(n, n, n)\n\n # perturb the positions\n if r > 0:\n shift = numpy.random.uniform(-r, r, size=(s.particles.N, 3))\n s.particles.position[:] += shift\n\n return s", "def __init__(self, start, goal, grid, expandDistance=1.0, 
goalSampleRate=5):\n self.start = start\n self.goal = goal\n self.maxX = len(grid) - 1\n self.maxY = len(grid[0]) - 1\n self.grid = grid\n self.expandDistance = expandDistance\n self.goalSampleRate = goalSampleRate", "def __init__(self, seeds):\n seeds = list(seeds)\n seeds.sort()\n self.seeds = seeds\n # Gather the minimum and maximum x and y coordinates\n self.minX = seeds[0][0]\n self.maxX = seeds[-1][0]\n ys = list(map(lambda x : x[1], seeds))\n self.minY = min(ys)\n self.maxY = max(ys)", "def __init__(self, location_id, x=0, y=0):\r\n self.location_id = location_id\r\n self.x = x\r\n self.y = y", "def create_new_point(self, coords, **options):\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n\n x1, y1 = (coords[0] - self.variables.point_size), (coords[1] - self.variables.point_size)\n x2, y2 = (coords[0] + self.variables.point_size), (coords[1] + self.variables.point_size)\n shape_id = self.create_oval(x1, y1, x2, y2, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POINT, options)\n self.variables.vector_objects[str(shape_id)].point_size = self.variables.point_size\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def __init__(self, points, width, height):\n \n # Length in pixels\n self.length = self.__getSideLength(width, height)\n # Screen size in pixels\n self.screenSize = self.length * (width+2), self.length * (height+2)\n # Width in tiles\n self.width = width\n # Height in tiles\n self.height = height\n \n # Creating the static and normal tiles.\n self.statics = self.__createStatics(points)\n self.rectangles, self.centrePoints = self.__createTiles(self.length, width, height)", "def cut_trees(self, )\n\n\n\n def random_spot(x_low, y_low, x_range, y_range):\n x = randint(x_low, x_low + x_range)\n y = randint(y_low, y_low + y_range)\n dur = random.uniform(0.5, 3.0)\n\n return pyautogui.moveTo(x, y, dur)", "def prepare_spawns(self, spawns, epoch):\n sim_count = 1\n basedir = os.getcwd()\n spawn_folder_names = []\n for traj_id, frame_id in spawns:\n logger.info('Building simulation {} of epoch {}'.format(sim_count, epoch))\n\n folder_name = 'e{:02d}s{:02d}_{}f{:04d}'.format(epoch, sim_count, traj_id, frame_id)\n destination = os.path.join(self.input_folder, folder_name)\n create_folder(destination)\n spawn_folder_names.append(destination)\n\n if not self.from_solvated:\n # Add files from build folder to destination folder so tleap\n # can read them since we're not retrieving frame from an\n # already solvated trajectory\n\n create_symlinks(\n files=os.path.join(self.build_folder, '*'),\n dst_folder=os.path.realpath(destination)\n )\n\n # All files in destination, so now move into it\n os.chdir(destination)\n\n # Structure\n if self.from_solvated:\n outfile = 'seed.ncrst'\n else:\n outfile = 'seed.pdb'\n write_cpptraj_script(\n traj=os.path.relpath(\n os.path.join(\n basedir,\n self.meta.loc[traj_id]['traj_fn']\n )\n ),\n top=os.path.relpath(\n os.path.join(\n basedir,\n self.meta.loc[traj_id]['top_fn']\n )\n ),\n # Cpptraj uses 1-indexed frame number\n frame1=frame_id + 1,\n frame2=frame_id + 1,\n outfile=outfile,\n path='script.cpptraj',\n run=True\n )\n\n # Topology\n if not self.from_solvated:\n write_tleap_script(\n pdb_file='seed.pdb',\n run=True,\n system_name='structure',\n path='script.tleap'\n )\n # Apply hmr to new topologies\n hmr_prmtop(top_fn='structure.prmtop')\n else:\n 
os.symlink(\n os.path.relpath(\n os.path.join(\n basedir,\n self.meta.loc[traj_id]['top_fn']\n )\n ),\n 'structure.prmtop'\n )\n\n # AMBER input files\n write_production_file()\n\n # Write information from provenance to file\n information = [\n 'Parent trajectory:\\t{}'.format(self.meta.loc[traj_id]['traj_fn']),\n 'Frame number:\\t{}'.format(frame_id),\n 'Topology:\\t{}'.format(self.meta.loc[traj_id]['top_fn']),\n ''\n ]\n provenance_fn = 'provenance.txt'\n with open(provenance_fn, 'w+') as f:\n f.write('\\n'.join(information))\n\n # When finished, update sim_count and go back to base dir to repeat\n sim_count += 1\n os.chdir(basedir)\n return spawn_folder_names", "def simulate(coordinates, start, n, hive_parameters=None, shape_file=None):\r\n\r\n # Read in geopandas data if it's available\r\n if shape_file is not None:\r\n shape_checker = ShapeChecker(shape_file)\r\n sc = shape_checker.check_shape\r\n else:\r\n sc = None\r\n\r\n # Setup initial population\r\n current_generation = [\r\n Hive(lat, long, start, parameters=hive_parameters, point_validator=sc)\r\n for (lat, long) in coordinates\r\n ]\r\n\r\n # For storing results. One row = one hive\r\n results = pd.DataFrame(columns=[\r\n 'year',\r\n 'Latitude',\r\n 'Longitude',\r\n 'survived',\r\n 'surviving_queens'\r\n ])\r\n\r\n for i in range(n):\r\n new_generation = []\r\n n_hives = 0\r\n for h in current_generation:\r\n if h.calculate_survival():\r\n new_generation += h.disperse()\r\n n_hives += 1\r\n\r\n logger.info(f'Year: {start + i}. '\r\n f'{n_hives} of {len(current_generation)} hives survived.')\r\n\r\n data = {\r\n 'year': [h.year for h in current_generation],\r\n 'Latitude': [h.lat for h in current_generation],\r\n 'Longitude': [h.long for h in current_generation],\r\n 'survived': [h.survived for h in current_generation],\r\n 'surviving_queens': [h.surviving_queens for h in current_generation]\r\n }\r\n results = pd.concat([results, pd.DataFrame(data)])\r\n current_generation = new_generation\r\n\r\n return results", "def set_coordinates(self, coordinates):\n self.coordinates = coordinates", "def Board_create(objPoints, dictionary, ids):\n pass", "def __init__(self, x, y):\n self.height = x\n self.width = y\n self.grid = self.initialize(self.height, self.width)\n self.randx = random.randint(0, self.height-1)\n self.randy = random.randint(0, self.width-1)\n #self.make()\n #self.show()", "def __init__(self, x, y):\r\n self.x=x\r\n self.y=y", "def make_start_moves(self):\n self.geos = Geos([])\n\n if g.config.machine_type == 'drag_knife':\n self.make_swivelknife_move()\n return\n\n # Get the start rad. 
and the length of the line segment at begin.\n start_rad = self.shape.parentLayer.start_radius\n\n # Get tool radius based on tool diameter.\n tool_rad = self.shape.parentLayer.getToolRadius()\n\n # Calculate the starting point with and without compensation.\n start = self.start\n angle = self.angle\n\n if self.shape.cut_cor == 40:\n self.append(RapidPos(start))\n \n elif self.shape.cut_cor != 40 and not g.config.vars.Cutter_Compensation[\"done_by_machine\"]:\n\n toolwidth = self.shape.parentLayer.getToolRadius()\n offtype = \"in\" if self.shape.cut_cor == 42 else \"out\"\n offshape = offShapeClass(parent = self.shape, offset = toolwidth, offtype = offtype)\n\n if len(offshape.rawoff) > 0:\n start, angle = offshape.rawoff[0].get_start_end_points(True, True)\n\n self.append(RapidPos(start))\n self.geos += offshape.rawoff\n\n # Cutting Compensation Left\n elif self.shape.cut_cor == 41:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle + pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle + pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=1)\n self.append(start_rad)\n\n # Cutting Compensation Right\n elif self.shape.cut_cor == 42:\n # Center of the Starting Radius.\n Oein = start.get_arc_point(angle - pi/2, start_rad + tool_rad)\n # Start Point of the Radius\n Ps_ein = Oein.get_arc_point(angle + pi, start_rad + tool_rad)\n # Start Point of the straight line segment at begin.\n Pg_ein = Ps_ein.get_arc_point(angle - pi/2, start_rad)\n\n # Get the dive point for the starting contour and append it.\n start_ein = Pg_ein.get_arc_point(angle, tool_rad)\n self.append(RapidPos(start_ein))\n\n # generate the Start Line and append it including the compensation.\n start_line = LineGeo(start_ein, Ps_ein)\n self.append(start_line)\n\n # generate the start rad. 
and append it.\n start_rad = ArcGeo(Ps=Ps_ein, Pe=start, O=Oein,\n r=start_rad + tool_rad, direction=0)\n self.append(start_rad)", "def grid(iant,xgrid=[0],ygrid=[0],sleep=4):\n d=Carma(iant).drive()\n d.setOffset(xgrid[0],ygrid[0])\n time.sleep(sleep)\n time.sleep(sleep)\n for y in ygrid:\n for x in xgrid:\n print x,y\n d.setOffset(x,y)\n time.sleep(sleep)", "def loc_from_tuple(self, coords):\n self.x, self.y = coords", "def generateCoord(self, resolutionList):\r\n locatorList = []\r\n\r\n print \"Scanning Eye\"\r\n self.getEyeCoord(locatorList, resolutionList[0])\r\n print \"Got Eye Coord\"\r\n print \"Scanning NoseBridge\"\r\n self.getNoseBridgeCoord(locatorList, resolutionList[5])\r\n print \"Got NoseBridge Coord\"\r\n print \"Scanning Nose\"\r\n self.getNoseCoord(locatorList, resolutionList[3])\r\n print \"Got Nose Coord\"\r\n print \"Scanning Mouth\"\r\n self.getMouthCoord(locatorList, resolutionList[1])\r\n print \"Got Mouth Coord\"\r\n print \"Scanning MouthLoop\"\r\n self.getMouthLoopCoord(locatorList, resolutionList[2])\r\n print \"Got MouthLoop Coord\"\r\n print \"Scanning Eyebrow\"\r\n self.getEyebrowCoord(locatorList, resolutionList[4])\r\n print \"Got Eyebrow Coord\"\r\n print \"Scanning Ear\"\r\n self.getEarCoord(locatorList)\r\n print \"Got Ear Coord\"\r\n print \"Scanning SideProfile\"\r\n self.getSideProfileCoord(locatorList)\r\n print \"Got SideProfile Coord\"\r\n\r\n print \"Scanning FrontProfile\"\r\n self.getFrontProfileCoord(locatorList)\r\n print \"Got FrontProfile Coord\"\r\n\r\n #Grouping locatorList\r\n cmds.select(locatorList)\r\n locatorGrp = cmds.group(name = \"LocatorCoordGrp#\")\r\n\r\n self.scaleToUnitVolume(locatorGrp)\r\n\r\n self.reverseName(locatorGrp)\r\n for locator in locatorList:\r\n if \"SideProfile_Coord\" in locator:\r\n cmds.move(0, locator, x=True, ws=True)\r\n return locatorGrp", "def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))", "def __init__(self, x0, y0, x1, y1):\n\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1", "def start(self):\r\n for z in range(100):\r\n x = utilities.randint(0, self.width)\r\n y = utilities.randint(0, self.height)\r\n h = utilities.randint(2, 20)\r\n w = utilities.randint(2, 20) \r\n c = utilities.randRGBA()\r\n Planet(x, y, z, h, w, c)", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def set_new_pose(circuit_positions_set):\n position = random.choice(list(enumerate(circuit_positions_set)))[0]\n print(position)\n # pos_number = int(circuit_positions_set[0])\n\n state = ModelState()\n state.model_name = \"f1_renault\"\n state.pose.position.x = circuit_positions_set[position][1]\n 
state.pose.position.y = circuit_positions_set[position][2]\n state.pose.position.z = circuit_positions_set[position][3]\n state.pose.orientation.x = circuit_positions_set[position][4]\n state.pose.orientation.y = circuit_positions_set[position][5]\n state.pose.orientation.z = circuit_positions_set[position][6]\n state.pose.orientation.w = circuit_positions_set[position][7]\n\n rospy.wait_for_service(\"/gazebo/set_model_state\")\n try:\n set_state = rospy.ServiceProxy(\"/gazebo/set_model_state\", SetModelState)\n set_state(state)\n except rospy.ServiceException as e:\n print(\"Service call failed: {}\".format(e))\n return position", "def __init__(self,x,y):\n self.x = x\n self.y = y", "def drawCoordinatePlane_region():\r\n turtle2 = t.Screen()\r\n turtle2.title(\"Life Expectancy versus Region\")\r\n t2.speed(0)\r\n t3.speed(0)\r\n setTurtle(t0)\r\n setTurtle(t1)\r\n setTurtle(t2)\r\n setTurtle(t3)\r\n drawAxes(t0)\r\n t1.left(90)\r\n drawAxes(t1)\r\n t0.pu()\r\n t0.fd(-80)\r\n t0.lt(90)\r\n drawlabels(t0, t1)\r\n drawPoints(t0, t1)\r\n t0.pu()\r\n t1.pu()\r\n t2.pu()\r\n t3.pu()\r\n t0.goto(initialCoordinates())\r\n t1.goto(initialCoordinates())\r\n t2.goto(initialCoordinates())\r\n t3.goto(initialCoordinates())\r\n t1.lt(90)", "def make_move(self, coordinates, player):\n moves = [piece.get_position() for piece in self.get_move_pieces(player)]\n if coordinates not in moves:\n raise ValueError\n\n placed = coordinates[0] + (coordinates[1] * WIDTH)\n\n p = self.pieces[placed]\n if player == WHITE:\n p.set_white()\n else:\n p.set_black()\n\n for d in DIRECTIONS:\n if outside_board(placed, d):\n continue\n\n tile = start = placed + d\n\n to_flip = []\n while self.pieces[tile].get_state() != BOARD:\n if self.pieces[tile].get_state() == player or outside_board(tile, d):\n break\n else:\n to_flip.append(self.pieces[tile])\n tile += d\n\n if self.pieces[tile].get_state() == player:\n for pp in to_flip:\n if player == WHITE:\n pp.set_white()\n else:\n pp.set_black()\n\n self.pieces[start].reset_flipped()", "def __init__(self):\r\n \r\n # World params\r\n self.spawn_distance = 0\r\n\r\n # Nest planning\r\n self.done_init = False\r\n self.wall_set = None\r\n self.planned_nest_set = None\r\n self.nest_completion_set = None\r\n\r\n # Task mapping\r\n self.uuid_task_map = {}\r\n\r\n self.turn = 0", "def local_run(self, parameters=None) -> \"Run\":\n # NOTE -is there a use-case for this?\n raise NotImplementedError()", "def __init__(self, x: int, y: int, w: int, h: int):\n self.x1 = x\n self.y1 = y\n self.x2 = x + w\n self.y2 = y + h", "def __init__(self, x, y):\n # assigning the initial position\n self.x = x\n self.y = y", "def __init__(self, coordinates): \n\t\tsuper().__init__(coordinates)\n\t\tself.type = 'source'", "def start_epoch(self):\n raise NotImplementedError", "def coordinates_from(self, start):\r\n x, y, z = start\r\n return (\r\n x + self.x,\r\n y + self.y,\r\n z + self.z\r\n )", "def _populate(self, coordinates : Optional[np.ndarray]) -> None: \n if coordinates is not None\\\n and not tuple(coordinates.shape) == (self.num_nodes, self.dim):\n raise ValueError(\"_populate - shape error. Expected (num_nodes, dim), but got {}.\".format(coordinates.shape)) \n coordinates = coordinates if coordinates is not None else np.random.rand(self.num_nodes, self.dim)\n\n for node_coords in coordinates: # iterate over first dimension\n new_node = SpatialGraph.Node(self, node_coords)\n self.nodes[new_node.id] = new_node" ]
[ "0.5933312", "0.58285725", "0.5704058", "0.5686776", "0.5686776", "0.56424886", "0.56335765", "0.5611578", "0.5542422", "0.5480548", "0.54717386", "0.54646283", "0.5367157", "0.533819", "0.5320714", "0.52867836", "0.52701634", "0.52323407", "0.5213995", "0.5198065", "0.5189356", "0.51871794", "0.51694363", "0.516808", "0.5153907", "0.51512486", "0.5149177", "0.5128454", "0.5120164", "0.5120069", "0.5115655", "0.51013273", "0.5100362", "0.50937974", "0.50926244", "0.50817007", "0.50790185", "0.50765365", "0.50729287", "0.5057514", "0.50507206", "0.5046254", "0.50410557", "0.50360435", "0.50344336", "0.502313", "0.5011821", "0.50056756", "0.5001883", "0.49967387", "0.49942762", "0.4984204", "0.4979822", "0.49787277", "0.49713778", "0.4971274", "0.49682543", "0.49679592", "0.49575782", "0.4953594", "0.4952692", "0.49525702", "0.49525645", "0.49487308", "0.49413675", "0.49400425", "0.49343756", "0.49298978", "0.49295387", "0.49283007", "0.4927758", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49273455", "0.49260923", "0.49235633", "0.49168804", "0.49157867", "0.4914705", "0.49125355", "0.49082556", "0.4906182", "0.4904682", "0.4901627", "0.49010307", "0.48993102" ]
0.5322073
14
Change the ending coordinates of the most recent run
def update(self, x, y):
    delta_x = x-self._runs[-1]._x  # pylint: disable=W0212
    delta_y = y-self._runs[-1]._y  # pylint: disable=W0212
    if abs(delta_x) > abs(delta_y):
        self._runs[-1]._vertical = False  # pylint: disable=W0212
        self._runs[-1]._length = delta_x  # pylint: disable=W0212
    else:
        self._runs[-1]._vertical = True  # pylint: disable=W0212
        self._runs[-1]._length = delta_y  # pylint: disable=W0212
    i = len(self._runs) - 1
    self.dataChanged.emit(self.index(i, 0), self.index(i, 0))
    self.scriptChanged.emit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_end(self, coordinates):\n self._end = coordinates", "def end(self):\n self.set_initial_offset(1e6)", "def set_finishing_pos(self, finish):\n if finish and self.is_unoccupied(*finish):\n self.finish_pos = finish[:]\n else:\n self.set_random_pos('finishing')", "def __gotoLastEditPosition(self):\n self.activeWindow().gotoLastEditPosition()", "def set_to_end(self) -> None:\n final_config = self._path_points[-1]\n self._mobile.set_2d_pose(final_config[:3])", "def increment_exit_points(self):\r\n self.exit_points = self.exit_points + 1", "def setLastRepOffset(self):\n self.lastRepOffset = self.firstRepOffset + \\\n (self.numberOfSamples * self.numberOfTechReps)", "def find_new_coordinates(self):\n max_weight = max(self.norm_weights)\n max_index = list(self.norm_weights).index(max_weight)\n new_x = int(self.rad + (max_index / (self.window_y / self.dy))*self.dx)\n new_y = int(self.rad + (max_index % (self.window_y / self.dy))*self.dy)\n self.set_coordinates(new_x, new_y)", "def set_goal_pos(self):\n goal_list = np.where(self.value_map == self.value_map.max())\n # assume the first one\n self.goal_pos = (goal_list[0][0], goal_list[1][0])", "def final_coord(steps):\n return reduce(add, steps, (0, 0))", "def refresh(self):\n self.goto(self.starting_position)", "def _update_setpoint(self, *args, value: Any, **kwargs) -> None:\n self._last_setpoint = value\n # Always set done to False when a move is requested\n # This means we always get a rising edge when finished moving\n # Even if the move distance is under our done moving tolerance\n self.done.put(0, internal=True)\n self._update_done()", "def set_new_location(self, xPos, yPos):", "def lastTick():", "def epoch_end(self):\n pass", "def target_position(self, time):\n pass", "def target_position(self, time):\n pass", "def _update_end_lineno():\n if origin:\n record.origin.line_end = lineno", "def update_epoch(self):\n raise NotImplementedError", "def initialCoordinates():\r\n return (-250,-250)", "def recheckPosition(self):\n self.start = self.bounds[0].pos\n self.end = self.bounds[1].pos", "def on_epoch_end(self):\n self.epoch_bar.update()", "def on_epoch_end(self):", "def grab_current_point(self):\n self.open_gripper(80)\n time.sleep(2.5)\n self.execute_action((0, 0, -10), self.GRAB_ORIENTATION)\n self.open_gripper(-30)\n time.sleep(2.5)\n self.execute_action((0, 0, 10), self.GRAB_ORIENTATION)\n time.sleep(2.5)\n self.initial_position = np.array(self.get_current_cartesian_position().position)\n print self.initial_position", "def _query_end_set(self, value):\n self._query_end = self._prep_coord(value, \"query_start\", ge)", "def reset_epoch(self):\n self.ix = 0", "def returnToStagePosition(self): \n c= \"/cli:python /app:matrix /sys:1 /cmd:returntosavedposition /dev:stage\"\n self.sendCMDstring(c)", "def last_pos(self):\n return self.locs[self.indices[-1], 2:4]", "def get_end_loc(self) -> Tuple[int, int]:\n assert self.pos_marker\n return self.pos_marker.working_loc_after(\n self.raw,\n )", "def set_coordinate(self):\n airqual_dictionary = self.realtime_data['stations'][0] #get the very first(recent) data/result\n self.latitude = airqual_dictionary['lat']\n self.longitude = airqual_dictionary['lng']", "def start(self, x, y):\n self.last_x = x\n self.last_y = y\n self.aperture_id = None", "def end_of_line():\r\n set_point(point().end_of_line())", "def stop(self):\n self.change_x = 0\n self.change_y = 0", "def set_most_recent_coordinates(self, lon, lat):\n location = Location(uid=self.id, position=[lon, lat])\n location.save()\n\n 
self.most_recent_location_id = str(location.id)\n self.save()", "def returnToOrigin(intCurrentLeftPin, intCurrentRightPin,\r\n fltXCurrent, fltYCurrent):\r\n # First, drawing point should move to bottom-right corner.\r\n # Then, it should move to origin (bottom-left).\r\n # lsX = [fltDrawingX, fltOriginX]\r\n # TODO: Remove this magic number.\r\n # TODO: Change back to 100 if testing does not work.\r\n intIntermediates = 5\r\n lsX = [fltDrawingX] * intIntermediates\r\n lsX.extend(list(np.linspace(fltDrawingX, fltOriginX, intIntermediates)))\r\n lsY = list(np.linspace(fltYCurrent, fltOriginY, intIntermediates))\r\n lsY.extend([fltOriginY] * intIntermediates)\r\n assert len(lsX) == len(lsY)\r\n\r\n print(\"Returning to origin.\")\r\n\r\n for intCoordinateIndex in range(0,len(lsX)):\r\n \r\n fltXNew = lsX[intCoordinateIndex]\r\n fltYNew = lsY[intCoordinateIndex]\r\n\r\n # Keeping track of the desired (x,y) coordinates.\r\n lsDesiredX.append(fltXNew)\r\n lsDesiredY.append(fltYNew)\r\n\r\n printMovement(fltXCurrent, fltYCurrent, fltXNew, fltYNew)\r\n \r\n # Calculate the operations required to move the drawing point.\r\n lsOperations = calculatePath(fltXCurrent, fltYCurrent, fltXNew,\r\n fltYNew)\r\n \r\n # Execute the operations.\r\n tpCurrentState = executeOperations(lsOperations, intCurrentLeftPin,\r\n intCurrentRightPin,\r\n fltXCurrent, fltYCurrent)\r\n \r\n (intCurrentLeftPin, intCurrentRightPin,\r\n fltXCurrent, fltYCurrent) = tpCurrentState\r\n \r\n return (intCurrentLeftPin, intCurrentRightPin, fltXCurrent, fltYCurrent)", "def move_back(self):\r\n self.center_x, self.center_y = self.save_pos", "def _hit_end_set(self, value):\n self._hit_end = self._prep_coord(value, \"hit_start\", ge)", "def end_commit(self):\n #scn = bpy.context.scene\n #for pt in self.b_pts:\n # point_obj = bpy.data.objects.new(pt.label, None)\n # point_obj.location = pt.location\n # scn.objects.link(point_obj)\n #self.end_commit_post()\n pass", "def _update_position(self):\r\n for tstep in range(0, self.MAX_VELOCITY + 1):\r\n t = tstep / self.MAX_VELOCITY\r\n pos = self.position + np.round(self.velocity * t).astype(np.int16)\r\n if self._is_wall(pos):\r\n self._random_start_position()\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n if self._is_finish(pos):\r\n self.position = pos\r\n self.velocity = np.array([0, 0], dtype=np.int16)\r\n return\r\n self.position = pos", "def on_epoch_end(self, _initialisation=False):\n self.current_b = 0 # batch set to 0\n if self.shuffle>0: # shuffle the batches\n np.random.shuffle(self.idx_el)\n\n # id of the current file\n cidx =self.all_files_idx.tolist().index( self.current_file + self.current_folder*self._div)\n\n\n self.idx_folder = np.roll(self.idx_folder, -cidx-1)\n self.idx_file = np.roll(self.idx_file, -cidx-1)\n self.all_files_idx = np.roll(self.all_files_idx, -cidx-1)\n\n # Load new data\n if(self._initialisation):\n self.X, self.Y = self._load_a_couple0(self.load_a_path(self.current_folder, self.current_file))\n else:\n self.X, self.Y = self.load_a_couple(self.load_a_path(self.current_folder, self.current_file))", "def on_epoch_end(self):\n pass", "def last_pos(self) -> tuple[int, int]:\n if not self.actions:\n return (self.start_x, self.start_y)\n else:\n box = self.get_hitbox_at(self.time_consumed)\n return box.pos_x, box.pos_y", "def reset_to_last_junction(self):\n self.path = self.path[:self._junction_index[-1]]\n self.x, self.y = self.path[-1]\n self._moved = False", "def reset_position(self, x, y):\n\t\tself.grid[x][y] = self.terminal", "def 
_endPath(self):\n #self._closePath()\n self._lastCommand = None\n self._lastX = self._lastY = None", "def mc_update_xy(self):\n i = random.randint(0,self.N-1)\n return self.mc_update_fixed(i,xy = True)", "def returnToZPosition(self): \n c= \"/cli:python /app:matrix /sys:1 /cmd:returntosavedposition /dev:zdrive\"\n self.sendCMDstring(c)", "def move_end_node(self, x, y):", "def moveBasedOnCurrentMomentum(self):\n self.xPos-=self.xMomentum\n self.yPos-=self.yMomentum\n self.syncSpriteCoordinates()", "def _update_coords(self, change=None):\n if self.node_id:\n x, y = self.layout[self.node_id]\n self.coords = (x - self.dist, x + self.dist, y - self.dist, y + self.dist)", "def moveEast(self):\n east = (self.current[0] + 1, self.current[1])\n mv = self.lab.east()\n self.check_grue(mv)\n self.current = east\n self.visited.add(self.current)", "def reset_position(self):\n self.rect.left, self.rect.top = self.start_pos", "def final(self) -> OptCoordinates:\n if len(self) < 1:\n raise IndexError(\n \"Cannot obtain the final set of coordinates from \"\n \"an empty history\"\n )\n\n return self[-1]", "def update_location(self):\n if self.simulation:\n return (self.y, self.x)\n else:\n raise NotImplementedError\n\n self.y = new_y\n self.x = new_x\n\n return (new_y, new_x)", "def correct_position(self):\n\n width = self.screen.get_width()\n height = self.screen.get_height()\n\n if self.last_screen_dimensions[\"width\"] > width:\n self.x -= self.last_screen_dimensions[\"width\"] - width\n\n if self.last_screen_dimensions[\"height\"] > height:\n self.y -= self.last_screen_dimensions[\"height\"] - height", "def take_step(self):\n if self.facing == 0:\n self.new_loc = (self.new_loc[0], self.new_loc[1] + 1)\n elif self.facing == 1:\n self.new_loc = (self.new_loc[0] + 1, self.new_loc[1])\n elif self.facing == 2:\n self.new_loc = (self.new_loc[0], self.new_loc[1] - 1)\n else:\n self.new_loc = (self.new_loc[0] - 1, self.new_loc[1])", "def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)", "def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )", "def _record_experiment(self, final):\n i = np.argmin(self.finish_time) # get the worker which is closest to finishing\n idone = self.workers[i][0]\n if self.workers[i][1] == 'z':\n self.model.tz.remove(idone)\n self.model.z[idone] = self.z[idone]\n self.model.uu.remove(idone)\n self.model.tu.append(idone)\n self.history.append((idone, 'z'))\n else:\n self.model.ty.remove(idone)\n self.model.y[idone] = self.y[idone]\n self.model.tu.remove(idone)\n self.model.tt.append(idone)\n self.history.append((idone, 'y'))\n if final:\n self.workers[i] = None\n self.finish_time[i] = np.inf\n else:\n return i", "def update(self, initial, follows):", "def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8", "def position_last(self):\n return self._position_last", "def __update_current_motion_start(self) -> None:\n if len(self.past_movements) < 2:\n return\n if self.past_movements[-1] * self.past_movements[-2] < 0:\n self.current_motion_start_element = self.counterpoint[-2]", "def backward_character():\r\n set_point(point().offset(-1))", "def 
setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def run(self):\n # type: () -> None\n self.move_to(self.location)", "def start(self, x, y):\n self.last_x = x\n if self.min_x is not None and x is not None:\n self.last_x = max(self.last_x, self.min_x)\n if self.max_x is not None and x is not None:\n self.last_x = min(self.last_x, self.max_x)\n self.last_y = y", "def last_run(self):\r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n cursor = c.execute('SELECT max(id) FROM tbl_lastRun') \r\n max_id = cursor.fetchone()[0]\r\n cursor = c.execute('SELECT col_timestamp FROM tbl_lastRun')\r\n #timeLastRun = cursor.fetchone()[0]\r\n tLR_str = time.strftime('%Y-%m-%d %H:%M %z', time.localtime(cursor.fetchone()[0])) \r\n self.txt_lastRun.delete(0, 'end')\r\n self.txt_lastRun.insert(0, tLR_str)", "def set_position(self, updated):\n self.buff_x = updated[0]\n self.buff_y = updated[1]", "def move_to_position2(self):", "def last_executed(self, last_executed):\n\n self._last_executed = last_executed", "def ending_round(self, index: int, state: State) -> None:\n log_title(\"Ending round {}\".format(index))\n if not self._previous:\n added = state.mapping.values()\n changed = []\n else:\n added = [can for k, can in state.mapping.items() if k not in self._previous]\n changed = [\n (self._previous[k], can)\n for k, can in state.mapping.items()\n if k in self._previous and self._previous[k] != can\n ]\n if added:\n stream.logger.info(\"New pins:\")\n for can in added:\n stream.logger.info(f\"\\t{can.name}\\t{can.version}\")\n if changed:\n stream.logger.info(\"Changed pins:\")\n for (old, new) in changed:\n stream.logger.info(f\"\\t{new.name}\\t{old.version} -> {new.version}\")\n self._previous = state.mapping", "def set_loc(self, moves):\n for move in moves:\n move.start_rank = self.location.rank\n move.start_file = self.location.file", "def on_epoch_end(self):\n self.indexes = np.arange(len(self.ids))\n\n if self.shuffle:\n np.random.shuffle(self.indexes)", "def end(self, finish=None):\n return self.bounds(finish=finish)[1]", "def _updatePos(self, newTile):\n self.pos = newTile\n self.rect = (newTile.x * TILE_W, newTile.y * TILE_H)", "def on_epoch_end(self):\n self.current_elbo = self.model.get_elbo()\n self.current_epoch += 1\n self.elbos += [self.current_elbo]\n self.epochs += [self.current_epoch]\n if self.verbose:\n print('Epoch {} \\tELBO: {}'.format(\n self.current_epoch,\n self.current_elbo))", "def lastScreenPos(self):\n return Point(self._lastScreenPos)", "def lastScreenPos(self):\n return Point(self._lastScreenPos)", "def step(self):\n #nx.draw_networkx(self.G, nx.get_node_attributes(self.G, 'pos'))\n self.schedule.step()", "def _generate_end_position(self):\n end_position = []\n new_row = []\n\n for i in range(1, self.PUZZLE_NUM_ROWS * self.PUZZLE_NUM_COLUMNS + 1):\n new_row.append(i)\n if len(new_row) == self.PUZZLE_NUM_COLUMNS:\n end_position.append(new_row)\n new_row = []\n\n end_position[-1][-1] = 0\n return end_position", "def saveCurrentZPosition(self):\n c= \"/cli:python /app:matrix /sys:1 /cmd:savecurrentposition /dev:zdrive\"\n self.sendCMDstring(c)", "def last_start(self, last_start):\n\n self._last_start = last_start", "def restart(self):\r\n self.agent_x = self.start_x\r\n self.agent_y = self.start_y\r\n 
self.terminated = False", "def update_positions(self, grid):\r\n self.grid = grid", "def after_epoch():\n raise NotImplementedError", "def position_at_end(self, block):\n self._curblock = block\n self._lastop = block.tail or 'tail'", "def move_to_position1(self):", "def lastPos(self):\n return Point(self.currentItem.mapFromScene(self._lastScenePos))", "def lastPos(self):\n return Point(self.currentItem.mapFromScene(self._lastScenePos))", "def lastPos(self):\n return Point(self.currentItem.mapFromScene(self._lastScenePos))" ]
[ "0.6409029", "0.6067654", "0.6002653", "0.59474105", "0.5929103", "0.5818964", "0.5757306", "0.5729312", "0.5727549", "0.5715503", "0.56266606", "0.56220305", "0.5599335", "0.55922794", "0.5553596", "0.55360717", "0.55360717", "0.5516263", "0.54933006", "0.5479724", "0.5466852", "0.5453491", "0.54335946", "0.5431786", "0.54093826", "0.5387925", "0.53854656", "0.53814715", "0.538002", "0.53631854", "0.53596276", "0.5338035", "0.5323061", "0.5320107", "0.53149325", "0.5284525", "0.52743053", "0.52665937", "0.52644765", "0.5262026", "0.5260699", "0.52580506", "0.5254962", "0.524043", "0.5232924", "0.52319026", "0.5229746", "0.5218175", "0.5203187", "0.52026165", "0.51890844", "0.5184861", "0.5183359", "0.51798534", "0.517284", "0.5172002", "0.5164527", "0.5153445", "0.51495254", "0.5145742", "0.51450926", "0.51378506", "0.51321954", "0.5129808", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.512685", "0.5108988", "0.5106426", "0.50964415", "0.50919026", "0.5091867", "0.50909597", "0.5090265", "0.50893664", "0.5078229", "0.50775033", "0.5077071", "0.5075068", "0.5072314", "0.5072314", "0.5071077", "0.5070244", "0.5070089", "0.5068988", "0.50687855", "0.50670326", "0.5066858", "0.50643677", "0.50642806", "0.5053053", "0.5053053", "0.5053053" ]
0.0
-1
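The update document in the record above only makes sense together with the run container it mutates: it takes a new endpoint (x, y), compares the deltas from the last run's origin, keeps whichever axis dominates, stores the signed length on that run, and then notifies the model. The sketch below is a minimal, Qt-free illustration of that logic and is not the dataset's actual implementation: the Run dataclass, its _x/_y/_vertical/_length fields, and the RunList wrapper are assumptions inferred from the attribute names in the record, and the dataChanged/scriptChanged signal emission is replaced by a plain callback. It also includes a get(i) accessor mirroring the one-line document in the record that follows.

from dataclasses import dataclass
from typing import Callable, List, Optional


@dataclass
class Run:
    """Hypothetical run record: an origin plus a signed length along one axis."""
    _x: float
    _y: float
    _vertical: bool = False
    _length: float = 0.0


class RunList:
    """Plain-Python stand-in for the Qt list model; signals become a callback."""

    def __init__(self, on_change: Optional[Callable[[int], None]] = None):
        self._runs: List[Run] = []
        self._on_change = on_change or (lambda index: None)

    def append(self, x: float, y: float) -> None:
        self._runs.append(Run(_x=x, _y=y))

    def get(self, i: int) -> Run:
        # Same shape as the one-line get(i) document in the next record.
        return self._runs[i]

    def update(self, x: float, y: float) -> None:
        # Same decision rule as the update document above: keep the dominant axis.
        last = self._runs[-1]
        delta_x = x - last._x
        delta_y = y - last._y
        if abs(delta_x) > abs(delta_y):
            last._vertical = False
            last._length = delta_x
        else:
            last._vertical = True
            last._length = delta_y
        # Stands in for dataChanged.emit(...) / scriptChanged.emit() in the Qt model.
        self._on_change(len(self._runs) - 1)


if __name__ == "__main__":
    runs = RunList(on_change=lambda i: print("run %d changed" % i))
    runs.append(10.0, 20.0)
    runs.update(10.0, 35.0)  # |delta_y| > |delta_x|, so a vertical run of length 15
    print(runs.get(0))       # Run(_x=10.0, _y=20.0, _vertical=True, _length=15.0)

The dominant-axis rule means every run snaps to either a horizontal or a vertical segment, which is presumably why the model stores a single signed length per run rather than a free endpoint.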
Access a single run.
def get(self, i):
    return self._runs[i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, run_number):\n return self[self.run_cache[run_number]]", "def get_run(self, run_id: str) -> sqlite3.Row:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from runs\n WHERE run_id = ?;\n \"\"\",\n (run_id,),\n )\n results = c.fetchall()\n return results[0]", "def get_run(arn=None):\n pass", "def get_run(self, _id):\n return Run.deserialize(self._get_single('runs', {'run': _id}))", "def _getExactlyOneRun(self):\n run_listing = json.loads(\n self.server.get(_ROUTE_PREFIX + \"/runs\").get_data()\n )\n self.assertLen(run_listing, 1)\n return list(run_listing.keys())[0]", "def get_run(self, id):\n if not id:\n return None\n \n query = \"SELECT * FROM task_history WHERE run_id='\"+str(id)+\"';\"\n \n cur = self.conn.cursor()\n cur.execute(query)\n self.conn.commit()\n run = cur.fetchone()\n \n if run:\n return Run(self.task_history_columns, run);\n else:\n return None", "def run(self, run_idx):\n return self._h5['{}/{}'.format(RUNS, int(run_idx))]", "def current_run(run_id):\n conn = create_connection(db_location)\n c = conn.cursor()\n c.execute(\"SELECT * FROM runs WHERE id = \" + str(run_id))\n result = c.fetchone()\n if result is not None:\n run = dict((c.description[i][0], value) for i, value in enumerate(result))\n run['penalties'], run['num_penalties'] = list_penalties(run_id);\n else:\n run = {'id': 0, 'start': None, 'middle_stop': None, 'middle_start': None, 'end': None, 'droid_uid': 0, 'member_uid': 0, 'first_half_time': None, 'second_half_time': None, 'clock_time': None, 'final_time': None, 'num_penalties': 0}\n if __debug__:\n print(run)\n conn.commit()\n conn.close()\n return run", "def run(self):\n return self.private_run()", "def getRunId(self):\n return self.runid", "def get_run(run_id):\n # Get the access token first to raise an error immediately if no token is\n # present (to avoid unnecessarily instantiating the service API).\n from robflask.service import service\n with service(access_token=ACCESS_TOKEN(request)) as api:\n # Authentication of the user from the expected api_token in the header\n # will fail if no token is given or if the user is not logged in.\n r = api.runs().get_run(run_id=run_id)\n return make_response(jsonify(r), 200)", "def get_run(*args, **kwargs):\n return fluent.get_run(*args, **kwargs)", "def get(self, run_id: str) -> RunResource:\n select_run_resource = sqlalchemy.select(_run_columns).where(\n run_table.c.id == run_id\n )\n\n select_actions = sqlalchemy.select(action_table).where(\n action_table.c.run_id == run_id\n )\n\n with self._sql_engine.begin() as transaction:\n try:\n run_row = transaction.execute(select_run_resource).one()\n except sqlalchemy.exc.NoResultFound as e:\n raise RunNotFoundError(run_id) from e\n action_rows = transaction.execute(select_actions).all()\n\n return _convert_row_to_run(run_row, action_rows)", "def start_run(self):\n return mlflow.start_run(\n run_id=self.run_id,\n experiment_id=self.experiment_id,\n run_name=self.run_name,\n nested=self.nested)", "def run_id(self):\n if self._run_id is not None:\n return self._run_id\n run = mlflow.active_run()\n if run:\n return run.info.run_id\n raise DataSetError(\"Cannot find run id.\")", "def ml_run(self, run_id):\n raise NotImplementedError()", "def get_task_run(self, task_run_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"task_runs\", \"task_run_id\", task_run_id)", "def get_object_to_run(self):", "def run(self) -> 'outputs.CloudRunRewriteResponse':\n return pulumi.get(self, 
\"run\")", "def test_get_run(self):\n pass", "def get_live_run(self) -> \"LiveTaskRun\":\n live_run = self._live_run\n assert live_run is not None, \"Live run must be registered to use this\"\n return live_run", "def get_runObj(run):\n\n if os.path.exists(os.path.join(run, 'runParameters.xml')):\n run_parameters_file = \"runParameters.xml\"\n elif os.path.exists(os.path.join(run, 'RunParameters.xml')):\n run_parameters_file = \"RunParameters.xml\"\n else:\n logger.error(\"Cannot find RunParameters.xml or runParameters.xml in \"\n \"the run folder for run {}\".format(run))\n return None\n\n rppath = os.path.join(run, run_parameters_file)\n try:\n rp = RunParametersParser(os.path.join(run, run_parameters_file))\n except OSError:\n logger.warn(\"Problems parsing the runParameters.xml file at {}. \"\n \"This is quite unexpected. please archive the run {} manually\".format(rppath, run))\n return None\n else:\n # This information about the run type \n try:\n # Works for recent control software\n runtype = rp.data['RunParameters'][\"Setup\"][\"Flowcell\"]\n except KeyError:\n # Use this as second resource but print a warning in the logs\n logger.warn(\"Parsing runParameters to fecth instrument type, \"\n \"not found Flowcell information in it. Using ApplicaiotnName\")\n # here makes sense to use get with default value \"\" ->\n # so that it doesn't raise an exception in the next lines\n # (in case ApplicationName is not found, get returns None)\n runtype = rp.data['RunParameters'][\"Setup\"].get(\"ApplicationName\", \"\")\n\n if \"NextSeq\" in runtype:\n return NextSeq_Run(run, CONFIG[\"analysis\"][\"NextSeq\"])\n else:\n logger.warn(\"Unrecognized run type {}, cannot parse the run {}. \"\n \"The sequencer must be NextSeq\".format(runtype, run))\n return None", "async def read_task_run(self, task_run_id: UUID) -> TaskRun:\n response = await self._client.get(f\"/task_runs/{task_run_id}\")\n return TaskRun.parse_obj(response.json())", "def run_number(self):\n return self._runNumber", "def getRunAt(self, year, month, day, hour, mins=0, seconds=0):\n\n date = datetime(year, month, day, hour, mins, seconds)\n formatstr = \"%Y-%m-%d %H:%M:%S\"\n query = \"\"\"SELECT run FROM runs WHERE \"\"\"\n query += \"stop > '%s' AND start < '%s'; \"\"\" % (date.strftime(formatstr), date.strftime(formatstr))\n results = self.query(query)\n if len(results) >= 1:\n return results[0][0]\n else:\n return None", "def run_single(self):\n self.run_sim_time(1)", "def run_one_step(self):\n pass", "def getResult(self, runId):\n # if hasattr(self, \"results\"):\n # # load result from either the preloaded .result attribute (from .loadResults)\n # result = self.results[runId]\n # else:\n # # or from disk if results haven't been loaded yet\n # result = self.getRun(runId)\n\n # load result from either the preloaded .result attribute (from .loadResults)\n # or from disk if results haven't been loaded yet\n # result = self.results[runId] if hasattr(self, \"results\") else self.getRun(runId)\n return self.results[runId] if hasattr(self, \"results\") else self.getRun(runId)", "def oms_retrieve_run(run_number: int) -> OmsRun: # pragma: no cover\n run_check = OmsRun.objects.filter(run_number=run_number)\n if run_check.exists():\n logger.debug(f\"Run {run_number} found in DB\")\n return OmsRun.objects.get(run_number=run_number)\n\n response = get_oms_run(run_number)\n if response is None:\n msg = f\"Run {run_number} not found in OMS API\"\n logger.warning(msg)\n raise OmsApiRunNumberNotFound(msg)\n\n fill_number = 
response[\"attributes\"].pop(\"fill_number\")\n # There's a chance there's no fill number, see #127\n if fill_number:\n fill = oms_retrieve_fill(fill_number=fill_number)\n else:\n fill = None\n\n include_attribute_keys = [\n \"run_number\",\n \"b_field\",\n \"clock_type\",\n \"cmssw_version\",\n \"components\",\n \"delivered_lumi\",\n \"duration\",\n \"end_lumi\",\n \"energy\",\n \"fill_type_party1\",\n \"fill_type_party2\",\n \"fill_type_runtime\",\n \"hlt_key\",\n \"hlt_physics_counter\",\n \"hlt_physics_rate\",\n \"hlt_physics_size\",\n \"hlt_physics_throughput\",\n \"init_lumi\",\n \"initial_prescale_index\",\n \"l1_hlt_mode\",\n \"l1_hlt_mode_stripped\",\n \"l1_key\",\n \"l1_key_stripped\",\n \"l1_menu\",\n \"l1_rate\",\n \"l1_triggers_counter\",\n \"recorded_lumi\",\n \"sequence\",\n \"stable_beam\",\n \"tier0_transfer\",\n \"trigger_mode\",\n ]\n include_meta_keys = [\"init_lumi\", \"end_lumi\", \"delivered_lumi\", \"recorded_lumi\"]\n\n run_kwargs = {}\n for attribute_key in include_attribute_keys:\n if attribute_key in response[\"attributes\"].keys():\n run_kwargs[attribute_key] = response[\"attributes\"][attribute_key]\n\n # Get luminosity units\n for meta_key in include_meta_keys:\n meta_key_unit = meta_key + \"_unit\"\n if \"meta\" in response.keys():\n if meta_key in response[\"meta\"][\"row\"].keys():\n if response[\"meta\"][\"row\"][meta_key][\"units\"]:\n run_kwargs[meta_key_unit] = response[\"meta\"][\"row\"][meta_key][\n \"units\"\n ]\n\n run_kwargs[\"lumisections\"] = get_oms_lumisection_count(run_number)\n\n try:\n # Make sure that, if more than one user requests a run that had not been added to the DB,\n # it will only be added once.\n with transaction.atomic():\n OmsRun.objects.create(fill=fill, **run_kwargs)\n except IntegrityError as e:\n logger.error(f\"Failed to create OmsRun object: {repr(e)}\")\n OmsRun.objects.filter(run_number=run_number).update(**run_kwargs)\n\n return OmsRun.objects.get(run_number=run_number)", "def run_task(self) -> Task:", "def course_run(self):\n return self.course_key.run", "def run(self):\n task_func = getattr(self, self.task_data.get('task_type'))\n task_obj = task_func()\n return task_obj", "def next_run(self):\n self.load_run(run=self.run+1)", "def query_running(self):\n qp=win32com.client.constants.__dicts__[0]['EXP_RUNNING_EXPERIMENT']\n return self.app.GetParam(qp)[0]", "def get_runner(self, runner_id: int = 0) -> ExperimentRunner:\n return self.runners[runner_id]", "def run_id(self) -> str:\n return self._step_execution_context.run_id", "async def read_flow_run(self, flow_run_id: UUID) -> FlowRun:\n try:\n response = await self._client.get(f\"/flow_runs/{flow_run_id}\")\n except httpx.HTTPStatusError as e:\n if e.response.status_code == 404:\n raise prefect.exceptions.ObjectNotFound(http_exc=e) from e\n else:\n raise\n return FlowRun.parse_obj(response.json())", "def get_flowcell_run(self):\n self._update_flowcellrun()\n logger.debug(\"returning flowcell run object info: {}\".format(\n self.flowcellrun.to_json()))\n return self.flowcellrun", "def take_run_name(self):\n name=\"run\"\n Pattern=re.compile(r'''\\'(\\S*)\\'\\s*=\\s*run_tag''',re.I)\n card=open(\"./Cards/run_card.dat\")\n\n while 1:\n line=card.readline()\n if line=='':\n break\n \n if Pattern.search(line):\n name=Pattern.search(line).groups()[0]\n break\n return name", "def getAsRunFile(self):\r\n return ClientIF.getAsRun()", "def get_run_id(self):\n\t\tif self.have_metadata is False:\n\t\t\tself._get_metadata()\n\t\t\tself.have_metadata = 
True\n\n\t\ttry:\n\t\t\treturn self.keyinfo['tracking_id'].attrs['run_id']\n\t\texcept:\n\t\t\treturn None", "def Run():\r\n pass", "def getRun(self, runId, filename=None, trajectoryName=None, pypetShortNames=True):\n # chose HDF file to load\n filename = self.HDF_FILE or filename\n\n # either use loaded pypetTrajectory or load from HDF file if it isn't available\n pypetTrajectory = (\n self.pypetTrajectory\n if hasattr(self, \"pypetTrajectory\")\n else pu.loadPypetTrajectory(filename, trajectoryName)\n )\n\n # # if there was no pypetTrajectory loaded before\n # if pypetTrajectory is None:\n # # chose HDF file to load\n # filename = self.HDF_FILE or filename\n # pypetTrajectory = pu.loadPypetTrajectory(filename, trajectoryName)\n\n return pu.getRun(runId, pypetTrajectory, pypetShortNames=pypetShortNames)", "def run(self, run):\n\n self._run = run", "def run_single_task(model: api_model.Model, this_task, task_name):\n\n results_data = this_task.evaluate_model(model)\n task_info = this_task.get_task_details()\n\n assert isinstance(task_info, task.TaskMetadata), \\\n f'{task_name}: get_task_details() should return a TaskMetadata object'\n\n if isinstance(results_data, list):\n for k in results_data:\n assert isinstance(\n k, task.ScoreData\n ), f'{task_name}: evaluate_model() should return ScoreData object(s).' \n else:\n assert isinstance(\n results_data,\n task.ScoreData), f'{task_name}: evaluate_model() should return ScoreData object(s).'\n\n verify_keywords(task_info, task_name)", "def get_prediction_run(session: Session, prediction_model_id: int,\n prediction_run_timestamp: datetime.datetime) -> PredictionModelRunTimestamp:\n logger.info('get prediction run for %s', prediction_run_timestamp)\n return session.query(PredictionModelRunTimestamp).\\\n filter(PredictionModelRunTimestamp.prediction_model_id == prediction_model_id).\\\n filter(PredictionModelRunTimestamp.prediction_run_timestamp ==\n prediction_run_timestamp).first()", "def next_run(self):\n for run in self._runs:\n # Because the runs are ordered, look for the first run where\n # stop_time is in the future.\n if run.is_next_run(self._now):\n return run\n # If we arrive here, no next run (today).\n return None", "def run_id() -> int:\n return sg_covid_impact.config[\"flows\"][\"glass\"][\"run_id\"]", "def start(self):\n return self._args[0]", "def run_type(self):\n return self._run_type", "def set_run(self, run_id: str):\n self.run_id = run_id", "def last_run(self):\n return self._last_run", "async def inspect(id: UUID):\n async with get_client() as client:\n try:\n flow_run = await client.read_flow_run(id)\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == fastapi.status.HTTP_404_NOT_FOUND:\n exit_with_error(f\"Flow run {id!r} not found!\")\n else:\n raise\n\n console.print(Pretty(flow_run))", "def get_run_log(self, run_id):\n postresult = requests.get(\n f\"{self.proto}://{self.host}/ga4gh/wes/v1/runs/{run_id}\",\n headers=self.auth,\n )\n return wes_reponse(postresult)", "def get_current_run_id(self):\n start_datetime = self.start_dt\n run_datetime = self._get_current_datetime()\n run_interval = self.training_interval\n\n time_since_start = run_datetime - start_datetime\n logger.info(\"Time between start and run_date: %s\" % str(time_since_start))\n\n run_id = int(time_since_start / run_interval)\n logger.info(\"Current run_id: %s\" % str(run_id))\n\n return run_id", "def run(self):\n# log.trace(\" run task %s \", self.name)\n return self.target.send(self.params)", "def run_scenario(self, run, 
run_id):\n\n raise NotImplementedError", "def run(self) -> Tuple[Any, Log]:\n return self._value", "def __getitem__(self, pos):\n try:\n return self.run_mem[pos]\n except KeyError:\n self.run_mem[pos] = self.program[pos]\n return self.run_mem[pos]", "def runsingle(self, data):\n self.rpc.call(MsfRpcMethod.SessionMeterpreterRunSingle, [self.sid, data])\n return self.read()", "def run(self):\n \n pass", "def get_result(self, run_id, as_type=None):\n resp = self.client._perform_raw(\n \"GET\", \"/projects/%s/runnables/%s/result/%s\" % (self.project_key, self.runnable_type, run_id))\n if as_type == 'string':\n with resp.raw as s:\n return s.read()\n elif as_type == 'json':\n with resp.raw as s:\n return json.load(s)\n else:\n return resp.raw", "def get_state(self, run_id):\n raise NotImplementedError()", "def run_info ( run_num ) : \n global _rinfos_\n rinfo = _rinfos_.get ( run_num , None )\n if rinfo : return rinfo \n \n try :\n \n #\n url = run_url.format ( run_num )\n _obj = urllib.urlopen ( url )\n rinfo = json.load ( _obj )\n\n rinfo = rinfo if rinfo else None\n _rinfos_ [ run_num ] = rinfo \n return rinfo\n \n except:\n return None \n\n return None", "async def inspect(id: UUID):\n async with get_client() as client:\n try:\n flow_run = await client.read_flow_run(id)\n except httpx.HTTPStatusError as exc:\n if exc.response.status_code == status.HTTP_404_NOT_FOUND:\n exit_with_error(f\"Flow run {id!r} not found!\")\n else:\n raise\n\n app.console.print(Pretty(flow_run))", "def run(self):\n raise NotImplementedError('Run method not implemented in %s' % type(self).__name__)", "def run(_):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def getRun(runId, pypetTrajectory, pypetShortNames=True):\n exploredParameters = pypetTrajectory.f_get_explored_parameters()\n niceParKeys = [p.split(\".\")[-1] for p in exploredParameters.keys()]\n\n pypetTrajectory.results[runId].f_load()\n result = pypetTrajectory.results[runId].f_to_dict(fast_access=True, short_names=pypetShortNames)\n pypetTrajectory.results[runId].f_remove()\n\n # convert to dotdict\n result = dotdict(result)\n\n # Postprocess result keys if pypet short names aren't used\n # Before: results.run_00000001.outputs.rates_inh\n # After: outputs.rates_inh\n if pypetShortNames == False:\n new_dict = {}\n for key, value in result.items():\n new_key = \"\".join(key.split(\".\", 2)[2:])\n new_dict[new_key] = result[key]\n result = copy.deepcopy(new_dict)\n\n # add parameters of this run\n result[\"params\"] = {}\n\n for nicep, p in zip(niceParKeys, exploredParameters.keys()):\n result[\"params\"][nicep] = exploredParameters[p].f_get_range()[runId]\n\n return result", "def next_run_idx(self):\n return self.num_runs", "def run_default(self):\n sim_run = self._runModel()\n\n self.default_runs.append(sim_run)\n\n return sim_run", "def test_with_run_command(self):\n self.build()\n self.runCmd(\"file \" + self.getBuildArtifact(\"a.out\"), CURRENT_EXECUTABLE_SET)\n\n lldbutil.run_break_set_by_file_and_line(\n self, \"main.cpp\", self.line, num_expected_locations=1, loc_exact=True)\n\n self.runCmd(\"run\", RUN_SUCCEEDED)\n\n # The stop reason of the thread should be breakpoint.\n self.expect(\"thread list\", 
STOPPED_DUE_TO_BREAKPOINT,\n substrs=['stopped',\n 'stop reason = breakpoint'])\n\n # This is the function to remove the custom formats in order to have a\n # clean slate for the next test case.\n def cleanup():\n self.runCmd('type format clear', check=False)\n self.runCmd('type summary clear', check=False)\n\n # Execute the cleanup function during test case tear down.\n self.addTearDownHook(cleanup)\n\n root = self.frame().FindVariable(\"root\")\n child = root.GetChildAtIndex(1)\n if self.TraceOn():\n print(root)\n print(child)\n for i in range(0, 15000):\n child = child.GetChildAtIndex(1)\n if self.TraceOn():\n print(child)\n self.assertTrue(\n child.IsValid(),\n \"could not retrieve the deep ValueObject\")\n self.assertTrue(\n child.GetChildAtIndex(0).IsValid(),\n \"the deep ValueObject has no value\")\n self.assertTrue(\n child.GetChildAtIndex(0).GetValueAsUnsigned() != 0,\n \"the deep ValueObject has a zero value\")\n self.assertTrue(\n child.GetChildAtIndex(1).GetValueAsUnsigned() != 0,\n \"the deep ValueObject has no next\")", "def get_run_plot(name, run, reference=False):\n if reference:\n run = utils.reference_run(name, run)\n\n # Get the latest run file in the run's directory\n base = utils.run_file_path(run)\n files = sorted(glob.glob(\"{0}/*.root\".format(base)))\n try:\n path = files[-1]\n except IndexError:\n raise IOError(\"Run file not found for run {0}\".format(run))\n\n # Try to open the file\n f = ROOT.TFile(path)\n if f.IsZombie():\n raise IOError(\"Run file not found for run {0}\".format(run))\n\n # Retrieve the object\n obj = f.Get(name)\n if not obj:\n raise KeyError(\"Plot {0} not found in run file {1}\".format(name, run))\n # The file will be closed when the function returns, so we need to clone\n # the fetched object outside the file's scope\n ROOT.gROOT.cd()\n clone = obj.Clone(obj.GetName())\n f.Close()\n\n return clone", "def Run(self):\n pass", "def RUN(self):", "def local_run(self, parameters=None) -> \"Run\":\n # NOTE -is there a use-case for this?\n raise NotImplementedError()", "def run(self):\r\n pass", "async def get_one(self, where):\n\n pass", "def start(self):\n return self.args[4]", "def run_grp(self, run_idx):\n return self.runs[\"{}\".format(run_idx)]", "def get_run_detail():\r\n params=request.values\r\n result=ExecRunDetail.query.filter(ExecRunDetail.run_id==params['run_id']).all()\r\n return json_response(result=result)", "def detail_running_instance(self):\n\n instance_id = self._choose_among_running_instances()\n\n # Exit option\n if not instance_id:\n print 'Operation cancelled'\n return\n\n # Print the details\n print '# Details of the \"%s\" instance' % instance_id\n self.compute.detail_running_instance(instance_id)", "def run(self, state: State) -> State:", "def run(self, **kwargs):", "def fetch_run_logs(id_, **kwargs):\n run = get_run_object(id_)\n check_run_permission(run, kwargs[\"token_info\"])\n query = \"ilyde-run-{}\".format(run.id)\n return query_elasticsearch(query)", "def test_run(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n # submit runs! then get the value\n runner.submit_trial(run_info)\n result = next(runner.iter_results(), None)\n\n assert result is not None\n\n run_info, run_value = result\n\n assert run_value.cost == 4\n assert run_value.status == StatusType.SUCCESS" ]
[ "0.7563216", "0.75471956", "0.74779475", "0.7438003", "0.6921127", "0.690785", "0.6889391", "0.6586346", "0.6453123", "0.64372474", "0.6409679", "0.632492", "0.6297225", "0.62775546", "0.6272261", "0.6258751", "0.62365556", "0.6131638", "0.6095218", "0.60870063", "0.6036361", "0.602143", "0.59984756", "0.59898186", "0.59604543", "0.59457254", "0.5929036", "0.58952004", "0.58632535", "0.58563215", "0.582589", "0.57543635", "0.5734214", "0.5719599", "0.57127273", "0.5704815", "0.5703764", "0.56963754", "0.5685824", "0.56844306", "0.56765336", "0.56716657", "0.56588185", "0.562914", "0.5613832", "0.56082714", "0.560745", "0.560076", "0.55909264", "0.5587371", "0.5577352", "0.55567193", "0.5554169", "0.55466646", "0.55334896", "0.55321056", "0.5522705", "0.551574", "0.5508689", "0.549747", "0.5492211", "0.5486518", "0.5484774", "0.5465178", "0.5463264", "0.5452253", "0.5431299", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.54302865", "0.5406784", "0.5402692", "0.53997856", "0.53991646", "0.53964585", "0.537887", "0.53690034", "0.5365808", "0.53592134", "0.5358527", "0.5355846", "0.5354694", "0.5348556", "0.53426707", "0.53369087", "0.5332675", "0.5330743", "0.53306043" ]
0.6402159
11
The instrument script that performs the requested runs
def script(self): temp = "\n\n".join([r.script_line( self._angle_command, self._horizontal_command, self._vertical_command, self._origin, self._frame_width, self._frame_height) for r in self._runs]) return temp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_experiment():\n pass", "def run(self):\n self.run_measurement()\n self.run_analysis()\n if self.get_param_value('update'):\n self.run_update()", "def run_script(self):\n pass", "def run(self,measurements,actions):\n raise NotImplementedError", "def run(self):\n\n self.create_trials() # create them *before* running!\n self.start_experiment()\n\n for trail in self.trials:\n trial.run()\n\n self.close()", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def Run():\r\n pass", "def RUN(self):", "def run_tests(self):\n\n self.endurance_results = []\n self._mozmill.add_listener(self.endurance_event, eventType='mozmill.enduranceResults')\n self._mozmill.persisted['endurance'] = {'delay': self.delay,\n 'iterations': self.options.iterations,\n 'entities': self.options.entities,\n 'restart': self.options.restart}\n\n self.manifest_path = os.path.join('tests', 'endurance')\n if not self.options.reserved:\n self.manifest_path = os.path.join(self.manifest_path,\n \"manifest.ini\")\n else:\n self.manifest_path = os.path.join(self.manifest_path,\n 'reserved',\n self.options.reserved + \".ini\")\n TestRun.run_tests(self)", "def run(self,infilename): \n ### initizlize the analysis\n self.init_analysis(infilename)\n ### run the analysis\n self.run_analysis()\n ### store selected results\n self.store_results()\n return", "def run(self):\r\n self.log(texto=f\"Executando {self._name}\")", "def run_run(self, cmds):\n pass", "def run_analysis(wf):\n if wf.analysis[\"type\"] == \"one_sample_tests\":\n start_one_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"two_sample_tests\":\n start_two_sample_tests(wf)\n\n elif wf.analysis[\"type\"] == \"factorial_tests\":\n start_factorial_tests(wf)\n\n elif wf.analysis[\"type\"] == \"n_sample_tests\":\n start_n_sample_tests(wf)\n\n info(\"> Finished analysis\")", "def __run(self):\n sys.settrace(self.globaltrace)\n self.__run_backup()\n self.run = self.__run_backup", "def execute(self, targets):", "def run_analysis(self, argv):\n self._run_argparser(argv)\n self.run()", "def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table", "def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? 
Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)", "def execute_experiment_callgrind(self):\n protocol_name = self.protocol_config['protocol']\n number_of_repetitions = self.protocol_config['numOfRepetitions']\n configurations = self.protocol_config['configurations']\n working_directory = self.protocol_config['workingDirectory']\n executables = self.protocol_config['executableName']\n for i in range(number_of_repetitions):\n for idx2 in range(len(configurations)):\n for idx in range(len(executables)):\n os.system(f'fab -f Execution/fabfile.py run_protocol_profiler:{self.protocol_config_path},'\n f'{configurations[idx2]},{executables[idx]},{working_directory[idx]} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def run(self):\r\n self.inst.write(':RUN')", "def _run_simulator(self):\n os.chdir(self.test_cases_path)\n\n simulator_config_filename = self.simulator_config_filename\n script, options = runner.parse_commands(simulator_config_filename)\n\n if sys.platform.startswith('win'):\n subprocess.call([script] + options, shell=True)\n else:\n subprocess.call([script] + options)\n\n os.chdir(self.this_file_path)", "def run(self):\n\n self.preprocess()\n self.restore_ratings()\n self.prepare_UI()\n self.loop_through_units()\n self.cleanup()\n\n print('\\nAll Done - results are available in:\\n\\t{}'.format(self.out_dir))", "def run(self):\n self.run_measurement()\n self.run_analysis()\n self.results = self.analysis.proc_data_dict['analysis_params_dict']\n if self.get_param_value('update'):\n self.run_update()\n self.dev.update_cancellation_params()\n\n if self.get_param_value('configure_mux_drive'):\n drive_lo_freqs = self.get_param_value('drive_lo_freqs')\n configure_qubit_mux_drive(self.qubits, drive_lo_freqs)", "def run(self):\n self.tcex.log.trace('run')", "def run(self):\n\t\t\n\t\tpass", "def script(self):", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run_this(self, 
script):\n for line in script.strip().split(\"\\n\"):\n # TODO Interpret lines more than just calling functions\n if line.startswith(\"#\"):\n # Skip lines that start with #\n continue\n retval = self.call_function(line.strip())\n #print retval", "def run(self, **kwargs):", "def custom():\n run(\"example\")", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def run_tests(self):\n\n # log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n # test methods start here\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n\n # dummy_method\n self.dummy_method()\n\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # test methods end here\n\n # log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def _auto_run(args):\n\n # TDH (2020-01-13) For developement testing the following section\n # replicates the functionality of \"standard_analysis.py\" so that\n # json_results can be created and used to create the graph image\n # files.\n import benchmark_postprocessing as bmpp\n file_list = bmpp.get_benchmark_files(args.benchmark_results_dir)\n json_results = bmpp.parse_files(file_list)\n json_results = bmpp.parse_and_add_benchmark_metadata(json_results)\n run_id_list = get_unique_run_ids(json_results)\n\n # TDH (2020-01-13) - Create unqiue reports for each run ID found.\n # Even a single results directory can contain results from multiple\n # run IDs.\n for run_id in run_id_list:\n output_path = os.path.join(\n args.benchmark_results_dir,\n '{}_report'.format(run_id))\n\n # TDH: Thorough attempt to safely create the results directory and\n # provide good error reporting if something went wrong.\n try:\n os.mkdir(output_path)\n except OSError:\n logging.error('Failed to create directory for report at {}'.format(\n output_path))\n create_standard_analysis_report(output_path,\n json_results,\n run_id)", "def run(self):\n\n self.steer()\n self.drive()", "def main():\n tng.api.runner()", "def __execute_experiment__(self, *args, **kwargs):\n from klibs.KLGraphics import clear\n\n if self.blocks == None:\n self.blocks = self.trial_factory.export_trials()\n\n P.block_number = 0\n P.trial_id = 0\n for block in self.blocks:\n P.recycle_count = 0\n P.block_number += 1\n P.practicing = block.practice\n self.block()\n P.trial_number = 1\n for trial in block: # ie. 
list of trials\n try:\n P.trial_id += 1 # Increments regardless of recycling\n self.__trial__(trial, block.practice)\n P.trial_number += 1\n except TrialException:\n block.recycle()\n P.recycle_count += 1\n clear() # NOTE: is this actually wanted?\n self.rc.reset()\n self.clean_up()\n\n self.incomplete = False\n if 'session_info' in self.database.tables:\n where = {'session_number': P.session_number}\n self.database.update('session_info', {'complete': True}, where)", "def run():\n main()", "def run(self):\n self.assign_inputs()\n self.execute()\n self.collect_outputs()", "def run(self):\n\n # How to retrieve your input data.\n input_1_data = self.in_data['input_1']\n\n # How to retrieve your params value.\n param_1 = self.param['param_1']\n\n # How to process data.\n # Just write any number of methods you want and use them here.\n sample_out_data = self.sample_method(input_1_data, param_1)\n\n # Go to the definition of this method to see how to log.\n self.demo_log()\n\n # This is how to set output data.\n self.out_data['output_1'] = sample_out_data", "def run(self, **kwargs):\n pass", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def runall():\n sclogic.runall()", "def run(self):\r\n pass", "def start(self):\n for trial in self._trials:\n self._run(trial)", "def _run(self):\n raise NotImplementedError", "def _run(self):\n raise NotImplementedError", "def run1():\n #Reseting motors\n ResetRobot.reset_wheel_motors()\n ResetRobot.reset_attachment_motors()\n CalibrateRobot.calibrate_gyro()\n\n #mission M01 and M02 - space travel and solar panel\n M01_M02()\n \n #Mission M05- Extraction \n M05_M14()\n\n #Back to base before Gerhard (Remove comment if necessary)\n return_to_base1()\n\n # Must delete for competition.. 
This is to set up forklift to repeat run.\n Robot.attachment_left.on_for_rotations(-100, 8) #Raises Forklift ", "def run(self):\n self.run()", "def run(self, args):\n pass", "def run_script(self, params, config_no):\n raise NotImplementedError()", "def execute(targets, lines):", "def run(self): \r\n return", "def run(self):\n logging.info('running experiment...')\n self._prepare()\n self._load_data()\n self._run()\n self._evaluate()\n self._summarise()\n return True", "def run(self, steps):\n self.sim.run(steps)", "def run(self):\n computation_times=[]\n response_times=[]\n\n s=Session(self.BASE_URL)\n request=s.prepare_execution_request(code,files=['test.txt'])\n sequence=0\n with timing(computation_times):\n with timing(response_times):\n s.send_execution_request(request)\n\n done=False\n while not done:\n sleep(self.POLL_INTERVAL)\n with timing(response_times):\n r=s.output_poll(sequence)\n if len(r)==0 or 'content' not in r:\n continue\n for m in r['content']:\n sequence+=1\n if (m['msg_type']==\"extension\"\n and m['content']['msg_type']==\"files\"):\n returned_file=m['content']['content']['files'][0]\n if returned_file!='test.txt':\n print \"RETURNED FILENAME NOT CORRECT\"\n raise ValueError(\"Returned filename not correct: %s\"%returned_file)\n with timing(response_times):\n f=s.get_file(returned_file)\n if f!=FILE_RESULT_CONTENTS:\n print \"RETURNED FILE CONTENTS NOT CORRECT\"\n raise ValueError(\"Returned file contents not correct: %s\"%f)\n # if we've made it this far, we're done\n done=True\n break\n\n self.custom_timers['Computation']=computation_times\n self.custom_timers['Response']=response_times", "def main(argv):\n global g_test_root_dir\n global g_temp_filename\n\n if len(argv) < 2:\n print(\"invoke this script as python collectUnitTestRunTime.py 10 'python run.py_path/run.py --wipe \"\n \"--test dir_to_test/test1,python run.py_path/run.py --wipe --test dir_to_test2/test2,...' 
True\\n\")\n sys.exit(1)\n else: # we may be in business\n repeat_number = int(argv[1]) # number of times to run a unit test\n command_lists = argv[2] # list of unit tests to run\n\n for command in command_lists.split(','): # for each command in the list\n # run command repeat_number of times and collect results into result_dict\n run_commands(command, repeat_number, g_temp_filename)", "def run(self, lines):\r\n pass", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def execute_series(self):\n for n in xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def run_autostig(self):\n raise NotImplementedError", "def run(self):\n \n pass", "def script_generator(self):\n analyze_tool = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py\"\n ex_options = self.global_setting.get('analyze_options', str())\n py = self.global_setting.get('python', sys.executable)\n if os.access(py, os.X_OK):\n content = \"set -e \\n\" \n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s plot_curve *.log.json \"%(py, analyze_tool)\n content += \"--keys loss loss_cls loss_pts_init \"\n content += \"loss_pts_refine \"\n content += \"--out losses.pdf %s &> analyze.log \\n\"%(ex_options)\n\n content += \"touch analyze.done \\n\"\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def executeAnalysis(config, samples, visitor):\n # store cuts in \"info\" (re-created from TQCuts)\n # ROOT.xAOD.clearTransientTrees()\n #nEventsProcessed = 0\n\n CLI = config.getFolder(\"CLI+\")\n\n # flag indicating to run analysis in debug mode\n debug = CLI.getTagBoolDefault(\"debug\",False)\n # flag indicating to run a dummy analysis\n dummy = CLI.getTagBoolDefault(\"dummy\",False)\n\n downmerge = CLI.getTagBoolDefault(\"downmerge\",False)\n downmergeTo = CLI.getTagStandardStringDefault(\"downmergeTo\",\"\")\n\n pathselect = CLI.getTagVStandardString(\"pathselect\")\n\n if debug:\n maxEvents = 100\n else:\n maxEvents = config.getTagIntegerDefault(\"maxEvents\",-1)\n\n # proceed with analysis\n appname = QFramework.TQLibrary.getApplicationName().Data()\n visitor.setVisitTraceID(appname)\n if maxEvents > 0:\n QFramework.WARN(\"setting maximum number of events per sample to {:d}\".format(maxEvents))\n visitor.setMaxEvents(maxEvents)\n QFramework.TQLibrary.allowRedirection(False)\n timer = ROOT.TStopwatch()\n nsamples = 0\n if pathselect.size() > 0:\n paths = ROOT.TString(\",\".join(map(str,pathselect)))\n else:\n # Read in sample folder restrictions and convert to a single comma-\n # separated string, the same format as it would be passed in via CLI.\n # Can't use `join` since this is a vector<TString>\n # Can't read in the field as a single string with getTagString,\n # perhaps since it has commas\n paths = \"\"\n for path in 
config.getTagVString(\"restrict\"):\n paths += path.Data() + \",\"\n paths = ROOT.TString(paths[:-1])\n if paths.Length() != 0:\n if not dummy:\n nsamples = samples.visitSampleFolders(visitor,paths)\n QFramework.TQLibrary.recordMemory()\n QFramework.TQObservable.clearAll()\n QFramework.TQLibrary.recordMemory()\n if downmerge or downmergeTo:\n downmergeTargets = downmergeTo\n if not downmergeTargets:\n downmergeTargets = paths\n samples.setTag(\".generalize.histograms\",True,downmergeTargets)\n samples.setTag(\".generalize.cutflow\",True,downmergeTargets)\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on paths '{:s}'\".format(pathselect))\n else:\n if not dummy:\n nsamples = samples.visitMe(visitor)\n QFramework.TQLibrary.recordMemory()\n else:\n QFramework.WARN(\"dummy run, skipping execution of cutbased analysis on root sample folder\")\n\n # TODO: put the rest of this in a separate function like for post processing?\n # right now nsamples is returned but nothing is done with it\n if nsamples > 0:\n if downmerge or downmergeTo:\n samples.generalizeObjects(\".generalize\")\n timer.Stop()\n\n # TODO: put this section in its own function (with cuts available)\n # just get cuts from visitor? (will need to provide a channel in the MCASV case I think)\n if config.getTagBoolDefault(\"checkRun\",True):\n\n if dummy:\n allevents = QFramework.TQCounter(\"dummy\",0,0,0)\n else:\n if isinstance(visitor,QFramework.TQAnalysisSampleVisitor):\n allevents = samples.getCounter(\".\",visitor.getBaseCut().GetName())\n elif isinstance(visitor,QFramework.TQMultiChannelAnalysisSampleVisitor):\n channels = config.getTagVString(\"channels\")\n allevents = samples.getCounter(\".\",visitor.getBaseCut(channels[0]).GetName())\n\n if nsamples > 0:\n # debugging printout\n # TODO: make separate method?\n if config.getTagBoolDefault(\"printCounterValues\",False):\n samples.printListOfCounters()\n printhists = config.getTagVString(\"printHistogramsASCII\")\n for hist in printhists:\n h = samples.getHistogram(\".\",hist)\n if h:\n QFramework.TQHistogramUtils.printHistogramASCII(h)\n else:\n QFramework.ERROR(\"unable to access histogram '{:s}'\".format(hist))\n\n else:\n QFramework.ERROR(\"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\")\n runtime = config.getFolder(\"runtime+\")\n # store in runtime folder the fact that no samples were visited in the form of an error string\n analysisError = \"execution of analysis finished but might have failed, no samples were visited successfully (they might simply be empty).\"\n runtime.setTagString(\"analysisError\", analysisError)\n #don't quit just now, but instead we'll write an alternative output file later which basically states \"job didn't crash but there is a small chance something went wrong\"\n #quit()\n\n #return nEventsProcessed\n return nsamples", "def startTestRun(self):", "def run_tests(file, samples):\n # Get the script dir, name and check if the file given exists\n test_dir = os.path.dirname(os.path.realpath(__file__))\n script_name = os.path.basename(__file__)\n if not os.path.isfile(os.path.join(test_dir, file)):\n sys.stderr.write('{0}: file \\'{1}\\' not found\\n'.format(script_name, file))\n sys.exit(0)\n\n result_dir = os.path.join(test_dir, 'results')\n if not os.path.exists(result_dir):\n os.mkdir(result_dir)\n\n # Get a path to the build dir to run iengine and cd into it\n filepath = os.path.join(test_dir, file)\n exe_path = 
os.path.join(os.path.join(test_dir, '..'), 'cmake-build-debug')\n os.chdir(exe_path)\n\n # Open csv files for writing to\n time_dist = open(os.path.join(result_dir, 'time.csv'), 'a')\n inference_dist = open(os.path.join(result_dir, 'inference.csv'), 'a')\n time_writer = csv.DictWriter(time_dist, delimiter=',', fieldnames=['method',\n 'file',\n 'sample',\n 'time'])\n inference_writer = csv.DictWriter(inference_dist, delimiter=',',\n fieldnames=['method', 'file',\n 'sample', 'inference_length'])\n time_writer.writeheader()\n inference_writer.writeheader()\n\n # Run through tests for all inference methods\n for method in ['FC', 'BC', 'TT']:\n timer = timeit.Timer(functools.partial(execute, filepath, method))\n avg = 0\n avg_path = 0\n\n # Run through all samples for the current inference method getting the execution\n # time and the number of inferences/models considered in the process\n for i in range(0, samples):\n print(timer.timeit(1))\n current, (result, err) = timer.timeit(1)\n avg += current * 1000\n result = result.decode('ascii').replace(',', '').replace(':', '')\n result_list = str.split(result)[1:]\n length = len(result_list)\n if method == 'TT':\n length = int(result_list[0])\n avg_path += length\n time_writer.writerow({'method': method, 'file': file, 'sample': i,\n 'time': current})\n inference_writer.writerow({'method': method, 'file': file, 'sample': i,\n 'inference_length': length})\n\n terminology = 'inferences'\n if method == 'TT':\n terminology = 'models'\n\n print('Method: {0}, Average time: {1:.3f}ms, Average {2}: {3}'.format(method,\n avg / samples, terminology, avg_path / samples))\n\n time_dist.close()\n inference_dist.close()", "def execute(self, *args):\n\n # We accept any number of args because this can either be called\n # explicitely by the user, or as part of the gps_started hook\n self._do_test()", "def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir \"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n 
#\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)", "def run(self, **kwargs):\n for repl in self.replicas:\n self.log.info('-'*50)\n self.log.info(\"Running %s analysis...\"%repl.name)\n self.__submitReplica(repl, **kwargs)\n self.log.info('-'*50)", "def run(self, *args, **kwargs):\n pass", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def complete_run():\n pass", "def runsbeana(self):", "def run(self):\n for tool in self.tools:\n tool.run()\n return", "def main():\r\n run_processes('tests.csv', 'labs.csv')", "def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return", "def run(self):\n f_out = open(TESTS_PATH + \"/\" + self.name,\"w\")\n f_out.write(self.script)\n f_out.close()\n\n t1 = threading.Thread(target=self._push_test_on_phone)\n t2 = threading.Thread(target=self._run_test_on_phone)\n\tt3 = threading.Thread(target=self._del_test_on_phone)\n print(\"\\n=================================================\\n\")\n print(\"Pushing test onto phone...\")\n t1.start()\n t1.join()\n print(\"\\nTest file now on phone.\")\n\n print(\"\\n=================================================\\n\")\n print(\"Running test...\")\n\tst=time.time()\n\tprint \"start time=\"+str(st)\n t2.start()\n t2.join()\n\ten=time.time()\n\tprint \"end time=\"+str(en)\n\tprint \"duration=\"+str(en-st)\n\tself.duration=en-st\n print(\"\\nDone running test.\")\n\tt3.start()\n\tt3.join()\n\treturn self.duration", "def run(self):\n\n step = self.steps['diagnostics_files']\n step.cores = self.config.getint('make_diagnostics_files', 'cores')\n\n # run the step\n super().run()" ]
[ "0.68662244", "0.68151057", "0.6767565", "0.65983254", "0.6565703", "0.65549105", "0.6449239", "0.6410411", "0.6353265", "0.6352231", "0.63458776", "0.6331616", "0.6285805", "0.6264376", "0.62491834", "0.6246712", "0.62430114", "0.62197703", "0.621477", "0.620736", "0.62068206", "0.6203707", "0.6192468", "0.61551493", "0.6154213", "0.61465955", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6119395", "0.6117706", "0.61110634", "0.6109854", "0.6086593", "0.60822535", "0.60814273", "0.6079095", "0.60722387", "0.6071633", "0.60680795", "0.6067889", "0.60618955", "0.60548764", "0.6041009", "0.60361606", "0.6031451", "0.6029173", "0.6013649", "0.6013649", "0.5997564", "0.5993587", "0.5985694", "0.5977095", "0.5973025", "0.5970808", "0.59701824", "0.5968418", "0.5951468", "0.5939187", "0.5936649", "0.59359235", "0.5933478", "0.5932955", "0.59327537", "0.5930587", "0.5925549", "0.59202474", "0.59202474", "0.59202474", "0.59202474", "0.59202474", "0.59202474", "0.59202474", "0.59202474", "0.59202474", "0.5919995", "0.59138167", "0.59137255", "0.59027386", "0.5892457", "0.5889605", "0.58775437", "0.587698", "0.5876192", "0.5873158", "0.5872866", "0.5866593", "0.5863834", "0.5860104", "0.5851948" ]
0.0
-1
Can the current model be exported into a usable script
def valid(self): if not self._runs: return False return all([r.valid for r in self._runs])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dump_model(self):", "def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:", "def save_model(self, filename):\r\n pass", "def export_model(model, name):\n\tpath = \"data/{}/\".format(name)\n\tfilename = \"{}.model\".format(name)\n\tif os.path.isdir(path):\n\t\tprint(\"model already exists\")\n\t\treturn\n\telse:\n\t\tos.mkdir(path)\n\t\tjoblib.dump(model, path + filename)", "def export_model(self, output_model_dir):\n logger.info(\"Exporting model to directory : {}\".format(output_model_dir))\n self.model.export(output_model_dir=output_model_dir)", "def script(self):", "def save_model(learn, name):\n# callback_fns = learn.callback_fns # preserve wandb callback and others\n# callbacks = learn.callbacks\n \n# learn.callback_fns = [] # clean callbacks\n# learn.callbacks = []\n \n learn.save(PATH_TO_MODELS / name) # save only weights, adds .pth automatically\n learn.export(PATH_TO_MODELS / f\"{name}.pkl\") # serialize entire model, need to add .pkl", "def export_to_file(self):\r\n return True", "def save_model(self):\n joblib.dump(self.pipeline, \"model.joblib\")", "def intf_MMSAVE(E):\n global SAVEFILE\n with open(SAVEFILE,'w') as f:\n f.write( MMEL.simplistic_mm_save_format() )\n print(\"Model script written to: %s\\n\" % SAVEFILE)", "def save_model(self, filename) -> None:\n #t.save(self, filename)\n traced=t.jit.script(self)\n t.jit.save(traced,filename)", "def on_action_4_triggered(self):\n # TODO: not implemented yet\n model = self.model2\n self.doExport(model)\n #raise NotImplementedError", "def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")", "def save_model(model, model_filepath):", "def save_model(self):\n pass", "def _export_model_representations(self, config):\n\n self.logger.msg1(\"Preparing model representations\")\n modelsets = get_modelsets(self.dbpath, self.obo, config.partition_size)\n prefix = self.rootpath + \"-models-\"\n for i, refset in enumerate(modelsets):\n progress = str(i+1) + \"/\" + str(len(modelsets))\n self.logger.msg1(\"Saving model representations: \"+progress)\n refset.save(prefix + str(i+1), \"phenotype\", what=(\"data\",))", "def export_freeze_model(self, export_dir='.', version=1):\n self.feed_dict.update({self.training_phase: False})\n tf.keras.backend.set_learning_phase(0) \n\n self.outputs = tf.identity_n(self.outputs, name='output/hr')\n sess = tf.get_default_session()\n export_path = Path(export_dir) / str(version)\n while export_path.exists():\n version += 1 # step ahead 1 version\n export_path = Path(export_dir) / str(version)\n export_path = str(export_path)\n graph = sess.graph.as_graph_def()\n graph = tf.graph_util.remove_training_nodes(graph)\n graph = tf.graph_util.convert_variables_to_constants(\n sess, graph, [outp.name.split(':')[0] for outp in self.outputs])\n # fcarrio\n for node in graph.node:\n print (node.name)\n\n\n for op in tf.get_default_graph().get_operations():\n print(str(op.name))\n\n tf.train.write_graph(graph, export_path, self.name, as_text=False)\n LOG.info(\"Model exported to {}/{}.\".format(export_path, self.name))", "def save_model(self):\n joblib.dump(self.pipeline, 'model.joblib')\n print(colored('model.joblib saved locally', 'green'))", "def _gtModel(self):\n if os.path.isfile(self.outgtmod):\n print(\"\\t=== '{}' already exists ===\".format(self.outgtmod))\n return\n else:\n if not os.path.isfile(self.outsrcmap):\n self._gtSrcmap()\n if not os.path.isfile(self.outmodel):\n self.gtLike()\n\n os.popen(\"gtmodel srcmaps={} srcmdl={} outfile={} irfs={} expcube={}\\\n 
bexpmap={}\".format(self.outsrcmap, self.outmodel, self.outgtmod,\n self.irf, self.outltcube, self.outbinexp))\n return", "def exportTorchscript(model, sampleInput, checkInputs, fileName):\n try:\n print(f\"Starting TorchScript export with torch {torch.__version__}\")\n ts = torch.jit.trace(model, sampleInput, check_inputs=checkInputs)\n ts.save(fileName)\n print(f\"TorchScript export success, saved as {fileName}\")\n return ts\n except Exception as e:\n print(f\"TorchScript export failure: {e}\")", "def convert_to_model(self, *args):", "def save_plot_model_script(folderOUT):\n with open(folderOUT+'generate_model_plot.py', 'w') as f_out:\n f_out.write('#!/usr/bin/env python' + '\\n')\n f_out.write('try:' + '\\n')\n f_out.write('\\timport keras as ks' + '\\n')\n f_out.write('except ImportError:' + '\\n')\n f_out.write('\\tprint \"Keras not available. Activate tensorflow_cpu environment\"' + '\\n')\n f_out.write('\\traise SystemExit(\"=========== Error -- Exiting the script ===========\")' + '\\n')\n f_out.write('model = ks.models.load_model(\"%smodels/model-000.hdf5\")'%(folderOUT) + '\\n')\n f_out.write('try:' + '\\n')\n f_out.write('\\tks.utils.plot_model(model, to_file=\"%s/plot_model.png\", show_shapes=True, show_layer_names=True)'%(folderOUT) + '\\n')\n f_out.write('except OSError:' + '\\n')\n f_out.write('\\tprint \"could not produce plot_model.png ---- try on CPU\"' + '\\n')\n f_out.write('\\traise SystemExit(\"=========== Error -- Exiting the script ===========\")' + '\\n')\n f_out.write('print \"=========== Generating Plot Finished ===========\"' + '\\n')\n f_out.write('\\n')", "def save_model(self, export_path, save_ae=True):\n net_dict = self.net.state_dict()\n ae_net_dict = self.ae_net.state_dict() if save_ae else None\n #ae_threshold = self.scores_threhold_rec if save_ae else None\n\n torch.save({'c': self.c,\n 'net_dict': net_dict,\n 'ae_net_dict': ae_net_dict}, export_path)", "def save_model(self, model_path: str):", "def visualize_model(self):\n if self.model is None:\n print(\"%s.visualize: implement me\" % (self.__class__.__name__))", "def export_model(model, model_type, export_dir, model_column_fn):\n wide_columns, deep_columns = model_column_fn()\n if model_type == 'wide':\n columns = wide_columns\n elif model_type == 'deep':\n columns = deep_columns\n else:\n columns = wide_columns + deep_columns\n feature_spec = tf.feature_column.make_parse_example_spec(columns)\n example_input_fn = (\n tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))\n model.export_savedmodel(export_dir, example_input_fn,\n strip_default_attrs=True)", "def gen_script(model: onnx.ModelProto, output_file: str = None) -> str:\n current_dir = os.path.dirname(os.path.realpath(__file__))\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(current_dir + '/templates/'))\n model_header_render = gen_model_header(env, model)\n imports, main_function, sub_functions = gen_graph_functions(env, model.graph)\n\n wdir = \"\"\n if len(imports) > 0:\n # need to set wdir to enable imports\n wdir = util.resolve_systemds_root() + \"/scripts\"\n\n main_template = env.get_template(\"main.dml.jinja\")\n result_render = main_template.render(\n title=\"This file was generated by onnx-systemds\",\n model_header_render=model_header_render,\n wdir=wdir,\n imports=imports,\n main_function=main_function,\n sub_functions=sub_functions\n )\n if output_file:\n directory = os.path.dirname(output_file)\n if len(directory) > 0:\n os.makedirs(directory, exist_ok=True)\n with open(output_file, 'w') as f:\n 
f.write(result_render)\n\n return result_render", "def save(path_to_model):\n pass", "def save_torchscript(\n self,\n save_path: str,\n model_only: bool = False,\n device: Optional[TorchDevice] = None,\n ):\n if device is None:\n device = DEVICE\n\n save_ludwig_model_for_inference(\n save_path,\n self.model,\n self.config_obj.to_dict(),\n self.training_set_metadata,\n model_only=model_only,\n device=device,\n )", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def saveSvmModel(self, model):\n # Create the save dialog box\n name, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save Model Parameters',\n '', 'pkl files (*.pkl)', 'pkl file (*.pkl)')\n\n if not name:\n return\n # Check the extension when saving\n if self.joblibExt in name:\n joblib.dump(model, name)\n else:\n message = 'Error saving file {}.'.format(name)\n self.messageBox(message)", "def build_model ( self, transformer, classifier, dumpfile ) :\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , dumpfile )", "def test_model_can_import():\n assert hasattr(model, \"SEIR_model_publish_w_risk\")\n assert hasattr(model, \"compute_R0\")", "def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)", "def export(\n name: str = typer.Option(..., '-n', '--name', help='Architecture'),\n framework: Optional[Framework] = typer.Option(None, '-fw', '--framework', case_sensitive=False,\n help='Framework'),\n trt: Optional[bool] = typer.Option(\n False,\n is_flag=True,\n help='Flag for exporting models served by TensorRT. Please make sure you have TensorRT installed in your '\n 'machine before set this flag.')\n):\n from modelci.hub.init_data import export_model\n\n export_model(model_name=name, framework=framework, enable_trt=trt)", "def build_model_fn(self):", "def object_script_to_model(path):\n\n\tobject_model = {\"name\": None,\n\t\t\t\t\t\"vision_model\": None,\n\t\t\t\t\t\"is_ignore_orientation\": None,\n\t\t\t\t\t\"teaching_position\": None,\n\t\t\t\t\t\"new_snapshot_pos\": None,\n\t\t\t\t\t\"new_snapshot_pos_inv\": None,\n\t\t\t\t\t\"current_speed_0_thr\": None,\n\t\t\t\t\t\"current_force_0_thr\": None,\n\t\t\t\t\t\"move_approach\": None,\n\t\t\t\t\t\"move_pick\": None,\n\t\t\t\t\t\"move_retract\": None}\n\n\t_name = re.search(r'(?:\\\\|\\/)(.*)\\.script', path)\n\tobject_model[\"name\"] = \"object\" if not _name else _name.group(1)\n\n\t# re patterns\n\t_re_6_fl_list = r'\\[(?:-?\\d*\\.?\\d*E?-?\\d+,? 
?){6}\\]' \t# re for list of 6 signed float\n\t_re_vision_model = r'(?m)^\\s*f = xmlrpc_server\\.findmodel\\(\\\"(\\{[\\w-]+\\})\", tool\\[0\\], tool\\[1\\], tool\\[2\\], tool\\[3\\], tool\\[4\\], tool\\[5\\]\\)'\n\t_re_is_ignore_orientation = r'(?m)^\\s*is_ignore_orientation = ((?:True)|(?:False))'\n\t_re_teaching_pos = r'(?m)^\\s*object_teaching_location = p({})'.format(_re_6_fl_list)\n\t_re_new_snapshot_pos = r'(?m)^\\s*snapshot_pos = pose_trans\\(object_location, pose_trans\\(pose_inv\\(p({})\\), p({})\\)\\)'.format(_re_6_fl_list, _re_6_fl_list)\n\t_re_current_speed_0_thr = r'(?m)^\\s*if \\(current_speed\\[0\\] != (-?\\d*\\.?\\d*)\\):'\n\t_re_current_force_0_thr = r'(?m)^\\s*if \\(current_force\\[0\\] != (-?\\d*\\.?\\d*)\\):'\n\n\t_re_move_approach = r'(?m)^\\s*\\$ \\d+ \"Rel_approach\"\\s+movel\\(pose_trans\\(snapshot_pos, p({})\\), a=(?:-?\\d*\\.?\\d*), v=(?:-?\\d*\\.?\\d*)\\)'.format(_re_6_fl_list)\n\t_re_move_pick = r'(?m)^\\s*\\$ \\d+ \"Rel_pick\"\\s+movel\\(pose_trans\\(snapshot_pos, p({})\\), a=(?:-?\\d*\\.?\\d*), v=(?:-?\\d*\\.?\\d*)\\)'.format(_re_6_fl_list)\n\t_re_move_retract = r'(?m)^\\s*\\$ \\d+ \"Rel_retract\"\\s+movel\\(pose_trans\\(snapshot_pos, p({})\\), a=(?:-?\\d*\\.?\\d*), v=(?:-?\\d*\\.?\\d*)\\)'.format(_re_6_fl_list)\n\n\n\twith open(path, \"r\") as file:\n\t\tcontent = file.read()\n\t\tcamera_locate_match = re.search(r'\\s*\\$ \\d+ \"Camera Locate\"', content)\n\t\tcontent = content[camera_locate_match.start():]\n\t\t\n\t\tobject_model[\"vision_model\"] = _reg_catch(_re_vision_model, content)\n\t\tobject_model[\"is_ignore_orientation\"] = _reg_catch(_re_is_ignore_orientation, content)\n\t\tobject_model[\"teaching_position\"] = _reg_catch(_re_teaching_pos, content)\n\t\tobject_model[\"new_snapshot_pos_inv\"] = _reg_catch(_re_new_snapshot_pos, content)\n\t\tobject_model[\"new_snapshot_pos\"] = _reg_catch(_re_new_snapshot_pos, content, 2)\n\t\tobject_model[\"current_speed_0_thr\"] = _reg_catch(_re_current_speed_0_thr, content)\n\t\tobject_model[\"current_force_0_thr\"] = _reg_catch(_re_current_force_0_thr, content)\n\t\tobject_model[\"move_approach\"] = _reg_catch(_re_move_approach, content)\n\t\tobject_model[\"move_pick\"] = _reg_catch(_re_move_pick, content)\n\t\tobject_model[\"move_retract\"] = _reg_catch(_re_move_retract, content)\n\treturn object_model", "def on_action_2_triggered(self):\n # TODO: not implemented yet\n model = self.model\n self.doExport(model)", "def export3DModel(self, fileName, filePath, fileFormat=\".step\", object_list=[], removed_objects=[]):\n if not object_list:\n allObjects = self.modeler.primitives.object_names\n if removed_objects:\n for rem in removed_objects:\n allObjects.remove(rem)\n else:\n if \"Region\" in allObjects:\n allObjects.remove(\"Region\")\n else:\n allObjects = object_list[:]\n\n self.add_info_message(\"Exporting {} objects\".format(len(allObjects)))\n\n stringa = \",\".join(allObjects)\n arg = [\n \"NAME:ExportParameters\",\n \"AllowRegionDependentPartSelectionForPMLCreation:=\",\n True,\n \"AllowRegionSelectionForPMLCreation:=\",\n True,\n \"Selections:=\",\n stringa,\n \"File Name:=\",\n str(filePath) + \"/\" + str(fileName) + str(fileFormat),\n \"Major Version:=\",\n -1,\n \"Minor Version:=\",\n -1,\n ]\n\n self.modeler.oeditor.Export(arg)\n return True", "def save_model(self, file=None):\n return None", "def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')", "def export_model(self, file, add_metada_file = False):\n assert self.is_fitted_\n file = os.path.expanduser(file)\n metadata = 
self._export_metadata()\n if add_metada_file:\n with open(file + \".metadata\", \"w\") as of:\n json.dump(metadata, of, indent=4)\n metadata = json.dumps(metadata)\n metadata = metadata.encode('utf-8')\n self._cpp_obj.serialize_obj(file, metadata, self.ndim_ > 1, has_imputer=self.build_imputer)\n return self", "def save_model(model):\n # ***\n # Please remove the comment to enable model save.\n # However, it will overwrite the baseline model we provided.\n # ***\n model.save(\"model/model.h5\")\n print(\"Model Saved Successfully.\")", "def save_model(script_name, feature_set, model_fname):\n import requests\n import json\n from urllib.parse import urljoin\n\n model_payload = {\n \"model\": {\n \"name\": script_name,\n \"model\": {\n \"type\": \"model/ranklib\",\n \"definition\": {\n }\n }\n }\n }\n\n with open(model_fname) as modelFile:\n model_content = modelFile.read()\n path = \"_ltr/_featureset/%s/_createmodel\" % feature_set\n full_path = urljoin(ES_HOST, path)\n print(\"full_path\", full_path)\n model_payload['model']['model']['definition'] = model_content\n Logger.logger.info(\"POST %s\" % full_path)\n head = {'Content-Type': 'application/json'}\n resp = requests.post(full_path, data=json.dumps(model_payload), auth = HTTPBasicAuth(ES_User,ES_Passw),headers=head,verify=False)\n Logger.logger.info(resp.status_code)\n if resp.status_code >= 300:\n Logger.logger.error(resp.text)", "def export(self):\n if self.model.algorithm == 'DecisionTree':\n dot_data = tree.export_graphviz(self.model.clf, out_file=None)\n graph = graphviz.Source(dot_data)\n graph.render(\"exports/DecisionTreeRegressor\")", "def _check_model_export(self, crypten_model, x_enc):\n pytorch_model = crypten_model.decrypt().to_pytorch()\n x_plain = x_enc.get_plain_text()\n\n y_plain = pytorch_model(x_plain)\n crypten_model.encrypt()\n y_enc = crypten_model(x_enc)\n\n self._check(y_enc, y_plain, msg=\"Model export failed.\")", "def export_model(model, path=None, input_shape=(1, 3, 64, 64)):\n path = get_model_path() if path is None else path\n model = deepcopy(model).cpu().eval()\n if not isinstance(model, torch.jit.ScriptModule):\n assert input_shape is not None, \"`input_shape` must be provided since model is not a \" \\\n \"`ScriptModule`.\"\n traced_model = trace(model, torch.zeros(*input_shape))\n else:\n traced_model = model\n torch.jit.save(traced_model, path)\n return path", "def model(self) -> str:\n ...", "def save(model: nn.Module, path):\n save_model(model, path)", "def test_export():\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.export(\"test_export.py\")\n assert False # Should be unreachable\n except ValueError:\n pass", "def export_model(self):\n\n model_pkg = dict()\n\n for k, v in self.__dict__.items():\n if k not in ['datas'] and not k.startswith('_'):\n model_pkg[k] = v\n\n for i in range(len(self.datas)):\n for k, v in self.datas[i].__dict__.items():\n model_pkg['datas_%d_%s' % (i, k)] = v\n\n return model_pkg", "def post_save(model, os_path, contents_manager):\n if model['type'] != 'notebook':\n return # only do this for notebooks\n d, fname = os.path.split(os_path)\n check_call(['jupyter', 'nbconvert', '--to', 'script', fname], cwd=d)", "def __switch_command_export(self, file_name, selection_only):\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n self.model.to_mat_file(file_name, selection_only)\n elif ext == 'json':\n print \"exporting to: \", file_name\n buff = self.model.to_json_dict(selection_only)\n buff = json.dumps(buff)\n with open(file_name, 'wb') as f:\n f.write(buff)\n else:\n 
raise DataExplorerError('Unsupported file format: {}'.format(ext))", "def model_info():\n pass", "def export_workflow(args):\n if args.type == 'magnis':\n clarity_epp.export.workflow.helix_magnis(lims, args.process_id, args.output_file)\n elif args.type == 'mip':\n clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file)", "def GenerateModel(modelData, outputFilePath, objectName = 'SBMLmodel'):\n #The library mathFuncs serves to both only allow functions supported\n #functions in SBML/user defined functions, but also the python equivalent\n \n np.set_printoptions(threshold=sys.maxsize)\n \n \n \n outputFile = open(outputFilePath, \"w\")\n\n parameters = modelData.parameters\n compartments = modelData.compartments\n species = modelData.species\n reactions = modelData.reactions\n functions = modelData.functions\n \n assignmentRules = modelData.assignmentRules\n rateRules = modelData.rateRules\n initialAssignments = modelData.initialAssignments\n \n mathFuncs = {'abs' : 'abs',\n 'max' : 'max',\n 'min' : 'min',\n 'pow' : 'pow',\n 'exp' : 'math.exp',\n 'floor' : 'np.floor',\n 'ceiling' : 'math.ceil',\n 'exp' : 'math.exp',\n 'ln' : 'math.log',\n 'log' : 'math.log10',\n 'factorial' : 'math.factorial',\n 'sqrt' : 'math.sqrt',\n \n 'eq' : 'operator.eq',\n 'neq' : 'operator.ne',\n 'gt' : 'operator.gt',\n 'lt' : 'operator.lt',\n 'geq' : 'operator.ge',\n 'leq' : 'operator.le',\n \n 'and' : 'operator.and_',\n 'or' : 'operator.or_',\n 'xor' : 'operator.xor_',\n 'not' : 'operator.not_',\n \n 'sin' : 'np.sin',\n 'cos' : 'np.cos',\n 'tan' : 'np.tan',\n 'sec' : '1/np.cos',\n 'csc' : '1/np.sin',\n 'cot' : '1/np.tan',\n 'sinh' : 'np.sinh',\n 'cosh' : 'np.cosh',\n 'tanh' : 'np.tanh',\n 'sech' : '1/np.cosh',\n 'csch' : '1/np.sinh',\n 'coth' : '1/np.tanh',\n 'arcsin' : 'np.arcsin',\n 'arccos' : 'np.arccos',\n 'arctan' : 'np.arctan',\n 'arcsinh' : 'np.arcsinh',\n 'arccosh' : 'np.arccosh',\n 'arctanh' : 'np.arctanh',\n \n 'true' : 'True',\n 'false' : 'False',\n 'notanumber' : 'np.nan',\n 'pi' : 'np.pi',\n 'infinity' : 'np.inf',\n 'exponentiale' : 'np.e',\n 'piecewise' : 'Piecewise'\n } \n #Add in user defined functions\n# for function in functions:\n# mathFuncs[function] = \"self.\" + function\n\t\t\n #Set up stoichCoeffMat, a matrix of stoichiometric coefficients for solving the reactions\n reactantCounter = 0\n reactantIndex = {}\n reactionCounter = 0\n reactionIndex = {}\n \n rateRuleVars = []\n rateParams = 0\n for specie in species:\n reactantIndex[specie] = reactantCounter\n reactantCounter += 1\n for key, rateRule in rateRules.items():\n if rateRule.variable in parameters or rateRule.variable in compartments:\n rateParams += 1\n reactantIndex[rateRule.variable] = reactantCounter\n reactantCounter += 1\n rateRuleVars.append(rateRule.variable)\n elif rateRule.variable in species:\n pass\n else:\n raise Exception(\"Rate Rule adjusting something other than specie amount, parameter value, or compartment size.\")\n\n \t\t\n stoichCoeffMat = np.zeros([len(species) + rateParams, max(len(reactions),1)])\n \n for rxnId in reactions:\n reactionIndex[rxnId] = reactionCounter\n reactionCounter += 1\n reaction = reactions[rxnId]\n for reactant in reaction.reactants:\n if reactant[1] not in reactantIndex:\n reactantIndex[reactant[1]] = reactantCounter\n reactantCounter += 1\n if not (species[reactant[1]].isBoundarySpecies == \"True\"):\n stoichCoeffMat[reactantIndex[reactant[1]], reactionIndex[rxnId]] += reactant[0]\n\n \t\n # for reaction in reactions:\n # for reactant in reactions[reaction][0]:\n # 
if reactant[1] not in reactantIndex:\n # reactantIndex[reactant[1]] = reactantCounter\n # reactantCounter += 1\n # if not species[reactant[1]][4]:\n # stoichCoeffMat[reactantIndex[reactant[1]], reaction-1] += reactant[0]\n #print(rateParams)\n #print(stoichCoeffMat)\n \n outputFile.write(\"from sbmltopyode.SBMLModelClasses import *\\n\")\n outputFile.write(\"from scipy.integrate import odeint\\n\")\n outputFile.write(\"import numpy as np\\n\")\n outputFile.write(\"import operator\\n\")\n outputFile.write(\"import math\\n\\n\")\n \n outputFile.write(\"class \" + objectName +\":\\n\\n\")\n \n outputFile.write(\"\\tdef __init__(self):\\n\\n\")\n outputFile.write(\"\\t\\tself.p = {} #Dictionary of model parameters\\n\")\n for paramId in parameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \\'\"+ paramId + \"\\', \" + str(parameters[paramId].isConstant) +\")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.c = {} #Dictionary of compartments\\n\")\n for compartmentId in compartments:\n outputFile.write(\"\\t\\tself.c[\\'\" + compartmentId + \"\\'] = Compartment(\" + str(compartments[compartmentId].size) + \", \" + str(compartments[compartmentId].dimensionality)+ \", \" + str(compartments[compartmentId].isConstant) + \")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.s = {} #Dictionary of chemical species\\n\")\n for speciesId in species:\n outputFile.write(\"\\t\\tspeciesMetadata = SBMLMetadata('\" + species[speciesId].name +\"')\\n\")\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\'] = Species(\" + str(species[speciesId].value) + \", '\" + species[speciesId].valueType + \"', self.c['\" + species[speciesId].compartment + \"'], \" + str(species[speciesId].hasOnlySubstanceUnits) + \", constant = \" + str(species[speciesId].isConstant) + \")\\n\")\n for key, rule in assignmentRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n for key, rule in rateRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n \n \n outputFile.write(\"\\n\\t\\tself.r = {} #Dictionary of reactiions\\n\")\n for reactionId in reactions:\n outputFile.write(\"\\t\\tself.r[\\'\" + reactionId + \"\\'] = \" + reactionId + \"(self, SBMLMetadata('\" + reactions[reactionId].name + \"'))\\n\")\n \n outputFile.write(\"\\t\\tself.time = 0\\n\\n\")\n \n outputFile.write(\"\\t\\tself.reactionMetadata = {\")\n commaFlag = 0\n for reactionId in reactions:\n if commaFlag == 0:\n commaFlag = 1\n outputFile.write(\"\\n\\t\\t\")\n else:\n outputFile.write(\",\\n\\t\\t\")\n outputFile.write(\"self.Reaction\" + reactionId + \": SBMLMetadata('\" + reactions[reactionId].name + \"')\")\n outputFile.write(\"\\n\\t\\t}\\n\")\n \n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n \n outputFile.write(\"\\n\\n\")\n outputFile.write(\"\\tdef AssignmentRules(self):\\n\\n\")\n #These functions are defined here due to reading variables in the parent function's namespace\n #These are not intended to be used elsewhere\n def ParseLHS(rawLHS):\n returnLHS = ''\n if rawLHS in parameters:\n returnLHS = \"self.p[\\'\" + rawLHS + \"\\'].value = \"\n elif rawLHS in species:\n if not species[rawLHS].hasOnlySubstanceUnits: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].concentration = '\n else: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].amount = '\n elif rawLHS in compartments:\n returnLHS = 
'self.c[\\'' + rawLHS + '\\'].size = '\n else:\n raise(Exception(\"New case: rule LHS not in p: \" + rawLHS))\n\n return returnLHS\n\t\n def ParseRHS(rawRHS, extendedParams = [], objectText = \"self\"):\n #objectText is not \"self\" when parsing reaction math\n \n #The main purpose of this function is to turn math strings given by libSBML into\n #code formated to properly call members of the resulting class\n #For example k_1*C_A may turn to\n \n \n rawRHS = rawRHS.replace(\"^\", \"**\") #Replaces carrot notation for exponentiation with ** operator\n variables = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rawRHS): #look for variable names\n #ToDo: check for function calls\n variables.append([rawRHS[match.start():match.end()], match.span()])\n \n #rule[1] contains the right hand side\n returnRHS = ''\n oldSpan = None\n if variables != []:\n for variable in variables:\n if oldSpan == None and variable[1][0] != 0:\n returnRHS += rawRHS[0:variable[1][0]]\n elif oldSpan != None:\n returnRHS += rawRHS[oldSpan[1]:variable[1][0]]\n oldSpan = variable[1]\n if variable[0] in parameters:\n returnRHS += objectText + '.p[\\'' + variable[0] + '\\'].value'\n elif variable[0] in species:\n if not species[variable[0]].hasOnlySubstanceUnits == \"True\": \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].concentration'\n else: \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].amount'\n elif variable[0] in compartments:\n returnRHS += objectText + '.c[\\'' + variable[0] + '\\'].size'\n elif variable[0] in mathFuncs:\n returnRHS += mathFuncs[variable[0]]\n elif variable[0] in functions:\n returnRHS += objectText + '.' + variable[0]\n elif variable[0] in extendedParams:\n if objectText == \"self\":\n returnRHS += variable[0]\n else:\n returnRHS += \"self.p[\\'\" + variable[0] + \"\\'].value\"\n\n elif variable[0] == \"time\":\n returnRHS += objectText + '.time'\n elif variable[0] == \"pi\":\n returnRHS += \"np.pi\"\n else:\n raise(Exception('New case: unkown RHS variable: ' + variable[0]))\n returnRHS += rawRHS[variable[1][1]:len(rawRHS)]\n # print(rule[1][variable[1][1]])\n #print(rule[1][-1])\n else:\n returnRHS = rawRHS\n\t\t\n return returnRHS\n\n ruleDefinedVars = [rule.variable for rule in assignmentRules.values()]\n for key, assignment in initialAssignments.items():\n ruleDefinedVars.append(assignment.variable)\n \n for key, rule in assignmentRules.items():\n rule.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rule.math): #look for variable names\n rule.dependents.append(rule.math[match.start():match.end()])\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] not in ruleDefinedVars:\n rule.dependents.pop(originalLen- i-1)\n \n for key, assignment in initialAssignments.items():\n assignment.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', assignment.math): #look for variable names\n assignment.dependents.append(assignment.math[match.start():match.end()])\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i -1] not in ruleDefinedVars :\n assignment.dependents.pop(originalLen- i-1)\n \n# breakVar = False\n while True:\n continueVar = False\n breakVar = True\n varDefinedThisLoop = None\n for key, rule in assignmentRules.items():\n if rule.dependents == []:\n ruleLHS = ParseLHS(rule.variable)\n ruleRHS = ParseRHS(rule.math)\n outputFile.write(\"\\t\\t\" + ruleLHS + ruleRHS + '\\n\\n')\n varDefinedThisLoop = rule.variable\n 
rule.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n if not continueVar:\n for key, assignment in initialAssignments.items():\n if assignment.dependents == []:\n assignmentLHS = ParseLHS(assignment.variable)\n assignmentRHS = ParseRHS(assignment.math)\n outputFile.write(\"\\t\\tif self.time <= 0 :\\n\")\n if assignment.variable in parameters:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.p['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in species:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.s['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in compartment:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.c['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n \n varDefinedThisLoop = assignment.variable\n assignment.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n for rule in assignmentRules.values():\n if not rule.dependents == None:\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] == varDefinedThisLoop:\n rule.dependents.pop(originalLen - i -1)\n# print(rule.variable + ':' + str(rule.dependents))\n\n for assignment in initialAssignments.values():\n if not assignment.dependents == None:\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i - 1] == varDefinedThisLoop:\n assignment.dependents.pop(originalLen - i - 1)\n# print(assignment.variable + ':' + str(assignment.dependents))\n \n if continueVar:\n continue\n elif breakVar:\n break\n else:\n raise Exception('Algebraic Loop in AssignmentRules')\n \n outputFile.write(\"\\t\\treturn\\n\\n\")\n \n for functionId in functions:\n arguments = functions[functionId].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write(\"\\tdef \" + functionId + \"(self, \" + argumentString + \"):\\n\")\n outputFile.write(\"\\t\\treturn \" + functions[functionId].mathString.replace(\"^\", \"**\") + \"\\n\")\n \n for reactionId in reactions:\n outputFile.write(\"\\tdef Reaction\" + str(reactionId) + \"(self):\\n\\n\")\n\n rxnParameters = []\n for param in reactions[reactionId].rxnParameters:\n outputFile.write(\"\\t\\t\" + param[0] + \" = \" + str(param[1]) + \"\\n\")\n rxnParameters.append(param[0])\n\t\t\t\n rateLaw = ParseRHS(reactions[reactionId].rateLaw, rxnParameters)\n \n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n rateRuleLHSVars = []\n for key, rateRule in 
rateRules.items():\n rateRuleLHSVars.append(rateRule.variable)\n outputFile.write(\"\\tdef Rate\" + rateRule.variable + \"(self):\\n\\n\")\n rateLaw = ParseRHS(rateRule.math)\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n \n yArray = ''\n i = 0\n yArrayVars = [0 for x in range(len(species) + rateParams)]\n for variable, index in reactantIndex.items():\n yArrayVars[index] = variable\n \n for index in range(len(yArrayVars)):\n # print(yArrayVars[index])\n if index != 0:\n yArray += ', '\n \n if yArrayVars[index] in species:\n yArray += 'self.s[\\'' + yArrayVars[index] + '\\'].amount'\n continue\n \n if yArrayVars[index] in parameters:\n yArray += 'self.p[\\'' + yArrayVars[index] + '\\'].value'\n continue\n \n if yArrayVars[index] in compartments:\n yArray += 'self.c\\'' + yArrayVars[index] + '\\'].size'\n continue\n \n\n \n outputFile.write('\\tdef _SolveReactions(self, y, t):\\n\\n')\n outputFile.write('\\t\\tself.time = t\\n')\n outputFile.write('\\t\\t' + yArray + ' = y\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n rateArray = '[ '\n i = 0\n rateArrayVars = [0 for x in range(len(species) + rateParams)]\n \n for variable, index in reactantIndex.items():\n if variable in rateRuleLHSVars:\n rateArrayVars[index] = variable\n \n\n \n for variable in rateArrayVars:\n if i != 0:\n rateArray += ', '\n i += 1\n if variable == 0:\n rateArray += '0'\n else:\n rateArray += 'self.Rate' + variable + '()'\n \n \n \n \n rateArray += ']'\n outputFile.write('\\t\\trateRuleVector = np.array(' + str(rateArray) + ', dtype = np.float64)\\n\\n') \n \n outputFile.write('\\t\\tstoichiometricMatrix = np.array(' + re.sub('\\n,', ',\\n\\t\\t\\t\\t\\t', re.sub('[^[] +', ',' ,str(stoichCoeffMat))) + ', dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\treactionVelocities = np.array([')\n reactionElements = ''\n if reactions:\n for reactionId in reactions:\n if reactionElements == '':\n reactionElements += ('self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements += (', self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements = '0'\n outputFile.write(reactionElements + '], dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\trateOfSpeciesChange = stoichiometricMatrix @ reactionVelocities + rateRuleVector\\n\\n')\n outputFile.write('\\t\\treturn rateOfSpeciesChange\\n\\n')\n \n outputFile.write('\\tdef RunSimulation(self, deltaT, absoluteTolerance = 1e-12, relativeTolerance = 1e-6):\\n\\n')\n \n outputFile.write('\\t\\tfinalTime = self.time + deltaT\\n')\n outputFile.write('\\t\\ty0 = np.array([' + yArray + '], dtype = np.float64)\\n')\n outputFile.write('\\t\\t' + yArray + ' = odeint(self._SolveReactions, y0, [self.time, finalTime], atol = absoluteTolerance, rtol = relativeTolerance, mxstep=5000000)[-1]\\n')\n outputFile.write('\\t\\tself.time = finalTime\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n outputFile.write('\\n')\n \n for key in reactions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.p = {}\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n for param in reactions[key].rxnParameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + param[0] + \"\\'] = 
Parameter(\" + str(param[1]) + \", '\" + param[0] + \"')\\n\")\n #\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \"+ paramId + \", \" + str(parameters[paramId].isConstant) +\")\\n\"\n \n outputFile.write('\\n\\tdef __call__(self):\\n')\n# print(key)\n# print(reactions[key].rxnParameters)\n rxnParamNames = [param[0] for param in reactions[key].rxnParameters]\n rateLaw = ParseRHS(reactions[key].rateLaw, rxnParamNames, \"self.parent\")\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n \n for key in functions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n\n arguments = functions[key].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write('\\tdef __call__(self, ' + argumentString + '):\\n')\n outputFile.write(\"\\t\\treturn \" + functions[key].mathString.replace(\"^\", \"**\") + \"\\n\\n\")\n\n outputFile.close()", "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()", "def _publish_model(self):\n # Check if already published\n if self.model_published:\n return\n\n # Trace CPO model if required\n ctx = self.context\n lout = ctx.get_log_output()\n if lout and ctx.solver.trace_cpo:\n stime = time.time()\n lout.write(\"Model '\" + str(self.model.get_name()) + \"' in CPO format:\\n\")\n lout.write(self.cpostr)\n lout.write(\"\\n\")\n self.model.write_information(lout)\n lout.write(\"\\n\")\n lout.flush()\n self.process_infos.incr(CpoProcessInfos.MODEL_DUMP_TIME, time.time() - stime)\n\n # Dump in dump directory if required\n if ctx.model.dump_directory:\n stime = time.time()\n make_directories(ctx.model.dump_directory)\n mname = self.model.get_name()\n if mname is None:\n mname = \"Anonymous\"\n else:\n # Remove special characters introduced by Jupyter\n mname = mname.replace('<', '').replace('>', '')\n file = ctx.model.dump_directory + \"/\" + mname + \".cpo\"\n with utils.open_utf8(file, 'w') as f:\n f.write(self.cpostr)\n self.process_infos.incr(CpoProcessInfos.MODEL_DUMP_TIME, time.time() - stime)\n\n # Set published indicator\n self.model_published = True", "def export_model(model_file, key, verbose):\n if verbose:\n click.secho('Loading model from {}'.format(model_file), fg='green')\n\n from microbenthos.utils import yaml, validate_dict\n\n defs = yaml.unsafe_load(model_file)\n if key:\n try:\n defs = defs[key]\n except KeyError:\n click.secho('Could not get key {!r}! 
Found: {}'.format(\n key, defs.keys()\n ))\n raise click.Abort()\n\n try:\n valid = validate_dict(defs, key='model')\n from pprint import pformat\n if verbose:\n click.secho('Validated dictionary!', fg='green')\n model_definition = dict(model=valid)\n click.secho(\n yaml.dump(model_definition,\n indent=4,\n explicit_start=True,\n explicit_end=True),\n fg='yellow')\n\n except ValueError:\n click.secho('Model definition not validated!', fg='red')\n click.Abort()\n finally:\n if verbose:\n click.secho('Model export done', fg='green')", "def _write_model_def(self, file):\n file.write('model =')\n file.write('\\n')\n self._write_model_code(file, ' ')", "def model(self):\n self.add_file_string('Model file')\n self.make_dangerous('Model file')", "def model_artifact(self):\n pass", "def save_model(self, file_name=None):\n try:\n if file_name:\n self.agent.save_model(file_name)\n else:\n self.agent.save_model()\n print('Model saved successfully')\n return 1\n except:\n print('Failed to save model')\n return 0", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def buildModel (self , transformer, classifier ):\n for module in ('acct' , 'arch', 'bo', 'fo', 'risk'):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X[self.ModuleData[module]], self.y[self.ModuleData[module]] )\n joblib.dump ( summitAIModel, self.modelDumps[module] )", "def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def bakeModel(objlist, modelname, posename=\"\", decimate_type='COLLAPSE', decimate_parameter=0.1):\n if bpy.context.scene.phobosexportsettings.relativePath:\n # CHECK careful with path consistency (Windows)\n outpath = securepath(\n os.path.expanduser(\n os.path.join(bpy.path.abspath(\"//\"), bpy.context.scene.phobosexportsettings.path)\n )\n )\n else:\n # CHECK careful with path consistency (Windows)\n outpath = securepath(os.path.expanduser(bpy.context.scene.phobosexportsettings.path))\n\n # TODO delete me?\n # bake_outpath = securepath(os.path.join(outpath, modelname) if savetosubfolder else outpath)\n bake_outpath = outpath\n\n if bpy.context.scene.phobosexportsettings.structureExport:\n securepath(os.path.join(bake_outpath, 'bakes'))\n bake_outpath = os.path.join(bake_outpath, 'bakes/')\n\n export_name = modelname + '_' + posename\n\n visuals = [o for o in objlist if (\"phobostype\" in o and o.phobostype == \"visual\")]\n if len(visuals) > 0:\n\n log(\"Baking model to \" + bake_outpath, \"INFO\")\n sUtils.selectObjects(visuals, active=0)\n log(\"Copying objects for joining...\", \"INFO\")\n bpy.ops.object.duplicate(linked=False, mode='TRANSLATION')\n log(\"Joining...\", \"INFO\")\n bpy.ops.object.join()\n obj = bpy.context.active_object\n log(\"Deleting vertices...\", \"INFO\")\n bpy.ops.object.editmode_toggle()\n bpy.ops.mesh.select_all(action='TOGGLE')\n bpy.ops.mesh.select_all(action='TOGGLE')\n bpy.ops.mesh.remove_doubles()\n bpy.ops.object.editmode_toggle()\n log(\"Adding modifier...\", \"INFO\")\n\n bpy.ops.object.modifier_add(type='DECIMATE')\n bpy.context.object.modifiers[\"Decimate\"].decimate_type = decimate_type\n if decimate_type == 'COLLAPSE':\n 
bpy.context.object.modifiers[\"Decimate\"].ratio = decimate_parameter\n elif decimate_type == 'UNSUBDIV':\n bpy.context.object.modifiers[\"Decimate\"].iterations = decimate_parameter\n elif decimate_type == 'DISSOLVE':\n bpy.context.object.modifiers[\"Decimate\"].angle_limit = decimate_parameter\n\n log(\"Applying modifier...\", \"INFO\")\n bpy.ops.object.modifier_apply(apply_as='DATA', modifier=\"Decimate\")\n obj.name = export_name + \".obj\"\n\n # TODO use_selection might cause bugs, depending on Blender version\n bpy.ops.export_scene.obj(filepath=os.path.join(bake_outpath, obj.name), use_selection=True)\n\n obj.hide_render = True\n previewfile = export_name\n bUtils.createPreview(\n visuals, export_path=bake_outpath, modelname=modelname, previewfile=previewfile\n )\n\n obj.select_set(True)\n\n bpy.ops.object.delete()\n log(\"Done baking...\", \"INFO\")\n\n else:\n log(\"No visuals to bake!\", \"WARNING\")", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n 
rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def load_model(self) -> Any:", "def save_model(self, filename):\n self.model.save('models/' + str(filename))", "def save_model(model, model_filepath):\n dump(model, model_filepath)", "def test_export_pytorch_model(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)", "def buildModel( self, transformer, classifier ):\n summitAIModel = make_pipeline ( transformer , classifier )\n summitAIModel.fit ( self.X , self.y )\n joblib.dump ( summitAIModel , self.modeldump )", "def save(self, export_path: str):", "def save_model(self, path):\n pass", "def _save_model(self):\n save_generic(self.model, self.model_pkl_fname)", "def save(self,sess):\n self.saver.save(sess,\"./Models/\" + self.mod_name + \".ckpt\")", "def export_saved_model(self, export_dir='.', version=1):\n\n sess = tf.get_default_session()\n export_path = Path(export_dir) / str(version)\n while export_path.exists():\n version += 1 # step ahead 1 version\n export_path = Path(export_dir) / str(version)\n export_path = str(export_path)\n LOG.debug(\"exporting to {}\".format(export_path))\n builder = tf.saved_model.builder.SavedModelBuilder(export_path)\n # build the signature_def_map\n inputs, outputs = {}, {}\n for n, inp in enumerate(self.inputs):\n tag = 'input_' + str(n)\n inputs[tag] = tf.saved_model.utils.build_tensor_info(inp)\n for n, outp in enumerate(self.outputs):\n tag = 'output_' + str(n)\n outputs[tag] = tf.saved_model.utils.build_tensor_info(outp)\n sig = tf.saved_model.signature_def_utils.build_signature_def(\n inputs=inputs, outputs=outputs,\n method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={\n tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: sig\n },\n strip_default_attrs=True)\n builder.save()", "def save_pipeline(model_to_persist):\n\n save_file_name = 'model.pkl'\n save_path = configuracion.TRAINED_MODEL_DIR / save_file_name\n joblib.dump(model_to_persist, save_path)\n\n print('saved pipeline')", "def test_make_update_script_for_model(self):\n\n self.setup_model_params()\n self.write_file(self.first_model_path, self.base_source)\n self.write_file(self.second_model_path, self.base_source + self.model_source)\n\n source_script = self.pyscript.make_update_script_for_model(\n engine=self.engine,\n oldmodel=load_model('testmodel_first:meta'),\n model=load_model('testmodel_second:meta'),\n repository=self.repo_path,\n )\n\n self.assertTrue(\"['User'].create()\" in source_script)\n self.assertTrue(\"['User'].drop()\" in source_script)", "def _export_model(\n self,\n precision: ModelPrecision = ModelPrecision.FP32,\n export_format: ExportType = ExportType.ONNX,\n dump_features: bool = True,\n ):\n # copied from OTX inference_task.py\n self._data_cfg = ConfigDict(\n data=ConfigDict(\n train=ConfigDict(\n otx_dataset=None,\n labels=self._labels,\n ),\n test=ConfigDict(\n otx_dataset=None,\n labels=self._labels,\n ),\n )\n )\n self._init_task(export=True)\n\n cfg = self.configure(False, None)\n\n self._precision[0] = precision\n export_options: Dict[str, Any] = {}\n export_options[\"deploy_cfg\"] = self._init_deploy_cfg(cfg)\n assert len(self._precision) == 1\n export_options[\"precision\"] = 
str(self._precision[0])\n export_options[\"type\"] = str(export_format)\n\n export_options[\"deploy_cfg\"][\"dump_features\"] = dump_features\n if dump_features:\n output_names = export_options[\"deploy_cfg\"][\"ir_config\"][\"output_names\"]\n if \"feature_vector\" not in output_names:\n output_names.append(\"feature_vector\")\n if export_options[\"deploy_cfg\"][\"codebase_config\"][\"task\"] != \"Segmentation\":\n if \"saliency_map\" not in output_names:\n output_names.append(\"saliency_map\")\n export_options[\"model_builder\"] = getattr(self, \"model_builder\", build_segmentor)\n\n if self._precision[0] == ModelPrecision.FP16:\n export_options[\"deploy_cfg\"][\"backend_config\"][\"mo_options\"][\"flags\"].append(\"--compress_to_fp16\")\n\n backend_cfg_backup = {}\n if export_format == ExportType.ONNX:\n backend_cfg_backup = export_options[\"deploy_cfg\"][\"backend_config\"]\n export_options[\"deploy_cfg\"][\"backend_config\"] = {\"type\": \"onnxruntime\"}\n export_options[\"deploy_cfg\"][\"ir_config\"][\"dynamic_axes\"][\"input\"] = {0: \"batch\"}\n\n exporter = SegmentationExporter()\n results = exporter.run(\n cfg,\n **export_options,\n )\n\n if export_format == ExportType.ONNX:\n results[\"inference_parameters\"] = {}\n results[\"inference_parameters\"][\"mean_values\"] = \" \".join(\n map(str, backend_cfg_backup[\"mo_options\"][\"args\"][\"--mean_values\"])\n )\n results[\"inference_parameters\"][\"scale_values\"] = \" \".join(\n map(str, backend_cfg_backup[\"mo_options\"][\"args\"][\"--scale_values\"])\n )\n\n return results", "def _save_model_info(self, model):\r\n with open_(self.output_path / \"model.info\", \"w+\") as f:\r\n f.write(model.info)", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def save_model(self, filename):\n\t\tpickle.dump(self, open(filename, 'wb'))\n\t\tprint('Model saved in',filename)", "def save_model(self, fname):\n self.get_booster().save_model(fname)", "def build_model():", "def write(self, model):\n\n # Initialize json_dump\n json_dump = {\"model\": [], \"metadata\": {}}\n\n # Set timestamp in metadata\n json_dump[\"metadata\"][\"time\"] = str(datetime.now())\n\n # Set the size of the model in metadata\n json_dump[\"metadata\"][\"model_size\"] = len(model.models)\n\n for obj in model.models:\n _class = type(obj).__name__\n if _class in [\n Winding,\n PhaseWinding,\n Wire,\n PhaseCapacitor,\n Position,\n PhaseLoad,\n ]:\n continue\n json_dump[\"model\"].append({})\n json_dump[\"model\"][-1][\"class\"] = _class\n\n try:\n json_dump[\"model\"][-1][\"name\"] = {\"class\": \"str\", \"value\": obj.name}\n except:\n json_dump[\"model\"][-1][\"name\"] = {\"class\": \"str\", \"value\": None}\n pass\n\n for key, value in obj._trait_values.items():\n if key in [\"capacitance_matrix\", \"impedance_matrix\", \"reactances\"]:\n json_dump[\"model\"][-1][key] = {\"class\": \"list\", \"value\": []}\n for v in value:\n if isinstance(v, complex):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"complex\", \"value\": [v.real, v.imag]}\n )\n elif isinstance(v, list):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"list\", \"value\": []}\n )\n for vv in v:\n if isinstance(vv, complex):\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"value\"\n ].append(\n {\n \"class\": \"complex\",\n \"value\": [vv.real, vv.imag],\n }\n )\n else:\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"value\"\n ].append(\n {\n 
\"class\": str(type(vv)).split(\"'\")[1],\n \"value\": vv,\n }\n )\n else:\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": str(type(v)).split(\"'\")[1], \"value\": v}\n )\n continue\n if isinstance(value, list):\n json_dump[\"model\"][-1][key] = {\"class\": \"list\", \"value\": []}\n for v in value:\n\n if isinstance(v, complex):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"complex\", \"value\": [v.real, v.imag]}\n )\n\n elif isinstance(v, Position):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Position\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, Unicode):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Unicode\", \"value\": v.default_value}\n )\n\n elif isinstance(v, Wire):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Wire\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, PhaseCapacitor):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"PhaseCapacitor\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n elif isinstance(v, Winding):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"Winding\"}\n )\n for kkk, vvv in v._trait_values.items():\n if kkk != \"phase_windings\":\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ] = {\"class\": \"list\", \"value\": []}\n for phw in v.phase_windings:\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ][\"value\"].append({\"class\": \"PhaseWinding\"})\n for kkkk, vvvv in phw._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][\n \"phase_windings\"\n ][\"value\"][-1][kkkk] = {\n \"class\": str(type(vvvv)).split(\"'\")[1],\n \"value\": vvvv,\n }\n\n elif isinstance(v, PhaseLoad):\n json_dump[\"model\"][-1][key][\"value\"].append(\n {\"class\": \"PhaseLoad\"}\n )\n for kkk, vvv in v._trait_values.items():\n json_dump[\"model\"][-1][key][\"value\"][-1][kkk] = {\n \"class\": str(type(vvv)).split(\"'\")[1],\n \"value\": vvv,\n }\n\n continue\n\n if isinstance(value, complex):\n json_dump[\"model\"][-1][key] = {\n \"class\": \"complex\",\n \"value\": [value.real, value.imag],\n }\n continue\n\n json_dump[\"model\"][-1][key] = {\n \"class\": str(type(value)).split(\"'\")[1],\n \"value\": value,\n }\n\n with open(os.path.join(self.output_path, self.filename), \"w\") as f:\n f.write(\n json_tricks.dumps(json_dump, allow_nan=True, sort_keys=True, indent=4)\n )", "def _write_model_code(self, file, indent = ''):\n file.write(indent)\n file.write('mdo --')\n file.write('\\n')\n indent2 = indent + ' '\n for action in self._actions:\n file.write(indent2)\n file.write(action)\n file.write('\\n')\n file.write(indent2)\n file.write('return $\\n')\n file.write(indent2)\n file.write(' results\\n')\n self._write_sources(file, indent2 + ' ')\n file.write('\\n')", "def _generate_model(self, specs, experiment = None, filename = 'dist/app/Model.hs'):\n with open(filename, \"w\") as file:\n self._write_model(file, specs, experiment = experiment)", 
"def write_should_show(model, filename, output, db_url=None):\n r2dt.write_should_show(model, filename, db_url, output)", "def model() -> Any:\n with open(\"airbnb_regressor.pickle\",\"rb\") as f:\n model = pickle.load(f)\n return model", "def save_model(self, *args, **kwargs):\n raise NotImplementedError", "def on_model(self, m: clingo.Model):\n self.output.append(m.symbols(False, False, True, False, False))", "def save(self, output, data):", "def model_architecture_to_file(model, save_path, show_shapes=True):\n plot_model(model, to_file=save_path + \"_model_architecture.png\", show_shapes=show_shapes)", "def save_model(file_name, ep, model, optimizer):\n\n torch.save({\n 'epoch': ep,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n }, file_name) \n \n return" ]
[ "0.6738641", "0.66990805", "0.6495466", "0.6315162", "0.62201536", "0.61410815", "0.6110016", "0.6058837", "0.60287017", "0.5983388", "0.5955291", "0.59451455", "0.5926416", "0.5924503", "0.5913353", "0.58991826", "0.5894664", "0.5845883", "0.5842756", "0.58416146", "0.5836566", "0.58292025", "0.5823154", "0.58051306", "0.5797647", "0.578189", "0.5781533", "0.57732314", "0.5737993", "0.5727521", "0.5727521", "0.5727521", "0.5727521", "0.5727521", "0.5722656", "0.57064366", "0.5704433", "0.5697692", "0.5684146", "0.568287", "0.5671085", "0.5665638", "0.56629586", "0.5659148", "0.5654381", "0.56511617", "0.56499016", "0.56397045", "0.56361866", "0.5632816", "0.56316376", "0.56297916", "0.5628704", "0.562063", "0.5618609", "0.5613029", "0.560257", "0.5594028", "0.5590328", "0.55741316", "0.55738604", "0.5569649", "0.55670696", "0.55636406", "0.5533006", "0.5531151", "0.55284977", "0.5527372", "0.5527372", "0.5525247", "0.55248016", "0.5517491", "0.5512958", "0.550143", "0.5493108", "0.54869294", "0.54867876", "0.54858536", "0.5482192", "0.54810256", "0.54723954", "0.54606533", "0.5459708", "0.5459094", "0.54484063", "0.5446433", "0.54384744", "0.5435666", "0.5424488", "0.542414", "0.54152673", "0.5414155", "0.54035693", "0.54013526", "0.5399688", "0.5397821", "0.53976065", "0.5396292", "0.5393103", "0.5385914", "0.53838396" ]
0.0
-1
A helper function that performs a three-year moving window filter for a single land cover value (such as Forest, coded as 1) on one three-year window representing the year(i-1), year(i), and year(i+1) annual land cover classifications. The function operates on a single window and should only be called through applyWindow3years. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three consecutive years; if the two outer years are identical but the central year is not, the central pixels are reclassified to match the class of their temporal neighbours. The filter can be applied to whichever land cover values the user chooses, whether all classes or only a select few.
def mask3(imagem, value, bandNames):
    mask = imagem.select(bandNames[0]).eq(value) \
        .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \
        .bitwiseAnd(imagem.select(bandNames[2]).eq(value))
    change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)
    img_out = imagem.select(bandNames[1]).blend(change_img)
    return img_out
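A minimal usage sketch (added for illustration; it is not part of the dataset record): the query notes that mask3 is meant to be driven by applyWindow3years, which slides the three-band window across a chronologically ordered band list while leaving the first and last years untouched. The wrapper below mirrors the companion function that appears among the hard negatives for this record; the Earth Engine asset ID and band names are hypothetical placeholders.

import numpy as np
import ee

ee.Initialize()  # assumes Earth Engine credentials are already configured

def applyWindow3years(imagem, value, bandNames):
    # Keep the first year, filter each interior year against its two neighbours, keep the last year.
    img_out = imagem.select(bandNames[0])
    for i in np.arange(1, len(bandNames) - 1):
        img_out = img_out.addBands(mask3(imagem, value, bandNames[(i - 1):(i + 2)]))
    img_out = img_out.addBands(imagem.select(bandNames[-1]))
    return img_out

# Hypothetical example: filter the Forest class (value 1) across a 1985-1990 classification stack.
classification_image = ee.Image('users/example/annual_land_cover')  # placeholder asset ID
bandNames = ['classification_1985', 'classification_1986', 'classification_1987',
             'classification_1988', 'classification_1989', 'classification_1990']
forest_filtered = applyWindow3years(classification_image, 1, bandNames)

Keeping the first and last bands unfiltered matches the description above, since those years lack one of the two temporal neighbours the filter needs.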
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so 
now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = 
ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n 
.bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. 
in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def winter_gif(self):\n # Create the directory.\n 
os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice 
is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def load_copernicus_ammonia(layers, time_slice, lat_slice, lon_slice, verbose=False):\n xr_layers = []\n\n if 'agl' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_agl.nc').agl.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n if 'ags' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_ags.nc').ags.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n nh3 = sum(xr_layers)\n nh3.name = 'nh3'\n\n if verbose:\n\n shape = gpd.read_file('./shp/lombardia/lombardia.shp').to_crs(epsg=4326)\n\n ncols = len(xr_layers) + 1\n fig, axs = plt.subplots(ncols=ncols, figsize=(8 * ncols, 5))\n\n for i in range(len(xr_layers)):\n shape.plot(ax=axs[i], color='black', alpha=0.5)\n xr_layers[i].mean(dim='time').plot(ax=axs[i], alpha=0.5)\n\n shape.plot(ax=axs[len(xr_layers)], color='black', alpha=0.5)\n nh3.mean(dim='time').plot(ax=axs[len(xr_layers)], alpha=0.5)\n\n plt.show()\n\n return nh3", "def test_3dtproject_temporal_filter_wf(self):\n \n self.wf = build_3dtproject_temporal_filter(\n bpHigh= .9, bpLow= 0.005, tr=2,\n import_file=self.sample_raw_image,\n export_file=self.export_path,\n base_dir=self.test_path, crashdump_dir=self.test_path,\n mask_file=self.sample_raw_image_mask\n )", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, 
image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n verbose = kw.pop('verbose',False)\r\n ### prepare RO results and sort them according to sweep point\r\n for a in [a_lt3,a_lt4]:\r\n a.pts = a.g.attrs['sweep_length']\r\n a.ssros = a.agrp['ssro_results'].value\r\n a.readouts = a.g.attrs['nr_of_ROsequences']\r\n # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n ### correlate the ROs with each other by making a boolean filter:\r\n ### variables here are described in terms of spin states!\r\n m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n \r\n ### now define unique identifiers for each Ro correlation and recast the correlations into a single array.\r\n ### As identifieres I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... and so forth\r\n RO_correlators = np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n + np.array(len(a_lt3.ssros)*[4])*m00 \r\n ### PH - added to make sure that has a full set of repetitions\r\n RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n \r\n ### now sort the correlators and the adwin fltr according to the sweep pts\r\n sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n ### from now on: no numpy magic anymore. from here it is brutforce 'for-looping'\r\n ### (all conceived arrays will have different lengths due to temporal filtering. this break most np methods)\r\n ### although vstack and hstack would probably work...\r\n \r\n return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurence\r\n for i in range(a_lt3.pts): \r\n correlators_at_sweep_pt = [0,0,0,0]\r\n for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. 
Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a 
calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = 
fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def filter_on_adwin_parameters(a_lt3,a_lt4,**kw):\r\n\r\n filter_params = kw.pop('adwin_filter_params',{})\r\n if len(filter_params):\r\n old_params = analysis_params.SPSP_fltr_adwin_settings\r\n \r\n for setup_key,setup_dict in filter_params.iteritems():\r\n for key,params in setup_dict.iteritems():\r\n analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+setup_key][key] = params\r\n\r\n fltr = np.array([True]*len(a_lt3.agrp['ssro_results'].value)) ### initially everything true\r\n\r\n for a,suffix in zip([a_lt3,a_lt4],['lt3','lt4']): ### loop over both files\r\n for key,val in analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+suffix].iteritems(): ### loop over the list of filter parameters\r\n [filter_on,minimum,maximum] = val\r\n\r\n if filter_on:\r\n if key == 'repetition_number':\r\n values = np.array([i for i in range(len(fltr)/a.g.attrs['sweep_length']) for _ in range(a.g.attrs['sweep_length'])]) ### Make an array of values corresponding to the current rep\r\n else:\r\n values = a.agrp[key].value\r\n\r\n fltr = np.logical_and(fltr,(values >= minimum) & ( values <= maximum)) ### update filter\r\n\r\n if len(filter_params):\r\n analysis_params.SPSP_fltr_adwin_settings = old_params\r\n\r\n return fltr", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def 
movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def _mask3d(self, n, i, window):\n\n n = np.array(n)\n i = np.array(i)\n\n w2 = (window - 1) // 2\n\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n mask = np.zeros(n, dtype=np.bool)\n mask[x1:x2, y1:y2, z1:z2] = True\n\n return mask", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 
2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def read_wxt_obs(years, time):\n\n met_vars = ['RH', 'Tair', 'press']\n vars = met_vars + ['time']\n filepath = ['C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + str(i) + '_15min.nc' for i in years]\n wxt_obs_raw = eu.netCDF_read(filepath, vars=vars)\n\n\n # set up array to be filled\n wxt_obs = {}\n for met_var in met_vars:\n wxt_obs[met_var] = np.empty(len(time))\n wxt_obs[met_var][:] = np.nan\n wxt_obs['time'] = time\n\n # find data region and create an average if appropriate\n print_step = range(1000,20000, 1000)\n for t, time_t in enumerate(time):\n\n if t in print_step:\n print 't ='+str(t)\n\n # time t-1 (start of original time period, as all data is relevent for time ENDING at time_t)\n tm1 = t-1\n time_tm1 = time_t - dt.timedelta(minutes=60)\n\n # # start of time period\n # idx_extent = 8000\n # s_idx = int(eu.binary_search(wxt_obs_raw['time'], time_tm1, lo=max(0, tm1 - idx_extent),\n # hi=min(tm1 + idx_extent, len(wxt_obs_raw['time']))))\n # # end of time period\n # e_idx = int(eu.binary_search(wxt_obs_raw['time'], time_t, lo=max(0, t - idx_extent),\n # hi=min(t + idx_extent, len(wxt_obs_raw['time']))))\n\n s_idx = int(eu.binary_search(wxt_obs_raw['time'], time_tm1))\n # end of time period\n e_idx = int(eu.binary_search(wxt_obs_raw['time'], time_t))\n\n # if the time_range time and data['time'] found in this iteration are within an acceptable range (15 mins)\n tm1_diff = time_tm1 - wxt_obs_raw['time'][s_idx]\n t_diff = time_t - wxt_obs_raw['time'][e_idx]\n\n\n # _, s_idx, tm1_diff = eu.nearest(wxt_obs_raw['time'], time_tm1)\n # _, e_idx, t_diff = 
eu.nearest(wxt_obs_raw['time'], time_t)\n\n\n if (tm1_diff.total_seconds() <= 15 * 60) & (t_diff.total_seconds() <= 15 * 60):\n for met_var in met_vars:\n wxt_obs[met_var][t] = np.nanmean(wxt_obs_raw[met_var][s_idx:e_idx+1])\n\n\n # create RH_frac using RH data\n wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n return wxt_obs", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def firwin(N, cutoff, width=None, window='hamming'):\n\n from signaltools import get_window\n if isinstance(width,float):\n A = 2.285*N*width + 8\n if (A < 21): beta = 0.0\n elif (A <= 50): beta = 0.5842*(A-21)**0.4 + 0.07886*(A-21)\n else: beta = 0.1102*(A-8.7)\n window=('kaiser',beta)\n\n win = get_window(window,N,fftbins=1)\n alpha = N//2\n m = numpy.arange(0,N)\n h = 
win*special.sinc(cutoff*(m-alpha))\n return h / numpy.sum(h,axis=0)", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n 
accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def shift_photo_north(gflux=None, rflux=None, zflux=None):\n # ADM if floats were sent, treat them like arrays.\n flt = False\n if _is_row(gflux):\n flt = True\n gflux = np.atleast_1d(gflux)\n rflux = np.atleast_1d(rflux)\n zflux = np.atleast_1d(zflux)\n\n # ADM only use the g-band color shift when r and g are non-zero\n gshift = gflux * 10**(-0.4*0.004)\n w = np.where((gflux != 0) & (rflux != 0))\n gshift[w] = (gflux[w] * 10**(-0.4*0.004) * (gflux[w]/rflux[w])**complex(-0.059)).real\n\n # ADM only use the r-band color shift when r and z are non-zero\n # ADM and only use the z-band color shift when r and z are non-zero\n w = np.where((rflux != 0) & (zflux != 0))\n rshift = rflux * 10**(0.4*0.003)\n zshift = zflux * 10**(0.4*0.013)\n\n rshift[w] = (rflux[w] * 10**(0.4*0.003) * (rflux[w]/zflux[w])**complex(-0.024)).real\n zshift[w] = (zflux[w] * 10**(0.4*0.013) * (rflux[w]/zflux[w])**complex(+0.015)).real\n\n if flt:\n return gshift[0], rshift[0], zshift[0]\n\n return gshift, rshift, zshift", "def preprocess_land_cover(\n src_files, dst_raster, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n if os.path.isfile(dst_raster) and not overwrite:\n log.info(\"Land cover data already preprocessed. Skipping.\")\n return\n log.info(\"Starting preprocessing of land cover data.\")\n LC_CLASSES = [\n \"bare\",\n \"crops\",\n \"grass\",\n \"moss\",\n \"shrub\",\n \"tree\",\n \"urban\",\n \"water-permanent\",\n \"water-seasonal\",\n ]\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n reprojected_files = []\n tile_names = unique_tiles(tmpdir)\n\n if not tile_names:\n raise MissingDataError(\"Land cover data not found.\")\n\n for lc_class in LC_CLASSES:\n tiles = [\n p.as_posix()\n for p in tmpdir.glob(f\"*{lc_class}-coverfraction-layer*.tif\")\n ]\n if len(tiles) > 1:\n src_file = merge_tiles(\n tiles, os.path.join(tmpdir, f\"{lc_class}_mosaic.tif\"), nodata=255,\n )\n else:\n src_file = tiles[0]\n reprojected_files.append(\n reproject(\n src_raster=src_file,\n dst_raster=os.path.join(tmpdir, f\"{lc_class}.tif\"),\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=255,\n dst_nodata=255,\n dst_dtype=\"Byte\",\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n )\n\n if len(reprojected_files) > 1:\n raster = concatenate_bands(\n src_files=reprojected_files,\n dst_file=dst_raster,\n band_descriptions=LC_CLASSES,\n )\n else:\n raster = reprojected_files[0]\n\n if geom:\n mask_raster(raster, geom)", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, 
None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def apply3filter(array, filter_):\n s = int(len(filter_)/2)\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height,width)))\n for row in range(s, (height-s)):\n for col in range(s, (width-s)):\n new_array[row,col] = np.sum(filter_ * array[(row-s):(row+s+1),(col-s):(col+s+1)])\n return new_array", "def _gaufit3d(self, coa_map, lx=None, ly=None, lz=None, thresh=0., win=7):\n\n # Get shape of 3-D coalescence map and max coalesence grid location\n nx, ny, nz = coa_map.shape\n mx, my, mz = np.unravel_index(np.nanargmax(coa_map), coa_map.shape)\n\n # Only use grid cells above threshold value, and within the 
specified\n # window around the coalescence peak\n flg = np.logical_and(coa_map > thresh,\n self._mask3d([nx, ny, nz], [mx, my, mz], win))\n ix, iy, iz = np.where(flg)\n\n # Subtract mean of entire 3-D coalescence map from the local grid\n # window so it is better approximated by a gaussian (which goes to zero\n # at infinity)\n coa_map = coa_map - np.nanmean(coa_map)\n\n # Fit 3-D gaussian function\n ncell = len(ix)\n\n if not lx:\n lx = np.arange(nx)\n ly = np.arange(ny)\n lz = np.arange(nz)\n\n if lx.ndim == 3:\n iloc = [lx[mx, my, mz], ly[mx, my, mz], lz[mx, my, mz]]\n x = lx[ix, iy, iz] - iloc[0]\n y = ly[ix, iy, iz] - iloc[1]\n z = lz[ix, iy, iz] - iloc[2]\n else:\n iloc = [lx[mx], ly[my], lz[mz]]\n x = lx[ix] - iloc[0]\n y = ly[iy] - iloc[1]\n z = lz[iz] - iloc[2]\n\n X = np.c_[x * x, y * y, z * z,\n x * y, x * z, y * z,\n x, y, z, np.ones(ncell)].T\n Y = -np.log(np.clip(coa_map.astype(np.float64)[ix, iy, iz],\n 1e-300, np.inf))\n\n X_inv = np.linalg.pinv(X)\n P = np.matmul(Y, X_inv)\n G = -np.array([2 * P[0], P[3], P[4],\n P[3], 2 * P[1], P[5],\n P[4], P[5], 2 * P[2]]).reshape((3, 3))\n H = np.array([P[6], P[7], P[8]])\n loc = np.matmul(np.linalg.inv(G), H)\n cx, cy, cz = loc\n\n K = P[9] \\\n - P[0] * cx ** 2 \\\n - P[1] * cy ** 2 \\\n - P[2] * cz ** 2 \\\n - P[3] * cx * cy \\\n - P[4] * cx * cz \\\n - P[5] * cy * cz \\\n\n M = np.array([P[0], P[3] / 2, P[4] / 2,\n P[3] / 2, P[1], P[5] / 2,\n P[4] / 2, P[5] / 2, P[2]]).reshape(3, 3)\n egv, vec = np.linalg.eig(M)\n sgm = np.sqrt(0.5 / np.clip(np.abs(egv), 1e-10, np.inf))/2\n val = np.exp(-K)\n csgm = np.sqrt(0.5 / np.clip(np.abs(M.diagonal()), 1e-10, np.inf))\n\n # Convert back to whole-grid coordinates\n gau_3d = [loc + iloc, vec, sgm, csgm, val]\n\n # Convert grid location to XYZ / coordinates\n xyz = self.lut.xyz2loc(np.array([[gau_3d[0][0],\n gau_3d[0][1],\n gau_3d[0][2]]]),\n inverse=True)\n loc_gau = self.lut.xyz2coord(xyz)[0]\n\n loc_gau_err = np.array([gau_3d[2][0] * self.lut.cell_size[0],\n gau_3d[2][1] * self.lut.cell_size[1],\n gau_3d[2][2] * self.lut.cell_size[2]])\n\n return loc_gau, loc_gau_err", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def numberOfWideBands(config=None):\n # Get correlator configuration\n c = config\n if c == None: \n c = utils.getConfigAstroband()\n\n # Determine if we have both wideband and spectral line astrobands. 
\n # If we do, we return nwide & maxbandwidth for sl only since \n # this is the correlator which will be attached to all ants.\n astrobands = [ abc[0] for abc in c ]\n if len( astrobands ) == 0:\n raise Exception, \"No existing astroband configuration.\"\n if max( astrobands ) > 8 and min( astrobands ) < 9: \n astrobands = [ ab for ab in astrobands if ab < 9 ]\n\n # Check bandwidth\n nwide = 0\n maxbandwidth = 0\n for t in c:\n astroband = t[0]\n # Skip band if it is not being used or is not in astroband list above.\n mp = commands.queryString('SignalPath.Mapping.Astroband%d.confTag' % (astroband) )\n if mp == 'NONE' or astroband not in astrobands: continue\n\n # Get bandwidth\n if t[2] == commands.BW500:\n bw = 500\n elif t[2] == commands.BW250:\n bw = 250\n elif t[2] == commands.BW125:\n bw = 125\n elif t[2] == commands.BW62:\n bw = 62\n elif t[2] == commands.BW31:\n bw = 31\n elif t[2] == commands.BW8:\n bw = 8\n elif t[2] == commands.BW2:\n bw = 2\n else:\n raise Exception, 'Could not find bandwith for '+str(t[2])\n\n # Maximum?\n if bw > maxbandwidth: \n maxbandwidth = bw\n if utils.isDualPol( astroband ):\n nwide = 2 \n else:\n nwide = 1\n elif bw == maxbandwidth:\n if utils.isDualPol( astroband ): \n nwide += 2 \n else:\n nwide += 1\n\n return nwide, maxbandwidth", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):\r\n\r\n n = time_series.shape[-1]\r\n\r\n len_boxcar_ub = np.ceil(1 / (2.0 * ub))\r\n boxcar_ub = np.empty(len_boxcar_ub)\r\n boxcar_ub.fill(1.0 / len_boxcar_ub)\r\n boxcar_ones_ub = np.ones_like(boxcar_ub)\r\n\r\n if lb == 0:\r\n lb = None\r\n else:\r\n len_boxcar_lb = np.ceil(1 / (2.0 * lb))\r\n boxcar_lb = np.empty(len_boxcar_lb)\r\n boxcar_lb.fill(1.0 / len_boxcar_lb)\r\n boxcar_ones_lb = np.ones_like(boxcar_lb)\r\n\r\n #If the time_series is a 1-d, we add a dimension, so that we can iterate\r\n #over 2-d inputs:\r\n if len(time_series.shape) == 1:\r\n time_series = np.array([time_series])\r\n for i in range(time_series.shape[0]):\r\n if ub:\r\n #Start by applying a low-pass to the signal. 
Pad the signal on\r\n #each side with the initial and terminal signal value:\r\n pad_s = np.hstack((boxcar_ones_ub *\r\n time_series[i, 0], time_series[i]))\r\n pad_s = np.hstack((pad_s, boxcar_ones_ub * time_series[i, -1]))\r\n\r\n #Filter operation is a convolution with the box-car(iterate,\r\n #n_iterations times over this operation):\r\n for iteration in range(n_iterations):\r\n conv_s = np.convolve(pad_s, boxcar_ub)\r\n\r\n #Extract the low pass signal by excising the central\r\n #len(time_series) points:\r\n time_series[i] = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):\r\n conv_s.shape[-1] / 2 + np.ceil(n / 2.)]\r\n\r\n #Now, if there is a high-pass, do the same, but in the end subtract out\r\n #the low-passed signal:\r\n if lb:\r\n pad_s = np.hstack((boxcar_ones_lb *\r\n time_series[i, 0], time_series[i]))\r\n pad_s = np.hstack((pad_s, boxcar_ones_lb * time_series[i, -1]))\r\n\r\n #Filter operation is a convolution with the box-car(iterate,\r\n #n_iterations times over this operation):\r\n for iteration in range(n_iterations):\r\n conv_s = np.convolve(pad_s, boxcar_lb)\r\n\r\n #Extract the low pass signal by excising the central\r\n #len(time_series) points:\r\n s_lp = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):\r\n conv_s.shape[-1] / 2 + np.ceil(n / 2.)]\r\n\r\n #Extract the high pass signal simply by subtracting the high pass\r\n #signal from the original signal:\r\n time_series[i] = time_series[i] - s_lp + np.mean(s_lp) # add mean\r\n #to make sure that there are no negative values. This also seems to\r\n #make sure that the mean of the signal (in % signal change) is\r\n #close to 0\r\n\r\n return time_series.squeeze()", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def roi_to_wm(img,brain_wm,nth):\n \n data = img.get_data()\n wmdata = brain_wm.get_data()\n shape = data.shape\n\n roi_ids = np.unique(data)\n roi = roi_ids[1:]\n roi = [int(i) for i in roi]\n print roi\n 
\n wmdata = wmdata!=0\n result_mask = np.zeros(data.shape)\n #print wmdata \n \n #First, get the nonzero voxel index in image data.\n #Here image data is a label volume.\n #ROIs is in it\n for roi_id in roi:\n #print roi_id\n tmp_mask = data==roi_id\n #print tmp_mask\n indexs = np.transpose(tmp_mask.nonzero())\n #print indexs\n \n #Second, find the nearest wm voxel for each indexs.\n print indexs.shape\n for coor in indexs:\n #print coor\n x = coor[0]\n y = coor[1]\n z = coor[2]\n \n if wmdata[x,y,z]==1:\n result_mask[x,y,z] = roi_id\n else:\n #find the nearest neighbor.\n flag = False\n radius = 1\n mindist_voxel = []\n mindist = 1000 \n while radius<100: \n neigh_list = get_neighbors(coor,radius,shape)\n radius += 1\n #find the nearest white matter voxel.\n for n in neigh_list:\n #print n\n if wmdata[n[0],n[1],n[2]]==1:\n flag = True\n dist = np.sqrt((n[0]-x)**2+(n[1]-y)**2+(n[2]-z)**2)\n # if the distance is smaller than tag, choose it to be nearest.\n \n if dist < mindist:\n mindist = dist\n mindist_voxel = n\n \n if flag:\n break\n #print mindist_voxel\n if mindist_voxel!=[]:\n result_mask[mindist_voxel[0],mindist_voxel[1],mindist_voxel[2]] = roi_id \n for roi_id in roi:\n tmp_mask = result_mask==roi_id\n roi_size = tmp_mask.sum() \n print roi_id, roi_size\n result = img\n result._data = result_mask\n #roi_name = os.path.join(mkdir,'roi_%s.nii.gz'%i)\n nib.save(result,\"test_regroi.nii.gz\")\n \n return True", "def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def test_3dtproject_temporal_filter_wf_scrubs(self):\n\n self.wf = build_3dtproject_temporal_filter(\n bpHigh= .9, bpLow= 0.005, tr=2,\n scrub_targets=True,\n import_file=self.sample_raw_image,\n export_file=self.export_path,\n base_dir=self.test_path, crashdump_dir=self.test_path,\n mask_file=self.sample_raw_image_mask\n )\n scrub_targets = [1] * 100\n scrub_targets[46:52] = [0] * 6\n self.highlight_ranges = [(45.5, 52.5)]\n self.wf.inputs.inputnode.scrub_targets = scrub_targets", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {}\n ) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(brz=(1000, 2750))\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain, seed=69420)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, 
output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def stack_tir(scene_urls,cloud_mask_bits,aoi,aoi_crs,\n subtract_median_lst=True,subtract_air_temp=False):\n if subtract_air_temp:\n ceda_password = get_ceda_password()\n at = met_climate.access_ukcp09(cf.ceda_username,ceda_password)\n\n \n # with rasterio.open(scene_bqa) as bqa:\n # with rasterio.open(scene_tir) as tir:\n\n # bqa_data,bqa_trans = ru.read_in_aoi(bqa,**aoi_kwargs)\n # tir_data,tir_trans = ru.read_in_aoi(tir,**aoi_kwargs)\n \n # bqa_data = bqa_data[0,:,:]\n # tir_data = tir_data[0,:,:]\n # tir_data = ma.array(tir_data,dtype=float,\n # mask=ru.mask_qa(bqa_data,bitmask=0b1))\n\n # (ymin,ymax) = (0, tir_data.shape[0])\n # (xmin,xmax) = (0, tir_data.shape[1])\n \n counter=-1\n for scene_url in scene_urls:\n counter+=1\n scene_tir = scene_url\n scene_bqa = scene_url.replace('B'+tirband,'B'+qaband)\n scene_red = scene_url.replace('B'+tirband,'B'+rband)\n scene_nir = scene_url.replace('B'+tirband,'B'+nband)\n scene_metadata = scene_url.replace('B'+tirband+'.TIF','MTL.txt')\n\n print('Reading scene {}'.format(counter+1))\n try:\n with rasterio.open(scene_bqa) as bqa:\n #print(scene_bqa)\n bqa_data,bqa_trans = ru.read_in_aoi(bqa,aoi=aoi,aoi_crs=aoi_crs)\n\n with rasterio.open(scene_tir) as tir:\n #print(scene_tir)\n tir_data,tir_trans = ru.read_in_aoi(tir,aoi=aoi,aoi_crs=aoi_crs)\n tir_crs = tir.crs\n tir_profile = tir.profile\n\n with rasterio.open(scene_red) as red:\n #print(scene_red)\n red_data,red_trans = ru.read_in_aoi(red,aoi=aoi,aoi_crs=aoi_crs)\n red_crs = red.crs\n\n with rasterio.open(scene_nir) as nir:\n #print(scene_nir)\n nir_data,nir_trans = ru.read_in_aoi(nir,aoi=aoi,aoi_crs=aoi_crs)\n \n except OSError as e:\n print('ERROR',e)\n print('skipping scene')\n counter = counter-1\n continue\n \n # Determine size of stack allowing for AoI to extend outside of scene\n if counter == 0:\n aoi_box = rasterio.warp.transform_bounds(aoi_crs,tir_crs,*aoi.values())\n aoi_left, aoi_bottom, aoi_right, aoi_top = aoi_box\n aoi_box = dict(zip(('minx','miny','maxx','maxy'),aoi_box))\n # rowmin,colmin = (bqa.index(aoi_left,aoi_top)) #,op=round))\n # rowmax,colmax = (bqa.index(aoi_right,aoi_bottom)) #,op=round))\n # The above two lines are fine but the following does not \n # require the rasterio dataset to be kept open\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,aoi_left,aoi_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,aoi_right,aoi_bottom)\n stack_height,stack_width = 
(rowmax-rowmin,colmax-colmin)\n lst_stack = (ma.zeros((len(scene_urls),stack_height,stack_width),\n dtype=np.float,fill_value=np.nan\n )+np.nan) \n \n # Determine size of intersect in THIS scene\n intersect = ru.aoi_scene_intersection(aoi_box,bqa)\n ins_left, ins_bottom, ins_right, ins_top = intersect.bounds\n #rowmin,colmin = (bqa.index(ins_left,ins_top,op=round))\n #rowmax,colmax = (bqa.index(ins_right,ins_bottom,op=round))\n # The above two lines are incorrect now that we read a window:\n # We need to transform the coordinates into the row,col of \n # the window, not the original file.\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,ins_left,ins_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,ins_right,ins_bottom)\n\n try:\n # Subset data \n bqa_data = ma.array(bqa_data[0,rowmin:rowmax,colmin:colmax])\n tir_data = ma.array(tir_data[0,rowmin:rowmax,colmin:colmax])\n red_data = ma.array(red_data[0,rowmin:rowmax,colmin:colmax])\n nir_data = ma.array(nir_data[0,rowmin:rowmax,colmin:colmax])\n assert tir_data.shape == lst_stack.shape[1:]\n except (IndexError,AssertionError) as e:\n print('ERROR:',e)\n print('loop count',counter)\n print(tir_data.shape, lst_stack.shape)\n print(rowmin,rowmax,colmin,colmax)\n import pdb; pdb.set_trace()\n\n lst_data = lst.calculate_land_surface_temperature_NB(\n red_data, nir_data, tir_data,\n red_trans, tir_trans, \n red_crs, tir_crs, scene_metadata\n )\n \n # Masks\n smw = 11\n mask_all = filters.maximum_filter(\n ru.mask_qa(bqa_data,bits=cloud_mask_bits),size=smw\n )\n\n lst_data_mask_all = ma.array(lst_data,\n mask=mask_all,\n dtype=np.float,\n fill_value=np.nan) #.filled()\n\n # After masking, reproject\n # not necessary if they share a CRS\n if counter > 0:\n assert tir_crs == prev_crs\n prev_crs = tir_crs\n\n # Now do some normalisation\n if subtract_air_temp:\n filename = scene_tir.split('/')[-1]\n datestring = filename.split('_')[3]\n\n atscene = met_climate.dummy_scene( \n tir_crs, tir_trans, aoi_box,(stack_height,stack_width))\n\n # import pdb; pdb.set_trace()\n # If the following fails, it may mean there was a problem setting up the session\n atdata = at.grid_temp_over_scene(\n atscene, datestring, interpolation='linear')\n atdata = atdata[rowmin:rowmax,colmin:colmax]\n assert lst_data_mask_all.shape == atdata.shape\n lst_data_mask_all = ma.array(\n lst_data_mask_all - atdata,\n mask=mask_all,\n fill_value=np.nan)\n \n if subtract_median_lst:\n # ALSO subtract median xLST\n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n elif subtract_median_lst:\n # Subtract median LST from scene (within QA mask) \n \n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n # Then add to stack\n lst_stack[counter,:,:] = lst_data_mask_all\n\n # Make profile for file output\n N_layers = counter+1\n tir_profile.update(\n dtype=rasterio.float64,\n width=stack_width,\n height=stack_height,\n transform=tir_trans,\n count=N_layers,\n compress='lzw'\n )\n\n\n return lst_stack, tir_profile", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch 
Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = [(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = 
[]\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def itkBoundedReciprocalImageFilterIF3IF3_cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIF3IF3_cast(*args)", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = 
lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):\r\n\t\r\n\twork_arr = np.ravel(input_arr)\r\n\told_sky = np.median(work_arr)\r\n\toldStaDesviation = work_arr.std()\r\n\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\tif low_cut and high_cut:\r\n\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\telse:\r\n\t\tif low_cut:\r\n\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tindices = np.where((work_arr < upper_limit))\r\n\twork_arr = work_arr[indices]\r\n\tnew_sky = np.median(work_arr)\r\n\titeration = 0\r\n\twhile ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :\r\n\t\titeration += 1\r\n\t\told_sky = new_sky\r\n\t\toldStaDesviation = work_arr.std()\r\n\t\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\t\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\t\tif low_cut and high_cut:\r\n\t\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tif low_cut:\r\n\t\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\t\telse:\r\n\t\t\t\tindices = np.where((work_arr < upper_limit))\r\n\t\twork_arr = work_arr[indices]\r\n\t\tnew_sky = np.median(work_arr)\r\n\treturn (new_sky, iteration)", "def get_bands(self, data_array_norm, baseline_array_norm, f):\n\n fmax = 50\n fidx = f < fmax\n fnum = f[fidx].size\n\n band_tot = np.zeros((fnum, fnum, data_array_norm.shape[0], data_array_norm.shape[2], data_array_norm.shape[3]))\n band_tot_bl = np.zeros((fnum, fnum, baseline_array_norm.shape[0], baseline_array_norm.shape[2], baseline_array_norm.shape[3]))\n for i in range(fnum):\n for j in range(fnum):\n if j > i:\n idx = (f >= f[i]) & (f < f[j])\n band_tot[i, j, :, :] = np.sum(data_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n band_tot_bl[i, j, :, :] = np.sum(baseline_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n\n\n band_tot_bl1 = np.mean(band_tot_bl, axis=3) # average across time bins\n band_tot_bl2 = np.repeat(band_tot_bl1[:, :, :, None, :], band_tot_bl.shape[3], axis=3) # repeat same value across time\n return band_tot, band_tot_bl2, f[fidx]", "def filter_tracks(df, start_year=1980, end_year=2010, zeta=0, age=36):\n tracks = df.groupby('num')\n filterdf = tracks.filter(lambda x: (x['datetime'].dt.year.min() >= start_year) &\n (x['datetime'].dt.year.max() <= end_year) &\n (x['age'].max() >= age) &\n (np.abs(x['vorticity'].min()) > zeta))\n return filterdf", "def bin_obs_data(ds, s_lat=-30, n_lat=30, 
bin_var_nm='omega500',\n grp_time_var='year', bins=np.arange(0,1.1,0.1), land_sea='global', land_mask_dir='./data/'):\n ds_m = ds.where(np.logical_and(ds.lat>=s_lat, ds.lat<=n_lat), drop=True)\n\n ds_mask = xr.open_dataset(os.path.join(land_mask_dir, 'era_land_t42.nc'), decode_times=False)\n ds_mask = ds_mask.where(np.logical_and(ds_mask.lat>=s_lat,ds_mask.lat<=n_lat), drop=True)\n #ds_m.coords['mask'] = (('lat', 'lon'), ds_mask.land_mask.values)\n\n bin_data_dict = {'omega500': ds_m.omega500} \n\n vars_dict = {}\n\n ## 3d variables\n bin_data_dict2 = copy.deepcopy(bin_data_dict)\n pdf_m, ds_bin_mean_m, dims, coords2 = select_3d_obs_data(ds_m, bin_data_dict2, ds_mask,\n bins, bin_var_nm=bin_var_nm, land_sea=land_sea, grp_time_var=grp_time_var)\n for key, val in ds_bin_mean_m.items():\n vars_dict[key] = (dims, val)\n \n vars_dict['pdf'] = (dims, pdf_m)\n ds_bin_mean_m_array = xr.Dataset(vars_dict, coords=coords2)\n\n return ds_bin_mean_m_array", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n 
plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def analysis_function_cwr(system, wavelength_idx, config, surface):\n\n # Set Current Configuration\n system.MCE.SetCurrentConfiguration(config)\n\n # Get the Field Points for that configuration\n sysField = system.SystemData.Fields\n N_fields = sysField.NumberOfFields\n N_waves = len(wavelength_idx)\n\n # We will trace all 3 available Field Points (the centre and both edges of the slice)\n X_MAX = np.max([np.abs(sysField.GetField(i + 1).X) for i in range(N_fields)])\n Y_MAX = np.max([np.abs(sysField.GetField(i + 1).Y) for i in range(N_fields)])\n\n hx = np.array([sysField.GetField(i + 1).X / X_MAX for i in range(N_fields)])\n hy = np.array([sysField.GetField(i + 1).Y / Y_MAX for i in range(N_fields)])\n\n # The Field coordinates for the Object\n # obj_xy = np.array([X_MAX * hx, Y_MAX * hy]).T\n foc_xy = np.empty((N_waves, N_fields, 2))\n\n N_rays = N_waves * N_fields\n\n raytrace = system.Tools.OpenBatchRayTrace()\n normUnPolData = raytrace.CreateNormUnpol(N_rays, constants.RaysType_Real, surface)\n\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n # Add the ray to the RayTrace\n normUnPolData.AddRay(wave_idx, h_x, h_y, 0, 0, constants.OPDMode_None)\n\n # Run the RayTrace for the whole Slice\n CastTo(raytrace, 'ISystemTool').RunAndWaitForCompletion()\n # time_ray = time() - start\n # print(\"Time spent running Raytrace: %.3f sec\" % time_ray)\n\n # start = time()\n normUnPolData.StartReadingResults()\n # Loop over the wavelengths\n for i_wave, wave_idx in enumerate(wavelength_idx):\n # Loop over all Spaxels in the Slice\n for j_field, (h_x, h_y) in enumerate(zip(hx, hy)):\n\n output = normUnPolData.ReadNextResult()\n if output[2] == 0: # ignore the vignetting\n x, y = output[4], output[5]\n foc_xy[i_wave, j_field, 0] = x\n foc_xy[i_wave, j_field, 1] = y\n\n normUnPolData.ClearData()\n CastTo(raytrace, 'ISystemTool').Close()\n\n return [foc_xy]", "def _window_function(arr: np.ndarray, border: int = 0) -> np.ndarray:\n ndata = len(arr)\n nwind = ndata - 2 * border\n w = np.zeros(ndata)\n for i in range(nwind):\n w[i + border] = np.sin(np.pi * (i + 1.0) / (nwind + 1.0))\n return w", "def filter_callback(self, assemblers, filters):\n # Find the characteristic length of the domain and set the filter length scale\n r0 = self.r0_frac * self.a\n mfilter = TopOptUtils.Mfilter(self.N, assemblers, filters, dim=3, r=r0)\n mfilter.initialize()\n return mfilter", "def include_wcs_in_masks(input_images):\n img_list = [astroim.Astroim(im_name, memmap=True) for im_name in input_images]\n mask_names = [im.primary_header.get(\"MASK\") for im in img_list]\n output = []\n for im_object, mask_name in zip(img_list, mask_names):\n with fits.open(mask_name, 'readonly') as mask:\n mask_header = im_object.chips[0].header.hdr\n mask_data = mask[0].data.copy()\n mask_data[mask_data>0] = 1\n _, path = tempfile.mkstemp(suffix=\".fits\")\n fits.writeto(path, mask_data * 1., mask_header, clobber=True)\n output.append(path)\n return output", "def _process_data(data, band):\n\n meta = {key:value for 
key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def hourly_grib2_to_netcdf(grib_file, grib_source, nc_file, nc_var_name,\n grib_var_name, grib_level, cache_size=100,\n initial_year=1979, overwrite_nc_units=None,\n include_analysis=True,\n nc_format='NETCDF4'):\n\n list_of_msg_dicts = gribou.get_all_msg_dict(grib_file)\n list_of_i, analysis_present = filter_var_timesteps(list_of_msg_dicts,\n grib_var_name,\n grib_level,\n include_analysis)\n cfsr_var = CFSRVariable(list_of_msg_dicts[list_of_i[0]])\n lats, lons = gribou.get_latlons(grib_file, list_of_i[0] + 1)\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n nc1 = netCDF4.Dataset(nc_file, 'w', format=nc_format)\n\n nc1.Conventions = 'CF-1.5'\n nc1.title = 'Climate System Forecast Reanalysis'\n nc1.history = \"%s: Convert from grib2 to NetCDF\" % (now,)\n nc1.institution = 'NCEP'\n nc1.source = 'Reanalysis'\n nc1.references = 'http://cfs.ncep.noaa.gov/cfsr/'\n if analysis_present:\n msg1 = \"Obtained from %s server, \" % (grib_source,)\n msg2 = \"analysis is included, 6h forecast removed.\"\n nc1.comment = msg1 + msg2\n else:\n msg1 = \"Obtained from %s server, \" % (grib_source,)\n msg2 = \"no analysis, 6h forecast is included.\"\n nc1.comment = msg1 + msg2\n nc1.redistribution = \"Free to redistribute.\"\n\n nc1.createDimension('time', None)\n nc1.createDimension('timecomp', 6)\n nc1.createDimension('lat', lats.shape[0])\n nc1.createDimension('lon', lats.shape[1])\n\n nc1.createVariable('timecomp', 'i2', ('timecomp',), zlib=True, fill_value=defi2)\n\n time = nc1.createVariable('time', 'i4', ('time',), zlib=True)\n time.axis = 'T'\n if initial_year is None:\n warp = (str(cfsr_var.grib_msg_dict['year']),)\n else:\n warp = (initial_year,)\n time.units = \"hours since %s-01-01 00:00:00\" % warp\n time.long_name = 'time'\n time.standard_name = 'time'\n time.calendar = 'gregorian'\n\n time_vectors = nc1.createVariable('time_vectors', 'i2', ('time', 'timecomp'),\n zlib=True)\n\n vtype = cfsr_var.vertical_type\n if vtype in ['depthBelowSea', 'heightAboveGround']:\n try:\n dummy = len(cfsr_var.level)\n bounds = True\n except:\n bounds = False\n else:\n nc1.createDimension('nv', 2)\n level = nc1.createVariable('level', 'f4', (), zlib=True)\n level.axis = 'Z'\n level.units = cfsr_var.vertical_units\n if vtype == 'depthBelowSea':\n level.positive = 'up'\n else:\n level.positive = 'down'\n level.long_name = vtype\n level.standard_name = standard_names[vtype]\n if bounds:\n level.bounds = 'level_bnds'\n level_bnds = nc1.createVariable('level_bnds', 'f4', ('nv',), zlib=True)\n level_bnds[0] = 
cfsr_var.level[0]\n level_bnds[1] = cfsr_var.level[1]\n level[:] = (level_bnds[0] + level_bnds[1]) / 2.0\n else:\n level[:] = cfsr_var.level\n\n lat = nc1.createVariable('lat', 'f4', ('lat'), zlib=True)\n lat.axis = 'Y'\n lat.units = 'degrees_north'\n lat.long_name = 'latitude'\n lat.standard_name = 'latitude'\n lat[:] = lats[::-1, 0]\n\n lon = nc1.createVariable('lon', 'f4', ('lon'), zlib=True)\n lon.axis = 'X'\n lon.units = 'degrees_east'\n lon.long_name = 'longitude'\n lon.standard_name = 'longitude'\n lon[:] = lons[0, :]\n\n warp = optimal_chunksizes(len(list_of_i), lat.size, lon.size)\n var1 = nc1.createVariable(nc_var_name, 'f4', ('time', 'lat', 'lon'), zlib=True,\n fill_value=deff4, chunksizes=warp)\n if overwrite_nc_units is None:\n var1.units = cfsr_var.units\n else:\n var1.units = overwrite_nc_units\n var1.long_name = cfsr_var.name\n var1.standard_name = standard_names[nc_var_name]\n var1.statistic = cfsr_var.statistic\n\n t = 0 # counter for the NetCDF file\n c = 0 # counter for our temporary array\n temporary_array = ma.zeros([cache_size, var1.shape[1], var1.shape[2]])\n temporary_tvs = np.zeros([cache_size, 6])\n flag_runtimeerror = False\n for i, grb_msg in enumerate(gribou.msg_iterator(grib_file)):\n if i not in list_of_i:\n continue\n try:\n data = grb_msg['values'][::-1, :]\n except RuntimeError:\n data = ma.masked_all([var1.shape[1], var1.shape[2]])\n flag_runtimeerror = True\n dt = list_of_msg_dicts[i]['endStep'] - list_of_msg_dicts[i]['startStep']\n if cfsr_var.statistic == 'avg':\n if dt == 1:\n temporary_array[c, :, :] = data\n else:\n if list_of_msg_dicts[i]['startStep'] != 0:\n raise NotImplementedError(\"Weird delta t?\")\n x = list_of_msg_dicts[i]['endStep']\n temporary_array[c, :, :] = x * data - (x - 1) * previous_data\n elif cfsr_var.statistic == 'accum':\n if dt == 1:\n temporary_array[c, :, :] = data / 3600.0\n else:\n if list_of_msg_dicts[i]['startStep'] != 0:\n raise NotImplementedError(\"Weird delta t?\")\n temporary_array[c, :, :] = (data - previous_data) / 3600.0\n else:\n temporary_array[c, :, :] = data\n temporary_tvs[c, 0] = list_of_msg_dicts[i]['year']\n temporary_tvs[c, 1] = list_of_msg_dicts[i]['month']\n temporary_tvs[c, 2] = list_of_msg_dicts[i]['day']\n warp = list_of_msg_dicts[i]['hour'] + list_of_msg_dicts[i]['endStep']\n temporary_tvs[c, 3] = warp\n if temporary_tvs[c, 3] == 24:\n temporary_tvs[c, 3] = 0\n warp = CalGregorian.count_days_in_cycle(temporary_tvs[c, 1],\n temporary_tvs[c, 0])\n if temporary_tvs[c, 2] == warp:\n temporary_tvs[c, 2] = 1\n if temporary_tvs[c, 1] == 12:\n temporary_tvs[c, 1] = 1\n temporary_tvs[c, 0] = temporary_tvs[c, 0] + 1\n else:\n temporary_tvs[c, 1] = temporary_tvs[c, 1] + 1\n else:\n temporary_tvs[c, 2] = temporary_tvs[c, 2] + 1\n temporary_tvs[c, 4] = 0\n temporary_tvs[c, 5] = 0\n c += 1\n if c == cache_size:\n c = 0\n if nc_var_name == 'clt':\n var1[t:t + cache_size, :, :] = temporary_array / 100.0\n else:\n var1[t:t + cache_size, :, :] = temporary_array\n time_vectors[t:t + cache_size, :] = temporary_tvs\n t += cache_size\n previous_data = data\n if nc_var_name == 'clt':\n var1[t:t + c, :, :] = temporary_array[0:c, :, :] / 100.0\n else:\n var1[t:t + c, :, :] = temporary_array[0:c, :, :]\n time_vectors[t:t + c, :] = temporary_tvs[0:c, :]\n\n datetimes, masked, valid = nc._time_vectors_to_datetimes(time_vectors[:, :])\n num1 = netCDF4.date2num(datetimes, time.units, time.calendar)\n if time.dtype in [np.int8, np.int16, np.int32, np.int64]:\n time[valid] = np.array(np.round(num1), dtype=time.dtype)\n else:\n 
time[valid] = num1\n if len(masked): time[masked] = ma.masked_all([len(masked)])\n\n if flag_runtimeerror:\n nc1.warnings = \"RuntimeError encountered, missing values inserted.\"\n nc1.close()", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def smooth_spectrum(fluxes, window_width=7, passes=3):\n smoothed = numpy.array(fluxes)\n weights = numpy.ones(window_width) / window_width\n half_width = window_width // 2\n for i in range(passes):\n smoothed = numpy.concatenate((smoothed[half_width:0:-1], smoothed,\n smoothed[-2:-half_width - 2: -1]))\n smoothed = numpy.convolve(smoothed, weights, 'valid')\n return smoothed", "def create_grism_waverange(outname=\"\",\n history=\"Ground NIRCAM Grismwavelengthrange\",\n author=\"STScI\",\n filter_range=None):\n ref_kw = common_reference_file_keywords(reftype=\"wavelengthrange\",\n title=\"NIRCAM Grism wavelenghtrange\",\n description=\"NIRCAM Grism+Filter Wavelength Ranges\",\n exp_type=\"NRC_GRISM\",\n author=author,\n model_type=\"WavelengthrangeModel\",\n filename=outname,\n )\n\n if filter_range is None:\n # These numbers from Nor Pirzkal, in microns\n filter_range = {1: {'F250M': [2.500411072, 4.800260833],\n 'F277W': [2.500411072, 3.807062006],\n 'F300M': [2.684896869, 4.025318456],\n 'F322W2': [2.5011293930000003, 4.215842089],\n 'F335M': [3.01459734, 4.260432726],\n 'F356W': [3.001085025, 4.302320901],\n 'F360M': [3.178096344, 4.00099629],\n 'F410M': [3.6267051809999997, 4.5644598],\n 'F430M': [4.04828939, 4.511761774],\n 'F444W': [3.696969216, 4.899565197],\n 'F460M': [3.103778615, 4.881999188],\n 'F480M': [4.5158154679999996, 4.899565197]},\n 2: {'F250M': [2.500411072, 2.667345336],\n 'F277W': [2.500411072, 3.2642254050000004],\n 'F300M': [2.6659796289999997, 3.2997071729999994],\n 'F322W2': [2.5011293930000003, 4.136119434],\n 'F335M': [2.54572003, 3.6780519760000003],\n 'F356W': [2.529505253, 4.133416971],\n 'F360M': [2.557881113, 4.83740855],\n 'F410M': [2.5186954019999996, 4.759037127],\n 'F430M': [2.5362614100000003, 4.541488865],\n 'F444W': [2.5011293930000003, 4.899565197],\n 'F460M': [2.575447122, 4.883350419],\n 'F480M': [2.549773725, 4.899565197]}}\n\n # array of integers\n orders = list(filter_range.keys())\n orders.sort()\n\n # same filters for every order, array of strings\n wrange_selector = list(filter_range[orders[0]].keys())\n wrange_selector.sort()\n\n # The lists below need\n # to remain ordered to be correctly referenced\n wavelengthrange = []\n for order in orders:\n o = []\n for fname in wrange_selector:\n o.append(filter_range[order][fname])\n wavelengthrange.append(o)\n\n ref = wcs_ref_models.WavelengthrangeModel()\n ref.meta.update(ref_kw)\n ref.meta.exposure.p_exptype = \"NRC_GRISM|NRC_TSGRISM\"\n ref.meta.input_units = u.micron\n ref.meta.output_units = u.micron\n ref.wrange_selector = wrange_selector\n ref.wrange = wavelengthrange\n ref.order = orders\n\n entry = HistoryEntry({'description': history, 'time': 
datetime.datetime.utcnow()})\n sdict = Software({'name': 'nircam_reftools.py',\n 'author': author,\n 'homepage': 'https://github.com/spacetelescope/jwreftools',\n 'version': '0.7.1'})\n entry['sofware'] = sdict\n ref.history['entries'] = [entry]\n ref.to_asdf(outname)\n ref.validate()", "def write_jpeg(filename,band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t stepsz=1.,clobber=False,verbose=0,tscale=1000.,retries=20):\n\tscipy.misc.imsave(filename,countmap(band,skypos,tranges,skyrange,\n\t\t\t\t\t width=width,height=height,verbose=verbose,tscale=tscale,\n\t\t\t\t\t retries=retries))\n\treturn", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {},\n show_mask: bool = False) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(detail_brz=1500, lines_brz=1000)\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def itkBoundedReciprocalImageFilterIUC3IUC3_cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIUC3IUC3_cast(*args)", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, debug, subbands_ifft, return_subbands_only, rectify_and_lowpass_subbands, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function, include_subbands_noise, subbands_noise_mean, subbands_noise_stddev):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n if debug: # return the real and imaginary parts of the subbands separately -- use if matching to their output\n nets['subbands_r'] = tf.real(nets['subbands'])\n nets['subbands_i'] = tf.imag(nets['subbands'])\n\n # TODO: with using subbands_ifft is redundant. \n # make the time subband operations if we are returning the subbands or if we want to include all of the keys in the graph\n if subbands_ifft or return_subbands_only or include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n if return_subbands_only or include_all_keys:\n nets['subbands_time'] = nets['subbands_ifft']\n if rectify_and_lowpass_subbands: # TODO: the subband operations are hard coded in?\n nets['subbands_time_relu'] = tf.nn.relu(nets['subbands_time'], name='rectified_subbands')\n nets['subbands_time_lowpassed'] = hanning_pooling_1d_no_depthwise(nets['subbands_time_relu'], downsample=2, length_of_window=2*4, make_plots=False, data_format='NCW', normalize=True, sqrt_window=False)\n\n # TODO: noise is only added in the case when we are calcalculating the time subbands, but we might want something similar for the cochleagram\n if return_subbands_only or include_all_keys:\n # Compress subbands if specified and add noise. 
\n nets = compression_function(nets, input_node_name='subbands_time_lowpassed', output_node_name='subbands_time_lowpassed_compressed')\n if include_subbands_noise:\n nets = add_neural_noise(nets, subbands_noise_mean, subbands_noise_stddev, input_node_name='subbands_time_lowpassed_compressed', output_node_name='subbands_time_lowpassed_compressed_with_noise')\n nets['subbands_time_lowpassed_compressed_with_noise'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed_with_noise'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed_with_noise']\n else:\n nets['subbands_time_lowpassed_compressed'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed']\n\n return nets", "def resample(is_local, baz, ac):\n if is_local == 'local':\n ac.data = ac.data[0: 1800 * ac[0].stats.sampling_rate]\n ac.decimate(factor=2)\n sec = 5\n cutoff = 2.0 # local events\n elif is_local == 'non-local':\n ac.decimate(factor=4)\n sec = 120\n cutoff = 1.0 # nonlocal events\n else:\n ac.data = trr.data[0: 1800 * ac[0].stats.sampling_rate]\n ac.decimate(factor=2)\n sec = 3\n cutoff = 4.0 # close events\n \n return ac, sec, cutoff", "def MyFilter(data, window_width=10, beta=2.0, draw_graph=False):\n\n #read data and change the format\n if 'time' in data.columns:\n date_list = []\n for i in data.index:\n date_parse = parse(str(data.ix[i].time))\n date_list.append(date_parse)\n data['date'] = date_list\n data_use = data\n data_use.index = data_use['date'].tolist()\n data_use = data_use.drop(['date','time'], axis=1)\n data_use.index.name = 'time'\n else:\n data_use = data\n #design filter, use the kaiser window here\n window = signal.kaiser(window_width, beta=beta)\n data_use['close_filtered'] = signal.convolve(data_use['close'], window, mode='same') / sum(window)\n data_use['high_frequency'] = data_use['close'] - data_use['close_filtered']\n\n #delete the distortion datas after filtered\n if window_width % 2 == 0:\n data_changed = data_use[window_width/2: -(window_width/2 - 1)]\n else:\n data_changed = data_use[(window_width-1)/2: -(window_width-1)/2]\n\n #draw graph\n if (draw_graph == True) :\n fig = plt.figure()\n ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2)\n data_changed.loc[:,'close'].plot(style='r', label='original')\n data_changed.loc[:,'close_filtered'].plot(style='k', label='filtered')\n plt.title('Kaiser window_width = %d , const = %d' % (window_width, beta))\n plt.legend(loc='best')\n\n ax2 = plt.subplot2grid((3,1), (2,0))\n data_changed.loc[:,'high_frequency'].plot(label='high_frequency')\n ax2.set_ylim([-150, 150])\n plt.title('High Frequency')\n plt.legend(loc='best')\n plt.show()\n # print data_use\n # print data_changed\n data_out = data_changed['close_filtered']\n return np.array(data_out.tolist())", "def get_rgb_bands(image, bands):\n if bands is not MONOCHROME:\n red = image[:, :, bands['red']]\n green = image[:, :, bands['green']]\n blue = image[:, :, bands['blue']]\n\n img = np.rollaxis(np.array([red, green, blue]), 0, 3)\n else:\n img = color.grey2rgb(image)\n\n return img", "def apply_photo_style(path, decade):\n flt_path = os.path.dirname(path) + \"/\" + str(uuid.uuid4()) + \".jpg\"\n shutil.copyfile(path, flt_path) # make a copy of image because part of the filters change image in place\n f = None\n if decade <= 1930 or decade == 1950 or decade == 1970:\n success = execute_js(js_path, arguments='{} {} {}'.format(path, decade, flt_path)) # execute js rendering with Naked\n if 
decade == 1930:\n f = Thirties(flt_path)\n if decade == 1940:\n f = Gotham(flt_path)\n \n if decade == 1950 or decade == 1960: # for non-standard photo frames \n padding_x = 80\n if decade == 1950: # kodachrome frame\n padding_top = 80\n padding_bottom = 240\n else: # polaroid frame\n padding_bottom = 80\n padding_x = padding_top = 0\n expand_rect_padding(flt_path, padding_x, padding_top, padding_bottom, flt_path)\n \n if decade == 1950:\n f = Fifties(flt_path)\n if decade == 1960:\n f = Toaster(flt_path)\n if decade == 1970:\n f = Seventies(flt_path)\n if decade == 1980:\n f = Nashville(flt_path)\n if decade == 1990:\n f = Lomo(flt_path)\n if decade == 2000:\n f = Davehill(flt_path)\n \n if f is not None:\n f.apply() # apply photo filter using imagemagick\n\n if decade == 1940:\n # resize fix - gotham filter output image slightly differs in size so resize it to sizes of original image\n origin_img = Image.open(path)\n width, height = origin_img.size \n img = Image.open(flt_path) \n img = img.resize([width,height], Image.ANTIALIAS)\n img.save(flt_path, \"JPEG\")\n\n return flt_path", "def build_mask(sst, qual, qual_thresh=2, temp_bounds=(-2,33)):\n sst[np.isnan(sst)] = np.nan\n qual[np.isnan(qual)] = np.nan\n # Deal with NaN\n masks = np.logical_or(np.isnan(sst), np.isnan(qual))\n # Temperature bounds and quality\n qual_masks = np.zeros_like(masks)\n qual_masks[~masks] = (qual[~masks] > qual_thresh) | (sst[~masks] <= temp_bounds[0]) | (sst[~masks] > temp_bounds[1])\n masks = np.logical_or(masks, qual_masks)\n # Return\n return masks", "def test_window_filter(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 5)\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)", "def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]", "def apply_bandpass_filter_timeseries(self, folder_name, indices, start_stop_freq, stop_stop_freq):\n (x_index, y_index) = indices\n photo_list = self.get_photo_list(folder_name)\n\n ts = self.get_pixel_timeseries(folder_name, (x_index, y_index))\n self.plot_fft_pixel_timeseries(folder_name, ts, str(x_index) + '_' + str(y_index) + 'pre_butterworth')\n n = len(ts)\n frequency = self.get_sampling_frequency(folder_name)\n d = 1.0 / frequency # 'sample spacing'\n fig, ax = plt.subplots()\n sample_freqs = np.fft.rfftfreq(n, d)\n fourier = np.fft.rfft(ts)\n print(sample_freqs)\n nyquist = frequency / 2.0\n\n start_stop_band = start_stop_freq / nyquist\n stop_stop_band = stop_stop_freq / nyquist\n\n print(start_stop_band)\n print(stop_stop_band)\n\n sos = sgnl.butter(2, Wn=[start_stop_band, stop_stop_band], btype='bandstop', output='sos')\n filtered = sgnl.sosfilt(sos, ts)\n self.plot_fft_pixel_timeseries(folder_name, filtered, str(x_index) + '_' + str(y_index) + 'post_butterworth')\n fig, ax = plt.subplots()\n indices = self.get_indices_from_filenames(folder_name)\n index_dates = dates.date2num(indices)\n ax.plot_date(index_dates, ts, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index))\n ax.plot_date(index_dates, filtered, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index) + ' filtered')\n\n ax.legend()\n ax.grid(b=True, which='major', color='#666666', linestyle='-')\n\n # Show the minor grid lines with very faint and almost transparent grey lines\n ax.minorticks_on()\n ax.grid(b=True, which='minor', color='#999999', 
linestyle='-', alpha=0.2)\n fig.set_figwidth(40)\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.png')\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.svg')\n fig.clf()", "def filter_bands(self, imagery, bands=None, names=None, wavelengths=None) -> 'ImageCollection':\n\n graph = {\n 'process_id': 'filter_bands',\n 'imagery': imagery.graph,\n }\n\n if bands:\n graph['bands'] = bands\n if names:\n graph['names'] = names\n if wavelengths:\n graph['wavelengths'] = wavelengths\n\n imagery.graph = graph\n return imagery", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def make_skydark(files, ext=1, nproc=6, title='ext_1', overwrite=False):\n\n # See if outfile already exists\n outfile = 'skydark_{}.fits'.format(title)\n if (os.path.exists(outfile)) & (overwrite is False):\n print('{} already exists, stopping...'.format(outfile))\n\n else:\n print('Making a stack of the input files...')\n stack = np.zeros((len(files), 2051, 4096))\n for i,f in enumerate(files):\n h = fits.open(f)\n data = h[ext].data\n #dq = h[ext+2].data\n\n # Get the segmap for this file\n segmap_file = f.replace('.fits', '_seg_ext_{}.fits'.format(ext))\n if not os.path.isfile(segmap_file): # sometimes input files are medsub/equalized\n segmap_file = f.replace('_medsub', '').replace('_eq', '').replace('.fits', '_seg_ext_{}.fits'.format(ext))\n segmap = fits.getdata(segmap_file)\n\n # Mask bad pixels and sources\n #data[dq!=0] = np.nan\n data[segmap>0] = np.nan\n stack[i] = data\n h.close()\n\n # Make the skydark\n print('Calculating the median through the stack of input files...')\n if nproc==1:\n skydark = np.nanmedian(stack, axis=0)\n else:\n stacks = np.split(stack, 16, axis=2) # split stack into 16 2048x256 sections\n p = Pool(nproc)\n results = p.map(med_stack, stacks)\n skydark = np.concatenate(results, axis=1)\n\n # Write out the sky dark\n fits.writeto(outfile, skydark, overwrite=True)\n print('Sky dark generated.')\n\n # Make a filtered version of the skydark\n print('Filtering the sky dark...')\n amp1, amp2 = np.split(skydark, 2, axis=1) # treat amps separately\n sigma_clip = SigmaClip(sigma=3.)\n bkg_estimator = MedianBackground()\n bkg1 = Background2D(amp1, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n bkg2 = Background2D(amp2, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n filtered = np.concatenate((bkg1.background, bkg2.background), axis=1)\n fits.writeto('{}_filtered.fits'.format(outfile.replace('.fits','')), \n filtered, overwrite=True)\n 
print('Filtered sky dark generated.')", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def colorfilter(FilteredMask):\n FilteredMask3channels=np.stack((FilteredMask,)*3, axis=-1)\n for i in range(len(FilteredMask)):\n for j in range(len(FilteredMask[0])):\n if FilteredMask[i,j]==0:\n FilteredMask3channels[i,j]=[0,0,0]\n if FilteredMask[i,j]==1:\n FilteredMask3channels[i,j]=[255,255,255]\n if FilteredMask[i,j]==2:\n FilteredMask3channels[i,j]=[255,0,0]\n if FilteredMask[i,j]==3:\n FilteredMask3channels[i,j]=[0,255,0]\n if FilteredMask[i,j]==4:\n FilteredMask3channels[i,j]=[0,0,255]\n return FilteredMask3channels", "def transition(name, year, grids, control):\n ## SETUP -------------------------------------------------\n cc = control['cohorts'][name + '_Control']\n from_cohort_a0 = grids.area[name + '--0', year]\n from_cohort = grids.area[name, year]\n transitions_to = cc['transitions_to']\n to_cohort_a0 = grids.area[transitions_to + '--0', year]\n to_cohort = grids.area[transitions_to + '--0', year]\n ice_slope = \\\n grids.ice.get_ice_slope_grid( name )\\\n .reshape(grids.shape).astype(np.float32)\n ALD, PL = grids.ald['ALD', year], grids.ald[name ,year] \n AOI = grids.area.area_of_interest()\n POIn = grids.poi[name, year]\n POInm1 = grids.poi[name, year-1]\n drainage = grids.drainage.grid.reshape(grids.shape)\n above_idx = drainage == 'above'\n porosity = grids.ald.porosity[name]\n\n max_rot = cc['max_terrain_transition']\n\n if cc['POI_Function'] == 'Sigmoid2':\n params = np.array([\n cc['Parameters']['above']['sigmoid2_K'],\n cc['Parameters']['above']['sigmoid2_C'],\n cc['Parameters']['above']['sigmoid2_A'],\n cc['Parameters']['above']['sigmoid2_B'],\n cc['Parameters']['below']['sigmoid2_K'],\n cc['Parameters']['below']['sigmoid2_C'],\n cc['Parameters']['below']['sigmoid2_A'],\n cc['Parameters']['below']['sigmoid2_B'],\n ]).astype(np.float32)\n poi_func = calc_new_sig2_poi\n elif cc['POI_Function'] == 'Sigmoid':\n params = np.array([\n cc['Parameters']['above']['sigmoid_A1'],\n cc['Parameters']['above']['sigmoid_A2'],\n cc['Parameters']['above']['sigmoid_x0'],\n cc['Parameters']['above']['sigmoid_dx'],\n cc['Parameters']['below']['sigmoid_A1'],\n cc['Parameters']['below']['sigmoid_A2'],\n cc['Parameters']['below']['sigmoid_x0'],\n cc['Parameters']['below']['sigmoid_dx'],\n ]).astype(np.float32)\n poi_func = calc_new_sig_poi\n elif cc['POI_Function'] == 'Hill':\n params = np.array([\n cc['Parameters']['above']['hill_B'],\n cc['Parameters']['above']['hill_N'],\n cc['Parameters']['below']['hill_B'],\n cc['Parameters']['below']['hill_N'],\n ]).astype(np.float32)\n poi_func = calc_new_hill_poi\n elif cc['POI_Function'] == 'Linear':\n params = np.array([\n cc['Parameters']['above']['linear_a'],\n cc['Parameters']['above']['linear_b'],\n cc['Parameters']['below']['linear_a'],\n cc['Parameters']['below']['linear_b'],\n ]).astype(np.float32)\n poi_func = calc_linear_linear_poi\n else:\n raise KeyError(\"Not a valid function type\")\n\n present = from_cohort > 0\n pl_breach = ALD >= PL\n current_cell_mask = np.logical_and(np.logical_and(AOI, present), pl_breach)\n\n ## work ---------------\n blocks = (32, 32)\n threads = (\n int(np.ceil(ALD.shape[0] / blocks[0])),\n int(np.ceil(ALD.shape[1] / blocks[1]))\n )\n \n X = np.zeros(ALD.shape)\n calc_x[blocks, threads](X, 
ALD,PL)#.astype(np.float32)\n\n \n new_poi = np.zeros(X.shape)\n poi_func(new_poi, params, X, above_idx)\n\n\n update_poi[blocks, threads](POIn, POInm1, new_poi, current_cell_mask)\n \n\n # not cuda'd\n ALD[current_cell_mask] = \\\n ALD[current_cell_mask] + \\\n (ALD[current_cell_mask] - PL[ current_cell_mask ] ) * porosity\n\n rate_of_transition = np.zeros(POIn.shape) \n calc_rot[blocks, threads](rate_of_transition, POIn, ice_slope, max_rot)\n\n change = np.zeros(POIn.shape) \n calc_change[blocks, threads](\n change, rate_of_transition, from_cohort, present\n )\n \n # not cuda'd\n to_cohort_a0[present] = to_cohort[present] + change[present]\n from_cohort_a0[present] = from_cohort[present] - change[present]", "def apply_tracking3(td, time_us=1000, alpha=0.7, threshold=-1):\n assert (alpha >= 0)\n assert (alpha <= 1)\n mix = 1 - alpha\n track_x = center_x = float(td.width / 2)\n track_y = center_y = float(td.height / 2)\n threshold_sq = math.floor(center_y ** 2)\n\n if threshold > 0:\n threshold_sq = math.floor(threshold ** 2)\n\n copy = np.copy(td.data).view(np.recarray)\n offset_x = offset_y = 0\n offset_x_arr = np.zeros(copy.size, np.float32)\n offset_y_arr = np.zeros(copy.size, np.float32)\n offset_index = 0 # used to keep track of the offsets we are writing to\n\n for start_ts in range(copy[0].ts, copy[-1].ts, time_us):\n end_ts = start_ts + time_us\n frame_data = copy[(copy.ts >= start_ts) & (copy.ts < end_ts)]\n distances = ((frame_data.x - track_x) ** 2) + (\n (frame_data.y - track_y) ** 2)\n valid_data = frame_data[distances < threshold_sq]\n\n if valid_data.size > 0:\n x_avg = float(np.sum(valid_data.x)) / valid_data.size\n y_avg = float(np.sum(valid_data.y)) / valid_data.size\n track_x = (track_x * alpha) + (x_avg * mix)\n track_y = (track_y * alpha) + (y_avg * mix)\n\n offset_x = int(round(center_x - track_x))\n offset_y = int(round(center_y - track_y))\n offset_x_arr[offset_index:offset_index + frame_data.size] = \\\n offset_x\n offset_y_arr[offset_index:offset_index + frame_data.size] = \\\n offset_y\n offset_index += frame_data.size\n\n offset_x_arr[offset_index:] = offset_x\n offset_y_arr[offset_index:] = offset_y\n copy.x = (copy.x + offset_x_arr).astype(np.uint8)\n copy.y = (copy.y + offset_y_arr).astype(np.uint8)\n # remove the events that are out of bounds\n return copy[(copy.x >= 0) & (copy.y >= 0) & (copy.x < td.width) & (\n copy.y < td.height)]", "def sliding_window(frame_length, step, Xsampleslist, ysampleslist):\n Xsamples = []\n ysamples = []\n for j in range(len(Xsampleslist)):\n X = Xsampleslist[j]\n ybinary = ysampleslist[j]\n for i in range(0, X.shape[0] - frame_length, step):\n xsub = X[i:i + frame_length, :]\n ysub = ybinary\n Xsamples.append(xsub)\n ysamples.append(ysub)\n return Xsamples, ysamples" ]
[ "0.77075994", "0.6490901", "0.642566", "0.5768101", "0.5706588", "0.548362", "0.5444088", "0.52517235", "0.50446564", "0.50012696", "0.49663457", "0.4957738", "0.49230793", "0.47313127", "0.4699657", "0.46902734", "0.4674825", "0.46389544", "0.45470658", "0.45407534", "0.45270845", "0.4523979", "0.45129216", "0.45099437", "0.4507825", "0.45014837", "0.44892976", "0.44728613", "0.44616643", "0.4445203", "0.44332707", "0.4421556", "0.44029093", "0.43944678", "0.4387747", "0.4383393", "0.435107", "0.43137622", "0.4301624", "0.4295893", "0.42883807", "0.42864302", "0.42647204", "0.42569658", "0.42092794", "0.42057022", "0.42051747", "0.41977724", "0.41975445", "0.41932267", "0.41896042", "0.41850904", "0.4176378", "0.4172823", "0.4149823", "0.41295046", "0.41261", "0.41144177", "0.4108039", "0.4102236", "0.4100695", "0.408579", "0.40815496", "0.40812632", "0.4066224", "0.4059625", "0.40579575", "0.40571797", "0.4056939", "0.40559867", "0.4050334", "0.4046147", "0.40436494", "0.40383732", "0.40382642", "0.403515", "0.40339205", "0.40316737", "0.40278447", "0.40229765", "0.40227398", "0.4017301", "0.40144643", "0.40128696", "0.400479", "0.40026283", "0.39980006", "0.39881015", "0.3982953", "0.3976032", "0.39673313", "0.39672306", "0.39649534", "0.39636496", "0.39624447", "0.39564636", "0.39549574", "0.39473647", "0.39473262", "0.3946425" ]
0.46771812
16
A helper function to perform a 4 year moving window filter for a single land cover value (such as Forest as 1) for one four year window representing year(i-1), year(i), year(i+1), and year(i+2) annual land cover classifications. This function applies to one window, and should only be called using the function applyWindow4years. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of four consecutive years, and if the extremities of the consecutive years are identical but the centre positions are not, then the central pixels are reclassified to match their temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def mask4(imagem, value, bandNames):
    mask = imagem.select(bandNames[0]).eq(value) \
        .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \
        .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \
        .bitwiseAnd(imagem.select(bandNames[3]).eq(value))
    change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)
    change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value)
    img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)
    return img_out
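A minimal usage sketch of the function above, assuming an initialized Earth Engine session, an annual land cover image, and hypothetical band names and asset id; mask4 is applied to a single four-year slice, and only the two central years of that slice can be reclassified. In practice the query notes it is driven across all years by a wrapper such as applyWindow4years.

import ee

ee.Initialize()  # assumes Earth Engine credentials are already configured

# Hypothetical annual classification bands, in chronological order.
bandNames = ['classification_1985', 'classification_1986',
             'classification_1987', 'classification_1988',
             'classification_1989', 'classification_1990']
landcover_image = ee.Image('users/example/annual_landcover')  # hypothetical asset

# One window: 1985-1988. Pixels classified as Forest (value 1) in 1985 and 1988
# but not in 1986 and 1987 have those two central years reset to Forest.
window_filtered = mask4(landcover_image, 1, bandNames[0:4])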
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find 
if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = 
ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = 
ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and 
the number of changes > numChangesCutoff \n    incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n    \n    #Get an image that represents the mode of the land cover classes in each pixel\n    mode_image = image.reduce(ee.Reducer.mode())\n    \n    #Replace pixels of image where incidence_filter is True with mode_image\n    incidence_filtered = image.where(incidence_filter, mode_image)\n    \n    return incidence_filtered", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n    verbose = kw.pop('verbose',False)\r\n    ### prepare RO results and sort them according to sweep point\r\n    for a in [a_lt3,a_lt4]:\r\n        a.pts = a.g.attrs['sweep_length']\r\n        a.ssros = a.agrp['ssro_results'].value\r\n        a.readouts = a.g.attrs['nr_of_ROsequences']\r\n        # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n    ### correlate the ROs with each other by making a boolean filter:\r\n    ### variables here are described in terms of spin states!\r\n    m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n    m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n    m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n    m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n    \r\n    ### now define unique identifiers for each RO correlation and recast the correlations into a single array.\r\n    ### As identifiers I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... and so forth\r\n    RO_correlators =  np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n                    + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n                    + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n                    + np.array(len(a_lt3.ssros)*[4])*m00 \r\n    ### PH - added to make sure it has a full set of repetitions\r\n    RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n    adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n    \r\n    ### now sort the correlators and the adwin fltr according to the sweep pts\r\n    sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n    sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n    ### from now on: no numpy magic anymore. from here it is brute-force 'for-looping'\r\n    ### (all conceived arrays will have different lengths due to temporal filtering. this breaks most np methods)\r\n    ### although vstack and hstack would probably work...\r\n    \r\n    return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurrence\r\n    for i in range(a_lt3.pts): \r\n        correlators_at_sweep_pt = [0,0,0,0]\r\n        for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n            correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. 
Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n 
else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's 
requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. 
Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", 
"def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def filter_on_adwin_parameters(a_lt3,a_lt4,**kw):\r\n\r\n filter_params = kw.pop('adwin_filter_params',{})\r\n if len(filter_params):\r\n old_params = analysis_params.SPSP_fltr_adwin_settings\r\n \r\n for setup_key,setup_dict in filter_params.iteritems():\r\n for key,params in setup_dict.iteritems():\r\n analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+setup_key][key] = params\r\n\r\n fltr = np.array([True]*len(a_lt3.agrp['ssro_results'].value)) ### initially everything true\r\n\r\n for a,suffix in zip([a_lt3,a_lt4],['lt3','lt4']): ### loop over both files\r\n for key,val in analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+suffix].iteritems(): ### loop over the list of filter parameters\r\n [filter_on,minimum,maximum] = val\r\n\r\n if filter_on:\r\n if key == 'repetition_number':\r\n values = np.array([i for i in range(len(fltr)/a.g.attrs['sweep_length']) for _ in range(a.g.attrs['sweep_length'])]) ### Make an array of values corresponding to the current rep\r\n else:\r\n values = a.agrp[key].value\r\n\r\n fltr = np.logical_and(fltr,(values >= minimum) & ( values <= maximum)) ### update filter\r\n\r\n if len(filter_params):\r\n analysis_params.SPSP_fltr_adwin_settings = old_params\r\n\r\n return fltr", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def numberOfWideBands(config=None):\n # Get correlator configuration\n c = config\n if c == 
None: \n c = utils.getConfigAstroband()\n\n # Determine if we have both wideband and spectral line astrobands. \n # If we do, we return nwide & maxbandwidth for sl only since \n # this is the correlator which will be attached to all ants.\n astrobands = [ abc[0] for abc in c ]\n if len( astrobands ) == 0:\n raise Exception, \"No existing astroband configuration.\"\n if max( astrobands ) > 8 and min( astrobands ) < 9: \n astrobands = [ ab for ab in astrobands if ab < 9 ]\n\n # Check bandwidth\n nwide = 0\n maxbandwidth = 0\n for t in c:\n astroband = t[0]\n # Skip band if it is not being used or is not in astroband list above.\n mp = commands.queryString('SignalPath.Mapping.Astroband%d.confTag' % (astroband) )\n if mp == 'NONE' or astroband not in astrobands: continue\n\n # Get bandwidth\n if t[2] == commands.BW500:\n bw = 500\n elif t[2] == commands.BW250:\n bw = 250\n elif t[2] == commands.BW125:\n bw = 125\n elif t[2] == commands.BW62:\n bw = 62\n elif t[2] == commands.BW31:\n bw = 31\n elif t[2] == commands.BW8:\n bw = 8\n elif t[2] == commands.BW2:\n bw = 2\n else:\n raise Exception, 'Could not find bandwith for '+str(t[2])\n\n # Maximum?\n if bw > maxbandwidth: \n maxbandwidth = bw\n if utils.isDualPol( astroband ):\n nwide = 2 \n else:\n nwide = 1\n elif bw == maxbandwidth:\n if utils.isDualPol( astroband ): \n nwide += 2 \n else:\n nwide += 1\n\n return nwide, maxbandwidth", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n 
df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 
'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = 
calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def sliding_window(frame_length, step, Xsampleslist, ysampleslist):\n Xsamples = []\n ysamples = []\n for j in range(len(Xsampleslist)):\n X = Xsampleslist[j]\n ybinary = ysampleslist[j]\n for i in range(0, X.shape[0] - frame_length, step):\n xsub = X[i:i + frame_length, :]\n ysub = ybinary\n Xsamples.append(xsub)\n ysamples.append(ysub)\n return Xsamples, ysamples", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 
2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def apply_bandpass_filter_timeseries(self, folder_name, indices, start_stop_freq, stop_stop_freq):\n (x_index, y_index) = indices\n photo_list = self.get_photo_list(folder_name)\n\n ts = self.get_pixel_timeseries(folder_name, (x_index, y_index))\n self.plot_fft_pixel_timeseries(folder_name, ts, str(x_index) + '_' + str(y_index) + 'pre_butterworth')\n n = len(ts)\n frequency = self.get_sampling_frequency(folder_name)\n d = 1.0 / frequency # 'sample spacing'\n fig, ax = plt.subplots()\n sample_freqs = np.fft.rfftfreq(n, d)\n fourier = np.fft.rfft(ts)\n print(sample_freqs)\n nyquist = frequency / 2.0\n\n start_stop_band = start_stop_freq / nyquist\n stop_stop_band = stop_stop_freq / nyquist\n\n print(start_stop_band)\n print(stop_stop_band)\n\n sos = sgnl.butter(2, Wn=[start_stop_band, stop_stop_band], btype='bandstop', output='sos')\n filtered = sgnl.sosfilt(sos, ts)\n self.plot_fft_pixel_timeseries(folder_name, filtered, str(x_index) + '_' + str(y_index) + 'post_butterworth')\n fig, ax = plt.subplots()\n indices = self.get_indices_from_filenames(folder_name)\n index_dates = dates.date2num(indices)\n ax.plot_date(index_dates, ts, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index))\n ax.plot_date(index_dates, filtered, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index) + ' filtered')\n\n ax.legend()\n ax.grid(b=True, which='major', color='#666666', linestyle='-')\n\n # Show the minor grid lines with very faint and almost transparent grey lines\n ax.minorticks_on()\n ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n fig.set_figwidth(40)\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.png')\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.svg')\n fig.clf()", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = 
early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def get_bands(self, data_array_norm, baseline_array_norm, f):\n\n fmax = 50\n fidx = f < fmax\n fnum = f[fidx].size\n\n band_tot = np.zeros((fnum, fnum, data_array_norm.shape[0], data_array_norm.shape[2], 
data_array_norm.shape[3]))\n band_tot_bl = np.zeros((fnum, fnum, baseline_array_norm.shape[0], baseline_array_norm.shape[2], baseline_array_norm.shape[3]))\n for i in range(fnum):\n for j in range(fnum):\n if j > i:\n idx = (f >= f[i]) & (f < f[j])\n band_tot[i, j, :, :] = np.sum(data_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n band_tot_bl[i, j, :, :] = np.sum(baseline_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n\n\n band_tot_bl1 = np.mean(band_tot_bl, axis=3) # average across time bins\n band_tot_bl2 = np.repeat(band_tot_bl1[:, :, :, None, :], band_tot_bl.shape[3], axis=3) # repeat same value across time\n return band_tot, band_tot_bl2, f[fidx]", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def use_w(args):\n try:\n bounddata = Table.read(\n f'./Input/UseWv/WaveRegions_{args.WRegion}_{args.band}.csv',\n format='csv')\n except IOError:\n sys.exit(\n f'WaveRegions FILE \"./Input/UseWv/WaveRegions'\n '_{args.WRegion}_{args.band}.csv\" NOT FOUND!')\n\n wavesols = pd.read_csv(f'./Input/UseWv/WaveSolns_{args.band}.csv')\n#-------------------------------------------------------------------------------\n XRegion_dir = f'./Input/UseWv/XRegions_{args.WRegion}_{args.band}.csv'\n with open(XRegion_dir,'w') as filew:\n filew.write('order, start, end, masks\\n')\n\n m_order = np.array(bounddata['order'])\n starts = np.array(bounddata['start'])\n ends = np.array(bounddata['end'])\n ords = list( sorted(OrderDictCla().orderdict[args.band].keys()) )\n\n Ostarts = [OrderDictCla().orderdict[args.band][k][0] for k in ords]\n Oends = [OrderDictCla().orderdict[args.band][k][1] for k in ords]\n labels = []\n\n m_orders_unique = np.unique(m_order)\n\n # For each order specified, find what pixel numbers correspond to the\n # wavelength bounds presented.\n # If multiple wavelength bounds given for a single order, output a\n # pixel mask between the two, as well.\n for o in range(len(m_orders_unique)):\n\n # if len(m_orders_unique) == 9:\n # filew.write('9, 150, 1950, []\\n')\n # continue\n\n pixs = []\n mini = np.where(m_order == m_orders_unique[o])[0]\n for j in range(len(mini)):\n i = mini[j]\n\n wavebounds = [starts[i],ends[i]]\n wO = wavesols['w'+str(m_orders_unique[o])]\n pixO = wavesols['x'+str(m_orders_unique[o])]\n pix = [pixO[(np.argmin(abs(wO-wavebounds[k])))] for k in [0,1]]\n pixs = pixs + pix\n\n pixsS = list(sorted(pixs))\n q = pixsS[1:-1]\n if len(pixsS) == 2:\n filew.write('{}, {}, {},[]\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1])\n )\n else:\n filew.write('{}, {}, {},\"{}\"\\n'.format(\n m_orders_unique[o], pixsS[0], pixsS[-1],\n [[first,second] for first, second in zip(q[0::2], q[1::2])]\n ))", "def firwin(N, cutoff, width=None, window='hamming'):\n\n from signaltools import get_window\n if isinstance(width,float):\n A = 2.285*N*width + 8\n if (A < 21): beta = 0.0\n elif (A <= 50): beta = 0.5842*(A-21)**0.4 + 0.07886*(A-21)\n else: beta = 0.1102*(A-8.7)\n 
window=('kaiser',beta)\n\n win = get_window(window,N,fftbins=1)\n alpha = N//2\n m = numpy.arange(0,N)\n h = win*special.sinc(cutoff*(m-alpha))\n return h / numpy.sum(h,axis=0)", "def get_dataframes_for_each_year(main_dataframe, years):\n list_of_dataframes = []\n for year in years:\n dataframe_by_year = main_dataframe.loc[ (main_dataframe['year'] == year) ].T\n # Getting rid of the first two rows \n dataframe_by_year = dataframe_by_year.iloc[2:]\n list_of_dataframes.append(dataframe_by_year)\n return list_of_dataframes", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = 
np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def imdb_crawl_by_years(years, verbose):\n for year in years:\n imdb_crawl_by_year(year, verbose)", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def get_rgb_bands(image, bands):\n if bands is not MONOCHROME:\n red = image[:, :, bands['red']]\n green = image[:, :, bands['green']]\n blue = image[:, :, bands['blue']]\n\n img = np.rollaxis(np.array([red, green, blue]), 0, 3)\n else:\n img = color.grey2rgb(image)\n\n return img", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def collapse(self):\n try:\n wavelengths = pylab.linspace(self.start, self.end,\n self.image.shape[not self.waveaxis])\n except TypeError:\n print 'The starting and ending wavelengths must be specified.'\n background = 
pylab.zeros(len(wavelengths))\n backgroundlines = 0\n data = pylab.zeros(len(wavelengths))\n datalines = 0\n for region in self.regions:\n if region['group'] is 0:\n backgroundlines += region['max'] - region['min']\n background += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n else:\n datalines += region['max'] - region['min']\n data += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n background = [sum/backgroundlines for sum in background]\n data = [sum/datalines for sum in data]\n corrected = pylab.array(data) - pylab.array(background)\n output = Spectrum(list(wavelengths), list(corrected))\n return output", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def butterworth_filter(dft4img, stopband2=10, order=3, showdft=False):\n h, w = dft4img.shape[0], dft4img.shape[1]\n P = h / 2\n Q = w / 2\n dst = np.zeros((h, w, 3), np.float64)\n for i in range(h):\n for j in range(w):\n r2 = float((i - P) ** 2 + (j - Q) ** 2)\n if r2 == 0:\n r2 = 1.0\n dst[i, j] = 1 / (1 + (r2 / stopband2) ** order)\n dst = np.float64(dst)\n if showdft:\n cv2.imshow(\"butterworth\", cv2.magnitude(dst[:, :, 0], dst[:, :, 1]))\n return dst", "def main(years=(2000, 2019)):\n year_list = range(years[0], years[1] + 1)\n dfs = []\n for year in year_list:\n dfs.append(get_df(year))\n print(f\"Done: {len(dfs)} dataframes written\")", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def calculate_overall_winners(pContestGroup, pContestsRequired, pYear):\n # Work out how many bands competed that year\n lCursor = connection.cursor()\n lCursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestresult r, contests_contestevent e, contests_contest c WHERE r.contest_event_id = e.id AND e.contest_id = c.id AND c.group_id = %(groupid)s AND extract(year from e.date_of_event) = %(year)s\", {'groupid': pContestGroup.id, 'year' : pYear})\n lRows = lCursor.fetchall()\n for row in lRows:\n lBandsCompeting = row[0]\n lCursor.close()\n\n # get full results for that year - first element is the winner\n lResults = calculate_overall_results(pContestGroup, pContestsRequired, pYear)\n try:\n lWinner = lResults[0]\n except IndexError:\n lWinner = {}\n return lWinner, lBandsCompeting", "def filter_bands(self, imagery, bands=None, names=None, wavelengths=None) -> 'ImageCollection':\n\n graph = {\n 'process_id': 'filter_bands',\n 'imagery': imagery.graph,\n }\n\n if bands:\n graph['bands'] = bands\n if names:\n graph['names'] = names\n if wavelengths:\n graph['wavelengths'] = wavelengths\n\n imagery.graph = graph\n return imagery", "def scale_sky_spectrum(wlm, sky_spectrum, spectra, cut_sky=4., 
fmax=10, fmin=1, valid_wave_min=0, valid_wave_max=0, \n fibre_list=[100,200,300,400,500,600,700,800,900], plot=True, verbose=True, warnings=True): \n \n# # Read sky lines provided by 2dFdr\n# sky_line_,flux_sky_line_ = read_table(\"sky_lines_2dfdr.dat\", [\"f\", \"f\"] )\n# # Choose those lines in the range\n# sky_line=[]\n# flux_sky_line=[]\n# valid_wave_min = 6240\n# valid_wave_max = 7355\n# for i in range(len(sky_line_)):\n# if valid_wave_min < sky_line_[i] < valid_wave_max:\n# sky_line.append(sky_line_[i])\n# flux_sky_line.append(flux_sky_line_[i])\n \n \n if valid_wave_min == 0: valid_wave_min = wlm[0]\n if valid_wave_max == 0: valid_wave_max = wlm[-1]\n \n if verbose: print(\"\\n> Identifying sky lines using cut_sky =\",cut_sky,\", allowed SKY/OBJ values = [\",fmin,\",\",fmax,\"]\")\n if verbose: print(\" Using fibres = \",fibre_list)\n\n peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wlm,sky_spectrum, plot=plot, cut=cut_sky, fmax=fmax, only_id_lines=False, verbose=False) \n\n ratio_list=[]\n valid_peaks=[]\n \n if verbose: print(\"\\n Sky line Gaussian ratio Flux ratio\")\n n_sky_lines_found=0\n for i in range(len(peaks)):\n sky_spectrum_data=fluxes(wlm,sky_spectrum, peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n \n sky_median_continuum = np.nanmedian(sky_spectrum_data[11])\n \n object_spectrum_data_gauss=[]\n object_spectrum_data_integrated=[] \n median_list=[]\n for fibre in fibre_list: \n object_spectrum_flux=fluxes(wlm, spectra[fibre], peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n object_spectrum_data_gauss.append(object_spectrum_flux[3]) # Gaussian flux is 3\n object_spectrum_data_integrated.append(object_spectrum_flux[7]) # integrated flux is 7\n median_list.append(np.nanmedian(object_spectrum_flux[11]))\n object_spectrum_data=np.nanmedian(object_spectrum_data_gauss)\n object_spectrum_data_i=np.nanmedian(object_spectrum_data_integrated)\n \n object_median_continuum=np.nanmin(median_list) \n \n if fmin < object_spectrum_data/sky_spectrum_data[3] * sky_median_continuum/object_median_continuum < fmax :\n n_sky_lines_found = n_sky_lines_found + 1\n valid_peaks.append(peaks[i])\n ratio_list.append(object_spectrum_data/sky_spectrum_data[3])\n if verbose: print(\"{:3.0f} {:5.3f} {:2.3f} {:2.3f}\".format(n_sky_lines_found,peaks[i],object_spectrum_data/sky_spectrum_data[3], object_spectrum_data_i/sky_spectrum_data[7])) \n\n\n #print \"ratio_list =\", ratio_list\n #fit = np.polyfit(valid_peaks, ratio_list, 0) # This is the same that doing an average/mean\n #fit_line = fit[0]+0*wlm\n fit_line =np.nanmedian(ratio_list) # We just do a median\n #fit_line = fit[1]+fit[0]*wlm\n #fit_line = fit[2]+fit[1]*wlm+fit[0]*wlm**2\n #fit_line = fit[3]+fit[2]*wlm+fit[1]*wlm**2+fit[0]*wlm**3\n \n \n if plot:\n plt.plot(valid_peaks,ratio_list,\"+\")\n #plt.plot(wlm,fit_line)\n plt.axhline(y=fit_line, color='k', linestyle='--')\n plt.xlim(valid_wave_min-10, valid_wave_max+10) \n #if len(ratio_list) > 0:\n plt.ylim(np.nanmin(ratio_list)-0.2,np.nanmax(ratio_list)+0.2)\n plt.title(\"Scaling sky spectrum to object spectra\")\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"OBJECT / SKY\")\n plt.minorticks_on()\n plt.show()\n plt.close()\n \n if verbose: print(\" Using this fit to scale sky spectrum to object, the median value is \",np.round(fit_line,3),\"...\") \n \n sky_corrected = sky_spectrum * fit_line\n\n# plt.plot(wlm,sky_spectrum, \"r\", alpha=0.3)\n# plt.plot(wlm,sky_corrected, \"g\", 
alpha=0.3)\n# plt.show()\n# plt.close()\n \n return sky_corrected, np.round(fit_line,3)", "def filter_tracks(df, start_year=1980, end_year=2010, zeta=0, age=36):\n tracks = df.groupby('num')\n filterdf = tracks.filter(lambda x: (x['datetime'].dt.year.min() >= start_year) &\n (x['datetime'].dt.year.max() <= end_year) &\n (x['age'].max() >= age) &\n (np.abs(x['vorticity'].min()) > zeta))\n return filterdf", "def calc_band_filters(f_ranges, sfreq, filter_length=\"1000ms\", l_trans_bandwidth=4, h_trans_bandwidth=4):\n filter_list = list()\n for f_range in f_ranges:\n h = mne.filter.create_filter(None, sfreq, l_freq=f_range[0], h_freq=f_range[1], fir_design='firwin',\n l_trans_bandwidth=l_trans_bandwidth, h_trans_bandwidth=h_trans_bandwidth,\n filter_length=filter_length)\n filter_list.append(h)\n filter_bank = np.vstack(filter_list)\n return filter_bank", "def preprocess_land_cover(\n src_files, dst_raster, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n if os.path.isfile(dst_raster) and not overwrite:\n log.info(\"Land cover data already preprocessed. Skipping.\")\n return\n log.info(\"Starting preprocessing of land cover data.\")\n LC_CLASSES = [\n \"bare\",\n \"crops\",\n \"grass\",\n \"moss\",\n \"shrub\",\n \"tree\",\n \"urban\",\n \"water-permanent\",\n \"water-seasonal\",\n ]\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n reprojected_files = []\n tile_names = unique_tiles(tmpdir)\n\n if not tile_names:\n raise MissingDataError(\"Land cover data not found.\")\n\n for lc_class in LC_CLASSES:\n tiles = [\n p.as_posix()\n for p in tmpdir.glob(f\"*{lc_class}-coverfraction-layer*.tif\")\n ]\n if len(tiles) > 1:\n src_file = merge_tiles(\n tiles, os.path.join(tmpdir, f\"{lc_class}_mosaic.tif\"), nodata=255,\n )\n else:\n src_file = tiles[0]\n reprojected_files.append(\n reproject(\n src_raster=src_file,\n dst_raster=os.path.join(tmpdir, f\"{lc_class}.tif\"),\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=255,\n dst_nodata=255,\n dst_dtype=\"Byte\",\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n )\n\n if len(reprojected_files) > 1:\n raster = concatenate_bands(\n src_files=reprojected_files,\n dst_file=dst_raster,\n band_descriptions=LC_CLASSES,\n )\n else:\n raster = reprojected_files[0]\n\n if geom:\n mask_raster(raster, geom)", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set 
first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def bandpass_filter(files, lowpass_freq=0.1, highpass_freq=0.01, tr=2):\n import os\n\n import nibabel as nb\n import numpy as np\n from nipype.utils.filemanip import (\n filename_to_list,\n list_to_filename,\n split_filename\n )\n\n fs = 1./tr\n\n out_files = []\n for filename in filename_to_list(files):\n path, name, ext = split_filename(filename)\n out_file = os.path.join(os.getcwd(), name + '_bandpassed' + ext)\n\n img = nb.load(filename)\n timepoints = img.shape[-1]\n F = np.zeros((timepoints))\n\n lowidx = int(timepoints / 2) + 1\n if lowpass_freq > 0:\n lowidx = np.round(float(lowpass_freq) / fs * timepoints)\n\n highidx = 0\n if highpass_freq > 0:\n highidx = np.round(float(highpass_freq) / fs * timepoints)\n F[int(highidx):int(lowidx)] = 1\n F = ((F + F[::-1]) > 0).astype(int)\n data = img.get_data()\n if np.all(F == 1):\n filtered_data = data\n else:\n filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))\n img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)\n img_out.to_filename(out_file)\n out_files.append(out_file)\n\n return list_to_filename(out_files)", "def build_sea_data(\n start_year=1999,\n end_year=2016,\n 
netcdf_path=\"data/sea_level/netcdf/\",\n target_lon=175.8606890,\n target_lat=-36.993684,\n buffer_degrees=0.5,\n path_out=\".\",\n):\n # tairua_coords = (-36.993684, 175.8606890)\n df_sea_data = pd.DataFrame()\n\n for year in range(start_year, end_year + 1):\n ds_first = xr.open_mfdataset(\n os.path.join(netcdf_path, f\"dt_global_twosat_phy_l4_{year}*.nc\")\n )\n\n target_lon = xr.DataArray(\n list(target_lon + np.linspace(-buffer_degrees, buffer_degrees))\n )\n target_lat = xr.DataArray(\n list(target_lat + np.linspace(-buffer_degrees, buffer_degrees))\n )\n\n ds_tairua = ds_first[[\"adt\", \"ugos\", \"vgos\"]].sel(\n longitude=target_lon, latitude=target_lat, method=\"nearest\"\n )\n df_sealevel_pandas = (\n ds_tairua.resample(time=\"MS\")\n .mean()\n .mean(dim=\"dim_0\")\n .to_dataframe()\n )\n\n df_sea_data = pd.concat([df_sea_data, df_sealevel_pandas])\n\n print(\n f\"************************Done {year} ************************************\"\n )\n print(df_sea_data.tail(10))\n\n df_sea_data.to_csv(os.path.join(path_out, \"df_sea_data.csv\"))", "def apply_sliding_windows(self, binary_warped, leftx_base, rightx_base):\r\n # Choose the number of sliding windows\r\n nwindows = 9\r\n # Set height of windows\r\n window_height = np.int(binary_warped.shape[0] / nwindows)\r\n # Identify the x and y positions of all nonzero pixels in the image\r\n nonzero = binary_warped.nonzero()\r\n nonzeroy, nonzerox = np.array(nonzero[0]), np.array(nonzero[1])\r\n # Current positions to be updated for each window\r\n leftx_current, rightx_current = leftx_base, rightx_base\r\n # Set the width of the windows +/- margin\r\n margin = 100\r\n # Set minimum number of pixels found to recenter window\r\n minpix = 50\r\n # Create empty lists to receive left and right lane pixel indices\r\n left_lane_inds, right_lane_inds = [], []\r\n\r\n # Step through the windows one by one\r\n # Create an output image to draw on and visualize the result\r\n out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\r\n for window in range(nwindows):\r\n # Identify window boundaries in x and y (and right and left)\r\n win_y_low = binary_warped.shape[0] - (window + 1) * window_height\r\n win_y_high = binary_warped.shape[0] - window * window_height\r\n win_xleft_low, win_xleft_high = leftx_current - margin, leftx_current + margin\r\n win_xright_low, win_xright_high = rightx_current - margin, rightx_current + margin\r\n # Draw the windows on the visualization image\r\n if self.debug:\r\n cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),(0,255,0), 2)\r\n cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),(0,255,0), 2)\r\n # Identify the nonzero pixels in x and y within the window\r\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\r\n nonzerox < win_xleft_high)).nonzero()[0]\r\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\r\n nonzerox < win_xright_high)).nonzero()[0]\r\n # Append these indices to the lists\r\n left_lane_inds.append(good_left_inds)\r\n right_lane_inds.append(good_right_inds)\r\n # If you found > minpix pixels, recenter next window on their mean position\r\n if len(good_left_inds) > minpix:\r\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\r\n if len(good_right_inds) > minpix:\r\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\r\n if self.debug:\r\n cv_rgb = cv2.cvtColor(out_img.astype(np.uint8), 
cv2.COLOR_BGR2RGB)\r\n plt.imshow(cv_rgb)\r\n #cv2.imshow('Sliding window computation',out_img)\r\n # Concatenate the arrays of indices\r\n left_lane_inds = np.concatenate(left_lane_inds)\r\n right_lane_inds = np.concatenate(right_lane_inds)\r\n if self.debug:\r\n self.fit_dict['left_lane_inds'] = left_lane_inds\r\n self.fit_dict['right_lane_inds'] = right_lane_inds\r\n\r\n # Extract left and right line pixel positions\r\n leftx, lefty = nonzerox[left_lane_inds], nonzeroy[left_lane_inds]\r\n rightx, righty = nonzerox[right_lane_inds], nonzeroy[right_lane_inds]\r\n return leftx, lefty, rightx, righty", "def makeSkyList(skyFrameList, sciencelist, obsDir):\n logging.info(\"\\n#############################################################\")\n logging.info(\"# #\")\n logging.info(\"# Matching science frames with sky frames closest in time #\")\n logging.info(\"# #\")\n logging.info(\"#############################################################\\n\")\n # Do some tests first.\n # Check that data is either:\n # ABA ABA ABA- one sky frame per two science frames.\n # AB AB AB- one sky frame per one two science frames.\n #\n # If it is neither warn user to verify that sky frames were matched with science frames correctly.\n if len(skyFrameList) != len(sciencelist)/2 and len(skyFrameList) != len(sciencelist):\n logging.info(\"\\n#####################################################################\")\n logging.info(\"#####################################################################\")\n logging.info(\"\")\n logging.info(\" WARNING in reduce: it appears science frames and sky frames were not\")\n logging.info(\" taken in an ABA ABA or AB AB pattern.\")\n logging.info(\"\")\n logging.info(\"#####################################################################\")\n logging.info(\"#####################################################################\\n\")\n skytimes = []\n prepared_sky_list = []\n # Calculate time of each sky frame. 
Store the calculated time and the frame name in skytimes, a\n # 2D list of [skyframe_time, skyframe_name] pairs.\n # Eg: [[39049.3, 'N20130527S0241'], [39144.3, 'N20130527S0244'], [39328.8, 'N20130527S0247'], [39590.3, 'N20130527S0250']]\n for item in skyFrameList:\n # Strip off the trailing newline.\n item = str(item).strip()\n # Calculate the time of the sky frame.\n skytime = timeCalc(item+'.fits')\n # Store the sky frame time and corresponding sky frame name in skytimes.\n templist = [skytime, item]\n skytimes.append(templist)\n logging.info(\"scienceframelist: skyFrameList: time delta (between observation UT start times from .fits headers):\")\n for item in sciencelist:\n # Calculate time of the science frame in seconds.\n item = str(item).strip()\n sciencetime = timeCalc(item+'.fits')\n # Sort the 2D list of [skyframe_time, skyframe_name] pairs by absolute science_frame_time - skyframe_time.\n # Eg: [[39049.3, 'N20130527S0241'], [39144.3, 'N20130527S0244'], [39328.8, 'N20130527S0247'], [39590.3, 'N20130527S0250']]\n sorted_by_closest_time = sorted(skytimes, key=lambda x: (abs(sciencetime - x[0])))\n # Append the name corresponding to the minimum time difference to prepared_sky_list.\n prepared_sky_list.append(sorted_by_closest_time[0][1])\n # Print the scienceframe, matching skyframe and time difference side by side for later comparison.\n logging.info(\" \"+ str(item)+ \" \"+ str(sorted_by_closest_time[0][1])+ \" \"+ str(abs(sciencetime - sorted_by_closest_time[0][0])))\n logging.info(\"\\n\")\n\n os.rename('skyFrameList', 'original_skyFrameList')\n\n f = open('skyFrameList', 'w')\n for image in prepared_sky_list:\n f.write(image+'\\n')\n f.close()\n\n return prepared_sky_list", "def wind(\n da_model: Union[xr.DataArray, xr.Dataset],\n wind: xr.DataArray = None,\n wind_u: xr.DataArray = None,\n wind_v: xr.DataArray = None,\n altitude: float = 10,\n altitude_correction: bool = False,\n freq: pd.Timedelta = None,\n reproj_method: str = \"nearest_index\",\n resample_kwargs: dict = {},\n logger=logger,\n):\n if wind_u is not None and wind_v is not None:\n wind = np.sqrt(np.power(wind_u, 2) + np.power(wind_v, 2))\n elif wind is None:\n raise ValueError(\"Either wind or wind_u and wind_v varibales must be supplied.\")\n\n if wind.raster.dim0 != \"time\":\n raise ValueError(f'First wind dim should be \"time\", not {wind.raster.dim0}')\n\n # compute wind at 2 meters altitude\n if altitude_correction:\n wind = wind * (4.87 / np.log((67.8 * altitude) - 5.42))\n # downscale wind (lazy)\n wind_out = wind.raster.reproject_like(da_model, method=reproj_method)\n # resample time\n wind_out.name = \"wind\"\n wind_out.attrs.update(unit=\"m s-1\")\n if freq is not None:\n resample_kwargs.update(upsampling=\"bfill\", downsampling=\"mean\", logger=logger)\n wind_out = resample_time(wind_out, freq, conserve_mass=False, **resample_kwargs)\n return wind_out", "def skycombine(dir = 'Objects'):\n \n if dir ==\"Objects\":\n dir = 'Objects/*/*/flat_corrected/'\n \n for d in glob(dir):\n \n directory = \"/\".join(d.split('/')[0:2]) + '/swarped'\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n keys = ['OBJECTS', 'ITIME', 'FWINAME', 'OBSDATE', 'CAMNAME', 'HISTORY', 'FLSPECTR']\n images = ImageFileCollection(d, keywords = keys, glob_include = 'f*.fits')\n \n swarpfilter(d, dir, directory, images, keys, filter='H', lamp = '*', camera = 'narrow',\n done='Dark Subtracted', output='cKSkyNarrowH', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='H',lamp = '*', camera = 'wide', 
\n done='Dark Subtracted', output='cKSkyWideH', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='J',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowJ', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp = '*',camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideJ', type='EQUATORIAL') \n swarpfilter(d, dir, directory, images, keys, filter='Ks',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowKs', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideKs', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Lp',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowLp', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Lp',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyNarrowLp', type='EQUATORIAL')", "def bins_per_year(self):\n # Load the vector version #\n df = self.grouped_bins.reset_index()\n # Add year and remove TimeStep #\n df['year'] = self.country.timestep_to_year(df['time_step'])\n df = df.drop('time_step', axis=1)\n # Only if we are in the calibration scenario #\n if self.parent.parent.scenario.short_name == 'calibration':\n # Patch the harvest data frame to stop at the simulation year #\n selector = df['year'] <= self.parent.parent.country.base_year\n df = df.loc[selector].copy()\n # Return #\n return df", "def norm_band(bands: np.ndarray,\n band_min: Numeric = 0,\n band_max: Numeric = 7000,\n gamma: Numeric = 1.0) -> np.ndarray:\n assert band_max > band_min, f'invalid range specified by band_min {band_min} and band_max {band_max}'\n bands = np.clip(bands, band_min, band_max)\n\n bands = (bands - band_min) / (band_max - band_min)\n\n bands = exposure.adjust_gamma(bands, gamma)\n return bands", "def rrhr(band,skypos,tranges,skyrange,width=False,height=False,stepsz=1.,\n\t\t verbose=0,calpath='../cal/',tscale=1000.,response=True,hdu=False,\n\t\t retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\t# TODO the if width / height\n\n\tflat = get_fits_data(flat_filename(band,calpath),verbose=verbose)\n\tflatinfo = get_fits_header(flat_filename(band,calpath))\n\tnpixx,npixy \t= flat.shape\n\tfltsz \t\t= flat.shape\n\tpixsz = flatinfo['CDELT2']\n\tdetsize = 1.25\n\n\t# Rotate the flat into the correct orientation to start.\n\tflat = np.flipud(np.rot90(flat))\n\n\t# NOTE: This upsample interpolation is done _last_ in the canonical\n\t#\tpipeline as part of the poissonbg.c routine.\n\t# \tThe interpolation function is \"congrid\" in the same file.\n\t# TODO: Should this be first order interpolation? (i.e. 
bilinear)\n\thrflat = scipy.ndimage.interpolation.zoom(flat,4.,order=0,prefilter=False)\n\timg = np.zeros(hrflat.shape)[\n\t\t\t\thrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,\n\t\t\t\thrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]\n\n\tfor trange in tranges:\n\t\tt0,t1=trange\n\t\tentries = gQuery.getArray(gQuery.aspect(t0,t1),retries=retries)\n\t\tn = len(entries)\n\n\t\tasptime = np.float64(np.array(entries)[:,2])/tscale\n\t\taspra = np.float32(np.array(entries)[:,3])\n\t\taspdec = np.float32(np.array(entries)[:,4])\n\t\tasptwist= np.float32(np.array(entries)[:,5])\n\t\taspflags= np.float32(np.array(entries)[:,6])\n\t\tasptwist= np.float32(np.array(entries)[:,9])\n\t\taspra0 = np.zeros(n)+skypos[0]\n\t\taspdec0 = np.zeros(n)+skypos[1]\n\n\t\txi_vec, eta_vec = gnomonic.gnomfwd_simple(\n\t\t\t\t\t\t\taspra,aspdec,aspra0,aspdec0,-asptwist,1.0/36000.,0.)\n\n\t\tcol = 4.*( ((( xi_vec/36000.)/(detsize/2.)*(detsize/(fltsz[0]*pixsz)) + 1.)/2. * fltsz[0]) - (fltsz[0]/2.) )\n\t\trow = 4.*( (((eta_vec/36000.)/(detsize/2.)*(detsize/(fltsz[1]*pixsz)) + 1.)/2. * fltsz[1]) - (fltsz[1]/2.) )\n\n\t\tvectors = rotvec(np.array([col,row]),-asptwist)\n\n\t\tfor i in range(n):\n\t\t\tif verbose>1:\n\t\t\t\tprint_inline('Stamping '+str(asptime[i]))\n\t\t\t\t# FIXME: Clean this mess up a little just for clarity.\n\t \timg += scipy.ndimage.interpolation.shift(scipy.ndimage.interpolation.rotate(hrflat,-asptwist[i],reshape=False,order=0,prefilter=False),[vectors[1,i],vectors[0,i]],order=0,prefilter=False)[hrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,hrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]*dbt.compute_exptime(band,[asptime[i],asptime[i]+1],verbose=verbose,retries=retries)*gxt.compute_flat_scale(asptime[i]+0.5,band,verbose=0)\n\n\treturn img", "def create_grism_waverange(outname=\"\",\n history=\"Ground NIRCAM Grismwavelengthrange\",\n author=\"STScI\",\n filter_range=None):\n ref_kw = common_reference_file_keywords(reftype=\"wavelengthrange\",\n title=\"NIRCAM Grism wavelenghtrange\",\n description=\"NIRCAM Grism+Filter Wavelength Ranges\",\n exp_type=\"NRC_GRISM\",\n author=author,\n model_type=\"WavelengthrangeModel\",\n filename=outname,\n )\n\n if filter_range is None:\n # These numbers from Nor Pirzkal, in microns\n filter_range = {1: {'F250M': [2.500411072, 4.800260833],\n 'F277W': [2.500411072, 3.807062006],\n 'F300M': [2.684896869, 4.025318456],\n 'F322W2': [2.5011293930000003, 4.215842089],\n 'F335M': [3.01459734, 4.260432726],\n 'F356W': [3.001085025, 4.302320901],\n 'F360M': [3.178096344, 4.00099629],\n 'F410M': [3.6267051809999997, 4.5644598],\n 'F430M': [4.04828939, 4.511761774],\n 'F444W': [3.696969216, 4.899565197],\n 'F460M': [3.103778615, 4.881999188],\n 'F480M': [4.5158154679999996, 4.899565197]},\n 2: {'F250M': [2.500411072, 2.667345336],\n 'F277W': [2.500411072, 3.2642254050000004],\n 'F300M': [2.6659796289999997, 3.2997071729999994],\n 'F322W2': [2.5011293930000003, 4.136119434],\n 'F335M': [2.54572003, 3.6780519760000003],\n 'F356W': [2.529505253, 4.133416971],\n 'F360M': [2.557881113, 4.83740855],\n 'F410M': [2.5186954019999996, 4.759037127],\n 'F430M': [2.5362614100000003, 4.541488865],\n 'F444W': [2.5011293930000003, 4.899565197],\n 'F460M': [2.575447122, 4.883350419],\n 'F480M': [2.549773725, 4.899565197]}}\n\n # array of integers\n orders = list(filter_range.keys())\n orders.sort()\n\n # same filters for every order, array of strings\n wrange_selector = list(filter_range[orders[0]].keys())\n wrange_selector.sort()\n\n # The 
lists below need\n # to remain ordered to be correctly referenced\n wavelengthrange = []\n for order in orders:\n o = []\n for fname in wrange_selector:\n o.append(filter_range[order][fname])\n wavelengthrange.append(o)\n\n ref = wcs_ref_models.WavelengthrangeModel()\n ref.meta.update(ref_kw)\n ref.meta.exposure.p_exptype = \"NRC_GRISM|NRC_TSGRISM\"\n ref.meta.input_units = u.micron\n ref.meta.output_units = u.micron\n ref.wrange_selector = wrange_selector\n ref.wrange = wavelengthrange\n ref.order = orders\n\n entry = HistoryEntry({'description': history, 'time': datetime.datetime.utcnow()})\n sdict = Software({'name': 'nircam_reftools.py',\n 'author': author,\n 'homepage': 'https://github.com/spacetelescope/jwreftools',\n 'version': '0.7.1'})\n entry['sofware'] = sdict\n ref.history['entries'] = [entry]\n ref.to_asdf(outname)\n ref.validate()", "def collect_data_for_multiple_years(start_year, end_year, filename):\n \n full_dict = {}\n \n for year in range(start_year, end_year + 1):\n year_dict = collect_data_by_year(year)\n full_dict.update(year_dict) # Create dict of dicts\n \n print(year)\n time.sleep(random.randint(5, 60)) # Sleep to not overwhelm site\n \n # Convert dict of dicts to Pandas df\n df = pd.DataFrame.from_dict(full_dict, orient='index')\n df.reset_index(inplace=True)\n df = df.rename(columns = {'index':'playerCode'})\n save_df_to_csv(df, filename, col_headers=True, index=True,\n index_label='idNum', mode='w')\n \n return df", "def _window_function(arr: np.ndarray, border: int = 0) -> np.ndarray:\n ndata = len(arr)\n nwind = ndata - 2 * border\n w = np.zeros(ndata)\n for i in range(nwind):\n w[i + border] = np.sin(np.pi * (i + 1.0) / (nwind + 1.0))\n return w", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def write_jpeg(filename,band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t stepsz=1.,clobber=False,verbose=0,tscale=1000.,retries=20):\n\tscipy.misc.imsave(filename,countmap(band,skypos,tranges,skyrange,\n\t\t\t\t\t width=width,height=height,verbose=verbose,tscale=tscale,\n\t\t\t\t\t retries=retries))\n\treturn", "def adjust_daterange_filter_for_rolling_window(dimensions, operations, filters):\n has_datetime_dimension_in_first_dimension_pos = (\n not len(dimensions) or not dimensions[0].data_type == DataType.date\n )\n if has_datetime_dimension_in_first_dimension_pos:\n return filters\n\n has_rolling = any(\n [isinstance(operation, RollingOperation) for operation in operations]\n )\n if not has_rolling:\n return filters\n\n dim0 = dimensions[0]\n filters_on_dim0 = [\n filter_\n for filter_ in filters\n if isinstance(filter_, RangeFilter)\n and str(filter_.definition.term) == str(dim0.definition)\n ]\n if not 0 < len(filters_on_dim0):\n return filters\n\n max_rolling_period = max(\n operation.window\n for operation in operations\n if isinstance(operation, RollingOperation)\n )\n\n for filter_ in filters_on_dim0:\n # Monkey patch the update start date on the date filter\n print(\"stop\")\n args = (\n {dim0.interval_key + \"s\": max_rolling_period}\n if isinstance(dim0, DatetimeInterval) and \"quarter\" != dim0.interval_key\n else {\"months\": max_rolling_period * 3}\n )\n filter_.definition.start.value -= relativedelta(**args)\n\n return filters", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n 
mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def feats_array_4_window(window: np.ndarray):\n\n outvec = np.zeros((len(funclist), window.shape[1]))\n\n for i in range(len(funclist)):\n for j in range(window.shape[1]):\n outvec[i, j] = funclist[i](window[:, j])\n\n outvec = outvec.reshape(-1)\n\n return outvec", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n 
#lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")", "def modify_bands(\n xraster: xr.core.dataarray.DataArray, input_bands: List[str],\n output_bands: List[str], drop_bands: List[str] = []):\n # Do not modify if image has the same number of output bands\n if xraster['band'].shape[0] == len(output_bands):\n return xraster\n\n # Drop any bands from input that should not be on output\n for ind_id in list(set(input_bands) - set(output_bands)):\n drop_bands.append(input_bands.index(ind_id)+1)\n return xraster.drop(dim=\"band\", labels=drop_bands, drop=True)", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + 
float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def plotallbands(_zband, _yband, _jband, _hband, _kband, _period):\n # Set pyplot style to be consisten within the program\n plt.style.use('seaborn-whitegrid')\n # Frequency = 1 / Period\n _freq = 1 / _period\n\n # Create single dataset from all bands\n _bands = [_zband, _yband, _jband, _hband, _kband]\n # Iterate through each band and plot to screen\n i = 0\n while i < 5:\n # Array to set colours for each band\n _colours = ['-b', '-g', '-r', '-c', '-m']\n # Array to set strings for graph legend\n _legend = ['Z-band', 'Y-band', 'J-band', 'H-band', 'K-band']\n # Determine the line of best fit for each band\n _xfit, _lobf = calclobf(_bands[i], _period)\n # Plot the data in the array to screen, lightly coloured and z rank behind the line of best fit\n plt.plot(_xfit, _lobf, _colours[i], lw=1, zorder=2, label=_legend[i])\n i += 1\n\n # Set x-axis limit to a single period\n plt.xlim(0, 1)\n # Set graph and axis titles\n plt.xlabel(\"Phase\")\n plt.ylabel(\"Magnitude\")\n plt.title(\"Folded light curve\")\n # Show the legend\n plt.legend()\n # Invert y-axis as convention\n plt.gca().invert_yaxis()\n # Save to current folder\n plt.savefig('curve.png')\n # Display to screen\n plt.show()", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def fir(X, y, trial_index, window, tr):\n\n # Norm then pad.\n scaler = MinMaxScaler(feature_range=(0, 1))\n X = scaler.fit_transform(X.astype(np.float))\n X = np.vstack([X, np.ones((window, X.shape[1]), dtype=np.float)])\n\n # Save the org y names\n ynames = sorted(np.unique(y))\n ynames = unique_sorted_with_nan(ynames)\n \n # y becomes integers\n y = create_y(y)\n\n # Make the design matrix.\n dm = _create_dm(y, window)\n # dm DEBUG\n #import time\n #np.savetxt(\"dm-{0}\".format(time.strftime(\"%m_%d_%Y_%H_%s_%m\")), dm, fmt=\"%1.0f\")\n dm = np.matrix(dm)\n \n # FIR!\n fir_names = []\n firs = []\n for j in range(X.shape[1]):\n x = np.matrix(X[:,j])\n fir = np.array(np.linalg.pinv(dm.T * dm) * dm.T * x.T)[0:-1] \n ## Drop dummy\n fir = fir.reshape(len(ynames)-1, window) \n\n firs.append(fir)\n fir_names.extend(ynames[1:]) ## Drop nan/baseline\n\n Xfir = np.vstack(firs).transpose()\n fir_names = np.asarray(fir_names)\n\n assert checkX(Xfir)\n assert Xfir.shape[0] == window, (\"After FIR rows not equal to window\")\n assert Xfir.shape[1] == (len(ynames[1:]) * X.shape[1]), (\"After\" \n \"FIR wrong number of features\")\n assert fir_names.shape[0] == Xfir.shape[1], (\"fir_names and Xfir\" \n \"don't match\")\n\n return Xfir, fir_names", "def winter_bar_chart(self):\n 
# Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = [(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def resample(self, octave_bands):\n self.energy_absorption = {\n \"coeffs\": 
octave_bands(**self.energy_absorption),\n \"center_freqs\": octave_bands.centers,\n }\n self.scattering = {\n \"coeffs\": octave_bands(**self.scattering),\n \"center_freqs\": octave_bands.centers,\n }", "def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc", "def allbroadbandfilters(self):\n return [exposuretimeandbroadbandfilter[1] for exposuretimeandbroadbandfilter in self.__allexposuretimesandbroadbandfilters]", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack" ]
[ "0.77455264", "0.69279945", "0.6802912", "0.58571285", "0.5832874", "0.5571592", "0.5528454", "0.51787966", "0.50988805", "0.49669826", "0.49113834", "0.4877425", "0.48612544", "0.48353228", "0.4781184", "0.47058412", "0.46759126", "0.4645408", "0.45981827", "0.45730013", "0.45723224", "0.45414692", "0.45168623", "0.4503879", "0.4479773", "0.44086748", "0.44026402", "0.43920904", "0.43908378", "0.4367114", "0.43426174", "0.43129024", "0.43095702", "0.42971972", "0.42958915", "0.42899433", "0.42860436", "0.42817375", "0.42817235", "0.42731482", "0.42730728", "0.42730376", "0.42645693", "0.4246681", "0.42369685", "0.42368168", "0.42304066", "0.42181864", "0.42015064", "0.4197148", "0.4191763", "0.4191549", "0.41740513", "0.41679657", "0.41588452", "0.41515106", "0.41388878", "0.41380686", "0.41202876", "0.4104275", "0.40969506", "0.4094642", "0.40904757", "0.4085733", "0.40848187", "0.40727144", "0.4067943", "0.40622154", "0.40547863", "0.40446323", "0.40435654", "0.4038175", "0.40359607", "0.40244335", "0.40220362", "0.4014885", "0.40129727", "0.4010388", "0.40096134", "0.40071085", "0.40001518", "0.39972675", "0.39890885", "0.39820036", "0.39818707", "0.39743036", "0.39638525", "0.39566526", "0.3955349", "0.39507657", "0.39499202", "0.39499202", "0.3948689", "0.39366394", "0.39364308", "0.39317298", "0.39295954", "0.39286268", "0.3924188", "0.3920137" ]
0.49336118
10
A helper function to perform a 5-year moving window filter for a single land cover value (such as Forest as 1) for one five-year window representing the year(i-1), year(i), year(i+1), year(i+2), and year(i+3) annual land cover classifications. This function applies to one window, and should only be called using the function applyWindow5years. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central positions of five consecutive years, and if the extremities of the consecutive years are identical but the central positions are not, then the central pixels are reclassified to match their temporal neighbour class. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def mask5(imagem, value, bandNames):
    mask = imagem.select(bandNames[0]).eq(value) \
        .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \
        .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \
        .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \
        .bitwiseAnd(imagem.select(bandNames[4]).eq(value))
    change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)
    change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value)
    change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value)
    img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)
    return img_out
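A minimal usage sketch, assuming an Earth Engine classification image with one band per year in chronological order; the asset id, the band names and the Forest value of 1 are placeholder assumptions, and the windowing loop mirrors the applyWindow5years wrapper shown below:

import ee

ee.Initialize()

bandNames = ['classification_{}'.format(y) for y in range(2001, 2011)]  # assumed band names
imagem = ee.Image('users/example/annual_landcover').select(bandNames)   # assumed asset id

# Slide the five-year window across the series; the first year and the last three
# years are never at a correctable central position of a full window, so they are
# passed through unchanged.
filtered = imagem.select(bandNames[0])
for i in range(1, len(bandNames) - 3):
    filtered = filtered.addBands(mask5(imagem, 1, bandNames[i - 1:i + 4]))
for name in bandNames[-3:]:
    filtered = filtered.addBands(imagem.select(name))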
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = 
precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n 
#First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n 
seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def prepare_ERA5_moisture_flux(era5_path=era5_path):\n import xarray as xr\n from aux_gps import save_ncfile\n from aux_gps import anomalize_xr\n import numpy as np\n from aux_gps import convert_wind_direction\n from dask.diagnostics import 
ProgressBar\n ds = xr.open_dataset(\n era5_path / 'ERA5_UVQ_4xdaily_israel_1996-2019.nc', chunks={'level': 5})\n # ds = ds.resample(time='D', keep_attrs=True).mean(keep_attrs=True)\n # ds.attrs['action'] = 'resampled to 1D from 12:00UTC data points'\n mf = (ds['q'] * ds['u']).to_dataset(name='qu')\n mf.attrs = ds.attrs\n mf['qu'].attrs['units'] = ds['u'].attrs['units']\n mf['qu'].attrs['long_name'] = 'U component of moisture flux'\n mf['qu'].attrs['standard_name'] = 'eastward moisture flux'\n mf['qv'] = ds['q'] * ds['v']\n mf['qv'].attrs['units'] = ds['v'].attrs['units']\n mf['qv'].attrs['long_name'] = 'V component moisture flux'\n mf['qv'].attrs['standard_name'] = 'northward moisture flux'\n mf['qf'], mf['qfdir'] = convert_wind_direction(u=mf['qu'], v=mf['qv'])\n mf['qf'].attrs['units'] = ds['v'].attrs['units']\n mf['qf'].attrs['long_name'] = 'moisture flux magnitude'\n # mf['qfdir'] = 270 - np.rad2deg(np.arctan2(mf['qv'], mf['qu']))\n mf['qfdir'].attrs['units'] = 'deg'\n mf['qfdir'].attrs['long_name'] = 'moisture flux direction (meteorological)'\n mf = mf.sortby('latitude')\n mf = mf.sortby('level', ascending=False)\n comp = dict(zlib=True, complevel=9)\n encoding_mf = {var: comp for var in mf}\n mf_delayed = mf.to_netcdf(era5_path / 'ERA5_MF_4xdaily_israel_1996-2019.nc',\n 'w', encoding=encoding_mf, compute=False)\n mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n encoding_mf_anoms = {var: comp for var in mf_anoms}\n mf_anoms_delayed = mf_anoms_mean.to_netcdf(era5_path / 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc',\n 'w', encoding=encoding_mf_anoms, compute=False)\n with ProgressBar():\n results = mf_delayed.compute()\n with ProgressBar():\n results1 = mf_anoms_delayed.compute()\n # save_ncfile(mf, era5_path, 'ERA5_MF_4xdaily_israel_1996-2019.nc')\n # mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n # mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n # save_ncfile(mf_anoms_mean, era5_path,\n # 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')\n return", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n 
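# Editor's note (added comment, not in the original snippet): the statement below performs
# uniform scalar quantization of the wavelet subbands -- each coefficient is divided by its
# band's step size from quantization_Array and rounded to the nearest integer. The indexing
# assumes quantization_Array is ordered [LL, LH, HL, HH], matching filtered_image above.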
filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr 
in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):\r\n\r\n n = time_series.shape[-1]\r\n\r\n len_boxcar_ub = np.ceil(1 / (2.0 * ub))\r\n boxcar_ub = np.empty(len_boxcar_ub)\r\n boxcar_ub.fill(1.0 / len_boxcar_ub)\r\n boxcar_ones_ub = np.ones_like(boxcar_ub)\r\n\r\n if lb == 0:\r\n lb = None\r\n else:\r\n len_boxcar_lb = np.ceil(1 / (2.0 * lb))\r\n boxcar_lb = np.empty(len_boxcar_lb)\r\n boxcar_lb.fill(1.0 / len_boxcar_lb)\r\n boxcar_ones_lb = np.ones_like(boxcar_lb)\r\n\r\n #If the time_series is a 1-d, we add a dimension, so that we can iterate\r\n #over 2-d inputs:\r\n if len(time_series.shape) == 1:\r\n time_series = np.array([time_series])\r\n for i in range(time_series.shape[0]):\r\n if ub:\r\n #Start by applying a low-pass to the signal. Pad the signal on\r\n #each side with the initial and terminal signal value:\r\n pad_s = np.hstack((boxcar_ones_ub *\r\n time_series[i, 0], time_series[i]))\r\n pad_s = np.hstack((pad_s, boxcar_ones_ub * time_series[i, -1]))\r\n\r\n #Filter operation is a convolution with the box-car(iterate,\r\n #n_iterations times over this operation):\r\n for iteration in range(n_iterations):\r\n conv_s = np.convolve(pad_s, boxcar_ub)\r\n\r\n #Extract the low pass signal by excising the central\r\n #len(time_series) points:\r\n time_series[i] = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):\r\n conv_s.shape[-1] / 2 + np.ceil(n / 2.)]\r\n\r\n #Now, if there is a high-pass, do the same, but in the end subtract out\r\n #the low-passed signal:\r\n if lb:\r\n pad_s = np.hstack((boxcar_ones_lb *\r\n time_series[i, 0], time_series[i]))\r\n pad_s = np.hstack((pad_s, boxcar_ones_lb * time_series[i, -1]))\r\n\r\n #Filter operation is a convolution with the box-car(iterate,\r\n #n_iterations times over this operation):\r\n for iteration in range(n_iterations):\r\n conv_s = np.convolve(pad_s, boxcar_lb)\r\n\r\n #Extract the low pass signal by excising the central\r\n #len(time_series) points:\r\n s_lp = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):\r\n conv_s.shape[-1] / 2 + np.ceil(n / 2.)]\r\n\r\n #Extract the high pass signal simply by subtracting the high pass\r\n #signal from the original signal:\r\n time_series[i] = time_series[i] - s_lp + np.mean(s_lp) # add mean\r\n #to make sure that there are no negative values. 
This also seems to\r\n #make sure that the mean of the signal (in % signal change) is\r\n #close to 0\r\n\r\n return time_series.squeeze()", "def rawSignals(obars, window=21, nbands=3, inc=0.5, save=True):\n bars = obars.copy() # avoid warnings\n bars['OHLC'] = np.nan # typical price\n bars.OHLC.values[:] = np.mean(bars.values[:,0:4], axis=1) # 1000x faster\n price = bars.OHLC.values\n for i in range(nbands):\n upband, sma, lwband = ta.BBANDS(price, window*inc)\n if save: # for plotting stuff\n bars['bandlw'+str(i)] = lwband\n bars['bandup'+str(i)] = upband\n bars['bandsg'+str(i)] = 0 # signal for this band\n signals = fastbollingerSignal(price, upband, lwband)\n bars.loc[:, 'bandsg'+str(i)] = signals.astype(int) # signal for this band\n inc += 0.5\n bars.dropna(inplace=True)\n return bars", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, debug, subbands_ifft, return_subbands_only, rectify_and_lowpass_subbands, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function, include_subbands_noise, subbands_noise_mean, subbands_noise_stddev):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n if debug: # return the real and imaginary parts of the subbands separately -- use if matching to their output\n nets['subbands_r'] = tf.real(nets['subbands'])\n nets['subbands_i'] = tf.imag(nets['subbands'])\n\n # TODO: with using subbands_ifft is redundant. 
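# Editor's note (added comment, not part of the original snippet): the branch that follows
# inverts the FFT to recover time-domain subbands; half-wave rectification (ReLU) followed by
# the Hanning-window pooling then acts as a rough envelope-extraction and downsampling stage,
# mirroring the rectify-and-lowpass step of standard cochleagram front ends.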
\n # make the time subband operations if we are returning the subbands or if we want to include all of the keys in the graph\n if subbands_ifft or return_subbands_only or include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n if return_subbands_only or include_all_keys:\n nets['subbands_time'] = nets['subbands_ifft']\n if rectify_and_lowpass_subbands: # TODO: the subband operations are hard coded in?\n nets['subbands_time_relu'] = tf.nn.relu(nets['subbands_time'], name='rectified_subbands')\n nets['subbands_time_lowpassed'] = hanning_pooling_1d_no_depthwise(nets['subbands_time_relu'], downsample=2, length_of_window=2*4, make_plots=False, data_format='NCW', normalize=True, sqrt_window=False)\n\n # TODO: noise is only added in the case when we are calcalculating the time subbands, but we might want something similar for the cochleagram\n if return_subbands_only or include_all_keys:\n # Compress subbands if specified and add noise. \n nets = compression_function(nets, input_node_name='subbands_time_lowpassed', output_node_name='subbands_time_lowpassed_compressed')\n if include_subbands_noise:\n nets = add_neural_noise(nets, subbands_noise_mean, subbands_noise_stddev, input_node_name='subbands_time_lowpassed_compressed', output_node_name='subbands_time_lowpassed_compressed_with_noise')\n nets['subbands_time_lowpassed_compressed_with_noise'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed_with_noise'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed_with_noise']\n else:\n nets['subbands_time_lowpassed_compressed'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed']\n\n return nets", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def five_years_avg_dividend(self, five_years_avg_dividend: float):\n\n self._five_years_avg_dividend = five_years_avg_dividend", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] 
= final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, 
fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def make_animation_subset_levels(X, fixed_axes, fixed_value_1, fixed_value_2,\n filtration_size):\n # Create the array indexes\n obj = [slice(None, None, None)] * 4\n obj[fixed_axes[0]] = fixed_value_1\n obj[fixed_axes[1]] = fixed_value_2\n # print obj\n\n # Create sequence of threshold values\n thresholds = np.linspace(start=np.amin(X[obj]), stop=np.amax(X[obj]), num=filtration_size)\n # print thresholds\n # TEST PLOT\n # fig, ax = plt.subplots()\n # # interp = kwargs.get('interpolation', 'none')\n # # colors = kwargs.get('colormap', 'seismic')\n # img0 = ax.imshow(X[obj], cmap='Blues', interpolation='none')\n # fig.colorbar(img0, ax=ax, fraction=0.022, pad=0.01)\n # ax.invert_yaxis()\n # # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n # fig.tight_layout()\n # fig.show()\n\n # def get_middle(xx):\n # return 1 - (float(np.amax(xx)) / (np.amax(xx) + abs(np.amin(xx))))\n\n def init():\n global fig, ax, im, tx\n fig = plt.figure()\n ax = plt.axes()\n # idx = list(obj)\n # idx[sweep_axis] = slice(None, None, None)\n # middle = get_middle(X[idx])\n # print obj\n im = ax.imshow(X[obj] < thresholds[2], cmap='Blues',#cmap=shiftedColorMap(cm.seismic, midpoint=middle),\n interpolation='none', aspect='auto')\n # vmin=np.amin(X[idx]), vmax=np.amax(X[idx]))\n ax.invert_yaxis()\n # cb = fig.colorbar(im)\n # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n return\n\n def animate(n):\n # update indexes\n # obj[sweep_axis] = n\n # vmax = np.max(X[obj])\n # vmin = np.min(X[obj])\n im.set_data(X[obj] < thresholds[n])\n # im.set_clim(vmin, vmax)\n # tx.set_text('%s = %d' % 
(X.dimensions[sweep_axis], n))\n return\n\n init()\n anim = animation.FuncAnimation(fig, animate, frames=np.arange(filtration_size), interval=100, blit=False)\n return anim", "def get_bands(self, data_array_norm, baseline_array_norm, f):\n\n fmax = 50\n fidx = f < fmax\n fnum = f[fidx].size\n\n band_tot = np.zeros((fnum, fnum, data_array_norm.shape[0], data_array_norm.shape[2], data_array_norm.shape[3]))\n band_tot_bl = np.zeros((fnum, fnum, baseline_array_norm.shape[0], baseline_array_norm.shape[2], baseline_array_norm.shape[3]))\n for i in range(fnum):\n for j in range(fnum):\n if j > i:\n idx = (f >= f[i]) & (f < f[j])\n band_tot[i, j, :, :] = np.sum(data_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n band_tot_bl[i, j, :, :] = np.sum(baseline_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n\n\n band_tot_bl1 = np.mean(band_tot_bl, axis=3) # average across time bins\n band_tot_bl2 = np.repeat(band_tot_bl1[:, :, :, None, :], band_tot_bl.shape[3], axis=3) # repeat same value across time\n return band_tot, band_tot_bl2, f[fidx]", "def numberOfWideBands(config=None):\n # Get correlator configuration\n c = config\n if c == None: \n c = utils.getConfigAstroband()\n\n # Determine if we have both wideband and spectral line astrobands. \n # If we do, we return nwide & maxbandwidth for sl only since \n # this is the correlator which will be attached to all ants.\n astrobands = [ abc[0] for abc in c ]\n if len( astrobands ) == 0:\n raise Exception, \"No existing astroband configuration.\"\n if max( astrobands ) > 8 and min( astrobands ) < 9: \n astrobands = [ ab for ab in astrobands if ab < 9 ]\n\n # Check bandwidth\n nwide = 0\n maxbandwidth = 0\n for t in c:\n astroband = t[0]\n # Skip band if it is not being used or is not in astroband list above.\n mp = commands.queryString('SignalPath.Mapping.Astroband%d.confTag' % (astroband) )\n if mp == 'NONE' or astroband not in astrobands: continue\n\n # Get bandwidth\n if t[2] == commands.BW500:\n bw = 500\n elif t[2] == commands.BW250:\n bw = 250\n elif t[2] == commands.BW125:\n bw = 125\n elif t[2] == commands.BW62:\n bw = 62\n elif t[2] == commands.BW31:\n bw = 31\n elif t[2] == commands.BW8:\n bw = 8\n elif t[2] == commands.BW2:\n bw = 2\n else:\n raise Exception, 'Could not find bandwith for '+str(t[2])\n\n # Maximum?\n if bw > maxbandwidth: \n maxbandwidth = bw\n if utils.isDualPol( astroband ):\n nwide = 2 \n else:\n nwide = 1\n elif bw == maxbandwidth:\n if utils.isDualPol( astroband ): \n nwide += 2 \n else:\n nwide += 1\n\n return nwide, maxbandwidth", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n verbose = kw.pop('verbose',False)\r\n ### prepare RO results and sort them according to sweep point\r\n for a in [a_lt3,a_lt4]:\r\n a.pts = a.g.attrs['sweep_length']\r\n a.ssros = a.agrp['ssro_results'].value\r\n a.readouts = a.g.attrs['nr_of_ROsequences']\r\n # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n ### correlate the ROs with each other by making a boolean filter:\r\n ### variables here are described in terms of spin states!\r\n m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n \r\n ### now define unique identifiers for each Ro correlation and recast the correlations into a single array.\r\n ### As identifieres I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... 
and so forth\r\n RO_correlators = np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n + np.array(len(a_lt3.ssros)*[4])*m00 \r\n ### PH - added to make sure that has a full set of repetitions\r\n RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n \r\n ### now sort the correlators and the adwin fltr according to the sweep pts\r\n sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n ### from now on: no numpy magic anymore. from here it is brutforce 'for-looping'\r\n ### (all conceived arrays will have different lengths due to temporal filtering. this break most np methods)\r\n ### although vstack and hstack would probably work...\r\n \r\n return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurence\r\n for i in range(a_lt3.pts): \r\n correlators_at_sweep_pt = [0,0,0,0]\r\n for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def system_5(in_dir, out_dir, threshold, num_frames=150, num_prev_frames=10, blur=(3,3), as_numeric=True, stretched=True):\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([cv2.imread(f) for f in filenames[0:num_prev_frames]])\n seed_img = mode(initial_background_model)\n previous_frames = deque(initial_background_model, maxlen=num_prev_frames)\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': 
str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def filter_bands(self, imagery, bands=None, names=None, wavelengths=None) -> 'ImageCollection':\n\n graph = {\n 'process_id': 'filter_bands',\n 'imagery': imagery.graph,\n }\n\n if bands:\n graph['bands'] = bands\n if names:\n graph['names'] = names\n if wavelengths:\n graph['wavelengths'] = wavelengths\n\n imagery.graph = graph\n return imagery", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 
1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n #iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n 
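# Editor's note (added comment): the per-band global minima/maxima collected in this function
# are intended for a later 0-1 rescaling of the form v_norm = (v - band_min) / (band_max - band_min);
# the normalization itself is assumed to happen in a separate step that is not shown here.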
#read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = 
sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n 
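# Editor's note (added comment): Pixel_To_Lambdas is assumed to be a project helper that maps
# detector pixel columns to wavelengths (in nm) from the named grating/hologram dispersion and
# the pointing; the commented-out Hologram(...) block just below shows the older, equivalent
# calibration path it replaces.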
lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n 
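# Editor's note (added comment): the integer codes used below (3 = shadow, 4 = snow, 5 = cloud)
# follow this snippet's own fmask band convention; Fmask/CFMask products encode their classes
# differently across versions, so verify these values against the documentation of the actual
# QA band before reusing the mask.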
shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def determine_exposure_time(cn, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate varies SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. 
continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def ls5_sr_corr(img):\n return img.select(['B1'], ['BLUE']).float().multiply(0.91996).add(37).int16()\\\n .addBands(img.select(['B2'], ['GREEN']).float().multiply(0.92764).add(84).int16())\\\n .addBands(img.select(['B3'], ['RED']).float().multiply(0.8881).add(98).int16())\\\n .addBands(img.select(['B4'], ['NIR']).float().multiply(0.95057).add(38).int16())\\\n .addBands(img.select(['B5'], ['SWIR1']).float().multiply(0.96525).add(29).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.99601).add(20).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n 
(False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def roi_to_wm(img,brain_wm,nth):\n \n data = img.get_data()\n wmdata = brain_wm.get_data()\n shape = data.shape\n\n roi_ids = np.unique(data)\n roi = roi_ids[1:]\n roi = [int(i) for i in roi]\n print roi\n \n wmdata = wmdata!=0\n result_mask = np.zeros(data.shape)\n #print wmdata \n \n #First, get the nonzero voxel index in image data.\n #Here image data is a label volume.\n #ROIs is in it\n for roi_id in roi:\n #print roi_id\n tmp_mask = data==roi_id\n #print tmp_mask\n indexs = np.transpose(tmp_mask.nonzero())\n #print indexs\n \n #Second, find the nearest wm voxel for each indexs.\n print indexs.shape\n for coor in indexs:\n #print coor\n x = coor[0]\n y = coor[1]\n z = coor[2]\n \n if wmdata[x,y,z]==1:\n result_mask[x,y,z] = roi_id\n else:\n #find the nearest neighbor.\n flag = False\n radius = 1\n mindist_voxel = []\n mindist = 1000 \n while radius<100: \n neigh_list = get_neighbors(coor,radius,shape)\n radius += 1\n #find the nearest white matter voxel.\n for n in neigh_list:\n #print n\n if wmdata[n[0],n[1],n[2]]==1:\n flag = True\n dist = 
np.sqrt((n[0]-x)**2+(n[1]-y)**2+(n[2]-z)**2)\n # if the distance is smaller than tag, choose it to be nearest.\n \n if dist < mindist:\n mindist = dist\n mindist_voxel = n\n \n if flag:\n break\n #print mindist_voxel\n if mindist_voxel!=[]:\n result_mask[mindist_voxel[0],mindist_voxel[1],mindist_voxel[2]] = roi_id \n for roi_id in roi:\n tmp_mask = result_mask==roi_id\n roi_size = tmp_mask.sum() \n print roi_id, roi_size\n result = img\n result._data = result_mask\n #roi_name = os.path.join(mkdir,'roi_%s.nii.gz'%i)\n nib.save(result,\"test_regroi.nii.gz\")\n \n return True", "def firwin(N, cutoff, width=None, window='hamming'):\n\n from signaltools import get_window\n if isinstance(width,float):\n A = 2.285*N*width + 8\n if (A < 21): beta = 0.0\n elif (A <= 50): beta = 0.5842*(A-21)**0.4 + 0.07886*(A-21)\n else: beta = 0.1102*(A-8.7)\n window=('kaiser',beta)\n\n win = get_window(window,N,fftbins=1)\n alpha = N//2\n m = numpy.arange(0,N)\n h = win*special.sinc(cutoff*(m-alpha))\n return h / numpy.sum(h,axis=0)", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n 
reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def _getWindows(self,\n signal: Signal,\n grid: Signal,\n addAnacrusis=False,\n addAfterLastBeat=False,\n window=\"square\",\n aggregation='rmse'):\n\n result = []\n times = copy.copy(grid.times)\n # pan times\n panning = self.parameters[\"panning\"].value * np.median(np.diff(times))\n times = [time - panning for time in times]\n # if addAnacrusis:\n # times = np.insert(times, 0, 0) # TODO make it faster by not creating a new array\n # annacrusisValues = signal.getValues(0, times[])\n # if len(annacrusisValues):\n # result.append(self._getWindow(annacrusisValues, window, aggregation))\n # else: # If the first tick is at 0, then the anacrusis is 0, or [0 ,..., 0] if the signal is multidimensional\n # result.append(signal.values[0] * 0.)\n\n for i in range(len(grid) - 1):\n result.append(self._getWindow(signal.getValues(times[i], times[i + 1]), signal.sampleRate, window, aggregation))\n\n # if addAfterLastBeat:\n # afterValues = signal.getValues(grid.times[-1], signal.duration)\n # if len(afterValues):\n # result.append(self._getWindow(afterValues, window, aggregation))\n # else:\n # result.append(signal.values[0] * 0.)\n # else:\n # times = times[:-1]\n\n return Signal(result, times=grid.times[:-1])", "def optimizeThresholds(band=0, tmo=7) :\n # Optimize thresholds should take <5 seconds, so set tmo to 7\n bandlist = helpers.makeList(band)\n cblist = makeCorrBandList(band)\n # If band is offline, don't wait for it.\n if ( len( cblist ) != 0 ) :\n rangedCb = helpers.formatAsRanges(cblist)\n c1 = \"Waiting for astrobands %s before turning noise on\" % rangedCb\n # previous command was probably configastroband, which could take \n # 30 seconds if loading a new FPGA configuration.\n wait(CORR, cblist, 40, ALL, precomment=c1)\n\n noiseon()\n\n if ( band == 0 ) :\n cstr = \"Optimizing thresholds on all Astrobands\"\n else :\n rangedAb = helpers.formatAsRanges(bandlist)\n cstr = \"Optimizing thresholds on Astroband(s) %s \" % ( rangedAb )\n rtdComment( cstr )\n s.optimizeThresholds( bandlist );\n if ( len( cblist ) != 0 ) :\n rangedCb = helpers.formatAsRanges(cblist)\n c1 = \"Waiting for astrobands %s before turning noise off\" % rangedCb\n wait(CORR, cblist, tmo, ALL, precomment=c1)\n\n noiseoff()", "def test_thresh_color(images):\n for img in images:\n # Get the stack bounds to draw onto the main image\n stack_bounds = get_stack_bounds(img)\n\n # Get all the sub-images for each stack\n stack_images = get_stack_images(img)\n\n SIZE = (200, 300)\n filtered_imgs = []\n\n # Loop through all the stacks\n for stack_bound, stack_img in 
zip(stack_bounds, stack_images):\n #Draw the rectangle for the current stack\n disp = deepcopy(img)\n located_stacks_img = draw_rect(np.copy(disp), stack_bound, [0,0,0])\n cv2.imshow('Filtering stack', located_stacks_img)\n\n # Convert the current stack image into hsv\n stack_img_hsv = cv2.cvtColor(stack_img, cv2.COLOR_BGR2HSV)\n for i, color in enumerate(COLORS):\n contours = thresh_color(stack_img, stack_img_hsv, COLORS[color])\n\n # Draw the contours\n stack2 = deepcopy( stack_img)\n cont_img = cv2.drawContours(stack2, contours, -1, (255,255,255), 2)\n # cont_img = cv2.resize(cont_img, SIZE)\n\n # Put the number of contours as text\n txt = '{}:{}'.format(color, len(contours))\n print(txt)\n\n # Display the contour information to the screen\n cv2.imshow(txt, scale_image(cont_img, 9))\n filtered_imgs.append(cont_img)\n cv2.moveWindow(txt, 180*i, 600)\n # cv2.imshow('filtered_images', np.hstack(filtered_imgs))\n print()\n # Skip to the next image\n if cv2.waitKey(0) == ord('1'):\n break\n cv2.destroyAllWindows()", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def init_moscatel(filedir, filters_in_config, output_dir, skip_every=None):\n file_list = glob(os.path.join(filedir,'*.fits'))\n file_list.sort()\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if os.listdir(filedir) != []:\n #if len(file_list)>0:\n print('total no. of raw data frames: {0}\\n'.format(len(file_list)))\n\n if skip_every is not None:\n print('Skipping every {0}-th frames per band\\n'.format(skip_every))\n\n else: #elif skip_every == None:\n '''\n bug: does not print even if skip_every is not entered in terminal\n '''\n print('Analyzing all raw frames per band')\n skip_every=1\n\n bands = {}\n for j in filters_in_config:\n j=j.strip(' ')\n bands[j]=[]\n filters_in_hdr=[]\n\n #get list of frames by filter based on header\n for i in tqdm(file_list[::skip_every]):\n hdr = pf.open(i)[0].header\n filters_in_hdr.append(hdr['FILTER'])\n for j in filters_in_config:\n if hdr['FILTER'] == j:\n j=j.strip(' ')\n bands[j].append(i)\n\n filters_in_hdr_set=list(set(filters_in_hdr)).sort()\n\n for k in bands.keys():\n print('{0}-band={1} frames'.format(k, len(bands[k])))\n #save into txtfile\n name = os.path.join(output_dir,k+'-band.txt')\n with open(name, 'w') as z: #overwrite\n #write line by line\n for line in bands[k]:\n z.write('{}\\n'.format(line))\n print('\\nfilenames sorted by band saved in {}'.format(output_dir))\n\n else:\n print('ERROR: check your data directory')\n #return empty dict\n bands={}\n\n return filters_in_hdr_set, bands", "def _window_function(arr: np.ndarray, border: int = 0) -> np.ndarray:\n ndata = len(arr)\n nwind = ndata - 2 * border\n w = np.zeros(ndata)\n for i in range(nwind):\n w[i + border] = np.sin(np.pi * (i + 1.0) / (nwind + 1.0))\n return w", "def sliding_window_tiler(\n xraster,\n model,\n n_classes: int,\n pad_style: str = 'reflect',\n overlap: float = 0.50,\n constant_value: int = 600,\n batch_size: int = 1024,\n threshold: float = 0.50,\n standardization: str = None,\n mean=None,\n std=None,\n window: str = 'triang' # 'overlap-tile'\n ):\n tile_size = model.layers[0].input_shape[0][1]\n tile_channels = model.layers[0].input_shape[0][-1]\n\n tiler_image = Tiler(\n data_shape=xraster.shape,\n tile_shape=(tile_size, tile_size, tile_channels),\n channel_dimension=2,\n mode=pad_style,\n constant_value=600\n )\n\n # Define the tiler and merger based on the output size of the prediction\n tiler_mask = Tiler(\n 
data_shape=(xraster.shape[0], xraster.shape[1], n_classes),\n tile_shape=(tile_size, tile_size, n_classes),\n channel_dimension=2,\n mode=pad_style,\n constant_value=600\n )\n\n # merger = Merger(tiler=tiler_mask, window=window, logits=4)\n merger = Merger(\n tiler=tiler_mask, window=window)\n\n # Iterate over the data in batches\n for batch_id, batch in tiler_image(xraster, batch_size=batch_size):\n\n # Standardize\n batch = batch / 10000.0\n\n # Predict\n batch = model.predict(batch, batch_size=batch_size)\n\n # Merge the updated data in the array\n merger.add_batch(batch_id, batch_size, batch)\n\n prediction = merger.merge(unpad=True)\n\n if prediction.shape[-1] > 1:\n prediction = np.argmax(prediction, axis=-1)\n else:\n prediction = np.squeeze(\n np.where(prediction > threshold, 1, 0).astype(np.int16)\n )\n return prediction", "def stack_tir(scene_urls,cloud_mask_bits,aoi,aoi_crs,\n subtract_median_lst=True,subtract_air_temp=False):\n if subtract_air_temp:\n ceda_password = get_ceda_password()\n at = met_climate.access_ukcp09(cf.ceda_username,ceda_password)\n\n \n # with rasterio.open(scene_bqa) as bqa:\n # with rasterio.open(scene_tir) as tir:\n\n # bqa_data,bqa_trans = ru.read_in_aoi(bqa,**aoi_kwargs)\n # tir_data,tir_trans = ru.read_in_aoi(tir,**aoi_kwargs)\n \n # bqa_data = bqa_data[0,:,:]\n # tir_data = tir_data[0,:,:]\n # tir_data = ma.array(tir_data,dtype=float,\n # mask=ru.mask_qa(bqa_data,bitmask=0b1))\n\n # (ymin,ymax) = (0, tir_data.shape[0])\n # (xmin,xmax) = (0, tir_data.shape[1])\n \n counter=-1\n for scene_url in scene_urls:\n counter+=1\n scene_tir = scene_url\n scene_bqa = scene_url.replace('B'+tirband,'B'+qaband)\n scene_red = scene_url.replace('B'+tirband,'B'+rband)\n scene_nir = scene_url.replace('B'+tirband,'B'+nband)\n scene_metadata = scene_url.replace('B'+tirband+'.TIF','MTL.txt')\n\n print('Reading scene {}'.format(counter+1))\n try:\n with rasterio.open(scene_bqa) as bqa:\n #print(scene_bqa)\n bqa_data,bqa_trans = ru.read_in_aoi(bqa,aoi=aoi,aoi_crs=aoi_crs)\n\n with rasterio.open(scene_tir) as tir:\n #print(scene_tir)\n tir_data,tir_trans = ru.read_in_aoi(tir,aoi=aoi,aoi_crs=aoi_crs)\n tir_crs = tir.crs\n tir_profile = tir.profile\n\n with rasterio.open(scene_red) as red:\n #print(scene_red)\n red_data,red_trans = ru.read_in_aoi(red,aoi=aoi,aoi_crs=aoi_crs)\n red_crs = red.crs\n\n with rasterio.open(scene_nir) as nir:\n #print(scene_nir)\n nir_data,nir_trans = ru.read_in_aoi(nir,aoi=aoi,aoi_crs=aoi_crs)\n \n except OSError as e:\n print('ERROR',e)\n print('skipping scene')\n counter = counter-1\n continue\n \n # Determine size of stack allowing for AoI to extend outside of scene\n if counter == 0:\n aoi_box = rasterio.warp.transform_bounds(aoi_crs,tir_crs,*aoi.values())\n aoi_left, aoi_bottom, aoi_right, aoi_top = aoi_box\n aoi_box = dict(zip(('minx','miny','maxx','maxy'),aoi_box))\n # rowmin,colmin = (bqa.index(aoi_left,aoi_top)) #,op=round))\n # rowmax,colmax = (bqa.index(aoi_right,aoi_bottom)) #,op=round))\n # The above two lines are fine but the following does not \n # require the rasterio dataset to be kept open\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,aoi_left,aoi_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,aoi_right,aoi_bottom)\n stack_height,stack_width = (rowmax-rowmin,colmax-colmin)\n lst_stack = (ma.zeros((len(scene_urls),stack_height,stack_width),\n dtype=np.float,fill_value=np.nan\n )+np.nan) \n \n # Determine size of intersect in THIS scene\n intersect = ru.aoi_scene_intersection(aoi_box,bqa)\n ins_left, ins_bottom, 
ins_right, ins_top = intersect.bounds\n #rowmin,colmin = (bqa.index(ins_left,ins_top,op=round))\n #rowmax,colmax = (bqa.index(ins_right,ins_bottom,op=round))\n # The above two lines are incorrect now that we read a window:\n # We need to transform the coordinates into the row,col of \n # the window, not the original file.\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,ins_left,ins_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,ins_right,ins_bottom)\n\n try:\n # Subset data \n bqa_data = ma.array(bqa_data[0,rowmin:rowmax,colmin:colmax])\n tir_data = ma.array(tir_data[0,rowmin:rowmax,colmin:colmax])\n red_data = ma.array(red_data[0,rowmin:rowmax,colmin:colmax])\n nir_data = ma.array(nir_data[0,rowmin:rowmax,colmin:colmax])\n assert tir_data.shape == lst_stack.shape[1:]\n except (IndexError,AssertionError) as e:\n print('ERROR:',e)\n print('loop count',counter)\n print(tir_data.shape, lst_stack.shape)\n print(rowmin,rowmax,colmin,colmax)\n import pdb; pdb.set_trace()\n\n lst_data = lst.calculate_land_surface_temperature_NB(\n red_data, nir_data, tir_data,\n red_trans, tir_trans, \n red_crs, tir_crs, scene_metadata\n )\n \n # Masks\n smw = 11\n mask_all = filters.maximum_filter(\n ru.mask_qa(bqa_data,bits=cloud_mask_bits),size=smw\n )\n\n lst_data_mask_all = ma.array(lst_data,\n mask=mask_all,\n dtype=np.float,\n fill_value=np.nan) #.filled()\n\n # After masking, reproject\n # not necessary if they share a CRS\n if counter > 0:\n assert tir_crs == prev_crs\n prev_crs = tir_crs\n\n # Now do some normalisation\n if subtract_air_temp:\n filename = scene_tir.split('/')[-1]\n datestring = filename.split('_')[3]\n\n atscene = met_climate.dummy_scene( \n tir_crs, tir_trans, aoi_box,(stack_height,stack_width))\n\n # import pdb; pdb.set_trace()\n # If the following fails, it may mean there was a problem setting up the session\n atdata = at.grid_temp_over_scene(\n atscene, datestring, interpolation='linear')\n atdata = atdata[rowmin:rowmax,colmin:colmax]\n assert lst_data_mask_all.shape == atdata.shape\n lst_data_mask_all = ma.array(\n lst_data_mask_all - atdata,\n mask=mask_all,\n fill_value=np.nan)\n \n if subtract_median_lst:\n # ALSO subtract median xLST\n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n elif subtract_median_lst:\n # Subtract median LST from scene (within QA mask) \n \n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n # Then add to stack\n lst_stack[counter,:,:] = lst_data_mask_all\n\n # Make profile for file output\n N_layers = counter+1\n tir_profile.update(\n dtype=rasterio.float64,\n width=stack_width,\n height=stack_height,\n transform=tir_trans,\n count=N_layers,\n compress='lzw'\n )\n\n\n return lst_stack, tir_profile", "def movie(l, zlim=0.7, zstep=0.01, nside=64, dump_plot_dir=None, nsn_func=None,\n bands=['g', 'r', 'i', 'z'],\n exclude_bands=['u', 'y'],\n vmax_nsn=None,\n min_cadence=0.5,\n lc_template=None,\n salt2=None):\n \n m = Metrics(l, model_filename=salt2, lc_template=lc_template, nside=nside)\n nsn_tot = np.zeros(m.npix)\n nsn_inst = np.zeros(m.npix)\n cadence_tot = np.zeros(m.npix)\n cadence_nhits = np.zeros(m.npix) \n zmax_tot = np.zeros(m.npix)\n zmax_nhits = np.zeros(m.npix)\n tmp_map = np.zeros(m.npix)\n\n # median values \n nsn_tot_history = []\n nsn_inst_history = []\n median_cadence_inst_history = []\n zmax_inst_history = []\n \n # p = Plotter()\n # for 
block in pxlog:\n # for m,acc in metrics:\n # r = m(block)\n # a.accumulate(r)\n # if a.do_plot:\n # p.plot_maps(a)\n \n \n \n # loop on the survey mjd -- by steps of 1 day\n for mjd in np.arange(m.mjd.min(), m.mjd.max()+1):\n zmax = np.zeros(m.npix)\n nsn = np.zeros(m.npix)\n \n # check that the sampling is ok at z=0\n s,u = m.select_window(mjd, z=0., bands=bands, exclude_bands=exclude_bands)\n c = m.cadence(u, z=0.)\n first, last = m.first_last_visits(mjd, u, z=0.)\n c[c<min_cadence] = 0.\n c[first==0.] = 0.\n c[last==0.] = 0.\n c0_ok = c>0.\n \n # loop over the redshift range, and check the resolution in\n # color as a function of redshift. Store the highest redshift\n # that passes the cuts \n for z in np.arange(0.1, zlim+zstep, zstep)[::-1]:\n # select the window \n s,u = m.select_window(mjd, z=z, exclude_bands=exclude_bands)\n \n # average cadence\n # note: explore median dt\n cz = m.cadence(u, z=z)\n \n # observations before -15 and after +30 ? \n firstz, lastz = m.first_last_visits(mjd, u, z=z)\n # cut in cadence \n cz[(cz<min_cadence)] = 0.\n \n # cut on the last visit\n cz[(firstz==0.)] = 0.\n cz[(lastz==0)] = 0.\n cz *= c0_ok\n\n # cut on sigma amplitude\n if np.abs(z-0.3) <= 0.01:\n snr_g = m.amplitude_snr(mjd, instrument_name + '::g', z, s)\n snr_r = m.amplitude_snr(mjd, instrument_name + '::r', z, s)\n snr_i = m.amplitude_snr(mjd, instrument_name + '::i', z, s)\n snr_z = m.amplitude_snr(mjd, instrument_name + '::z', z, s) \n if z <= 0.3:\n snr_ok = m.cut_on_amplitude_snr(mjd, z, s, \n snr_cuts = {instrument_name + '::g': 30., \n instrument_name + '::r': 40., \n instrument_name + '::i': 30., \n instrument_name + '::z': 20.})\n else:\n snr_ok = m.cut_on_amplitude_snr(mjd, z, s, \n snr_cuts = {instrument_name + '::r': 40., \n instrument_name + '::i': 30., \n instrument_name + '::z': 20.})\n \n # update max-z map \n zmax[(cz>0) & (snr_ok>0.) & (zmax==0.)] = z\n c[c==0] = cz[c==0]\n # update the number of supernovae for that day \n # we update (1) a map that contains the total\n # number of SNe and (2) a NTuple that contains\n # mjd, nsn, zmax\n if nsn_func is not None:\n nsn_inst[:] = 0.\n nsn_inst[zmax>0.] = nsn_func(zmax[zmax>0])\n nsn_tot[zmax>0.] 
+= nsn_inst[zmax>0.]\n else:\n logging.warning('no function to compute number of SNe')\n\n # update the cumulative maps\n cadence_tot += c\n cadence_nhits[c>0] += 1\n zmax_tot += zmax\n zmax_nhits[zmax>0] += 1\n \n # m.plot_map(first, fig=1, vmin=0., vmax=1.25, sub=221, cbar=False)\n # m.plot_map(last, fig=1, vmin=0., vmax=1.25, sub=222, cbar=False)\n fig = plt.figure(1, figsize=(15.,7.5))\n human_date = DateTimeFromMJD(mjd).strftime('%Y-%m-%d')\n fig.suptitle('[%s mjd=%6.0f]' % (human_date, mjd))\n m.plot_map(nsn_tot, fig=1, sub=231, vmin=0., vmax=vmax_nsn, cbar=True, title='$N_{SNe}: %6.0f$ (tot)' % nsn_tot.sum())\n nsn_tot_history.append((mjd,nsn_tot.sum()))\n tmp_map[:] = hp.UNSEEN ; idx = zmax_nhits>0\n tmp_map[idx] = zmax_tot[idx] / zmax_nhits[idx]\n med = np.median(tmp_map[tmp_map>0])\n m.plot_map(tmp_map, fig=1, sub=232, vmin=0., vmax=0.5, cbar=True, title='$z_{max}$ (avg) [%4.2f]' % (med if ~np.isnan(med) else 0))\n tmp_map[:] = hp.UNSEEN ; idx = cadence_nhits>0\n tmp_map[idx] = cadence_tot[idx] / cadence_nhits[idx]\n med = np.median(tmp_map[tmp_map>0])\n m.plot_map(tmp_map, fig=1, sub=233, vmin=0., vmax=1., cbar=True, title='cadence [day$^{-1}$] (avg) [%4.2f]' % (med if ~np.isnan(med) else 0))\n \n m.plot_map(nsn_inst, fig=1, sub=234, vmin=0., vmax=0.015, cbar=True, title='$N_{SNe}: %4.0f$' % nsn_inst.sum())\n nsn_inst_history.append((mjd,nsn_inst.sum()))\n med = np.median(zmax[zmax>0])\n m.plot_map(zmax, fig=1, vmin=0., vmax=0.5, sub=235, cbar=True, title='$z_{max}$ [%4.2f]' % (med if ~np.isnan(med) else 0))\n zmax_inst_history.append((mjd,(med if ~np.isnan(med) else 0)))\n med = np.median(c[c>0])\n m.plot_cadence(c, fig=1, dump_plot_dir=dump_plot_dir, \n vmin=0.,\n vmax=1.,\n min_cadence=min_cadence,\n sub=236,\n title='cadence [day$^{-1}$] [%4.2f]' % (med if ~np.isnan(med) else 0.),\n cbar=True)\n median_cadence_inst_history.append((mjd,(med if ~np.isnan(med) else 0.)))\n\n # SNR debug plots \n fig = plt.figure(2)\n fig.suptitle('[%s mjd=%6.0f]' % (human_date, mjd))\n m.plot_map(snr_g, fig=2, sub=221, vmin=0., vmax=30., cbar=True, title='SNR[g]')\n m.plot_map(snr_r, fig=2, sub=222, vmin=0., vmax=40., cbar=True, title='SNR[r]')\n m.plot_map(snr_i, fig=2, sub=223, vmin=0., vmax=30., cbar=True, title='SNR[i]') \n m.plot_map(snr_z, fig=2, sub=224, vmin=0., vmax=20., cbar=True, title='SNR[z]', dump_plot_dir=dump_plot_dir, prefix='snr')\n\n # cadence debug plots\n\n m.fig_odometer += 1\n\n\n # dump history\n nsn_tot_history = np.rec.fromrecords(nsn_tot_history, names=['mjd', 'val'])\n nsn_inst_history = np.rec.fromrecords(nsn_inst_history, names=['mjd', 'val'])\n zmax_inst_history = np.rec.fromrecords(zmax_inst_history, names=['mjd', 'val'])\n median_cadence_inst_history = np.rec.fromrecords(median_cadence_inst_history, names=['mjd', 'val'])\n np.save(dump_plot_dir + os.sep + 'nsn_tot_history.npy', nsn_tot_history)\n np.save(dump_plot_dir + os.sep + 'nsn_inst_history.npy', nsn_inst_history)\n np.save(dump_plot_dir + os.sep + 'zmax_inst_history.npy', zmax_inst_history)\n np.save(dump_plot_dir + os.sep + 'median_cadence_inst_history.npy', median_cadence_inst_history)", "def test_matched_filter5():\n x_size = 80\n y_size = 90\n\n objects = numpy.zeros((1, 5))\n\n # Make filter with unit sum.\n objects[0,:] = [x_size/2, y_size/2, 1.0, 1.0, 1.0]\n psf = dg.drawGaussians((x_size, y_size), objects)\n psf = psf/numpy.sum(psf)\n flt = matchedFilterC.MatchedFilter(psf)\n\n # Make test image.\n image = numpy.zeros((x_size, y_size))\n image[int(x_size/2), int(y_size/2)] = float(100)\n\n mf_conv 
= flt.convolve(image)\n\n t1 = numpy.fft.fft2(recenterPSF.recenterPSF(psf))\n t2 = numpy.fft.fft2(image)\n np_conv = numpy.real(numpy.fft.ifft2(t1*t2))\n\n assert(numpy.allclose(mf_conv, np_conv))\n\n flt.cleanup()", "def sliding_window(frame_length, step, Xsampleslist, ysampleslist):\n Xsamples = []\n ysamples = []\n for j in range(len(Xsampleslist)):\n X = Xsampleslist[j]\n ybinary = ysampleslist[j]\n for i in range(0, X.shape[0] - frame_length, step):\n xsub = X[i:i + frame_length, :]\n ysub = ybinary\n Xsamples.append(xsub)\n ysamples.append(ysub)\n return Xsamples, ysamples", "def calc_band_filters(f_ranges, sfreq, filter_length=\"1000ms\", l_trans_bandwidth=4, h_trans_bandwidth=4):\n filter_list = list()\n for f_range in f_ranges:\n h = mne.filter.create_filter(None, sfreq, l_freq=f_range[0], h_freq=f_range[1], fir_design='firwin',\n l_trans_bandwidth=l_trans_bandwidth, h_trans_bandwidth=h_trans_bandwidth,\n filter_length=filter_length)\n filter_list.append(h)\n filter_bank = np.vstack(filter_list)\n return filter_bank", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def read_wxt_obs(years, time):\n\n met_vars = ['RH', 'Tair', 'press']\n vars = met_vars + ['time']\n filepath = ['C:/Users/Elliott/Documents/PhD Reading/PhD Research/Aerosol Backscatter/MorningBL/data/L1/' + \\\n 'Davis_BGH_' + str(i) + '_15min.nc' for i in years]\n wxt_obs_raw = eu.netCDF_read(filepath, vars=vars)\n\n\n # set up array to be filled\n wxt_obs = {}\n for met_var in met_vars:\n wxt_obs[met_var] = np.empty(len(time))\n wxt_obs[met_var][:] = np.nan\n wxt_obs['time'] = time\n\n # find data region and create an average if appropriate\n print_step = range(1000,20000, 1000)\n for t, time_t in enumerate(time):\n\n if t in print_step:\n print 't ='+str(t)\n\n # time t-1 (start of original time period, as all data is relevent for time ENDING at time_t)\n tm1 = t-1\n time_tm1 = time_t - dt.timedelta(minutes=60)\n\n # # start of time period\n # idx_extent = 8000\n # s_idx = int(eu.binary_search(wxt_obs_raw['time'], time_tm1, lo=max(0, tm1 - idx_extent),\n # hi=min(tm1 + idx_extent, len(wxt_obs_raw['time']))))\n # # end of time period\n # e_idx = int(eu.binary_search(wxt_obs_raw['time'], time_t, lo=max(0, t - idx_extent),\n # hi=min(t + idx_extent, len(wxt_obs_raw['time']))))\n\n s_idx = int(eu.binary_search(wxt_obs_raw['time'], time_tm1))\n # end of time period\n e_idx = int(eu.binary_search(wxt_obs_raw['time'], time_t))\n\n # if the time_range time and data['time'] found in this iteration are within an acceptable range (15 mins)\n tm1_diff = time_tm1 - wxt_obs_raw['time'][s_idx]\n t_diff = time_t - wxt_obs_raw['time'][e_idx]\n\n\n # _, s_idx, tm1_diff = eu.nearest(wxt_obs_raw['time'], time_tm1)\n # _, e_idx, t_diff = eu.nearest(wxt_obs_raw['time'], time_t)\n\n\n if (tm1_diff.total_seconds() <= 15 * 60) & (t_diff.total_seconds() <= 15 * 60):\n for met_var in met_vars:\n 
wxt_obs[met_var][t] = np.nanmean(wxt_obs_raw[met_var][s_idx:e_idx+1])\n\n\n # create RH_frac using RH data\n wxt_obs['RH_frac'] = wxt_obs['RH'] / 100.0\n\n # calculate extra variables\n e_s_hpa = 6.112 * (np.exp((17.67 * wxt_obs['Tair']) / (wxt_obs['Tair'] + 243.5))) # [hPa] # sat. v. pressure\n e_s = e_s_hpa * 100.0 # [Pa] # sat. v. pressure\n wxt_obs['e'] = wxt_obs['RH_frac'] * e_s # [Pa] # v. pressure\n wxt_obs['r_v'] = wxt_obs['e'] / (1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) # water_vapour mixing ratio [kg kg-1]\n wxt_obs['q'] = wxt_obs['e'] / ((1.61 * ((wxt_obs['press']*100.0) - wxt_obs['e'])) + wxt_obs['e']) # specific humidity [kg kg-1]\n wxt_obs['Tv'] = (1 + (0.61 * wxt_obs['q'])) * (wxt_obs['Tair'] + 273.15) # virtual temp [K]\n wxt_obs['air_density'] = (wxt_obs['press']*100.0) / (286.9 * wxt_obs['Tv'])# [kg m-3]\n\n return wxt_obs", "def filter_on_adwin_parameters(a_lt3,a_lt4,**kw):\r\n\r\n filter_params = kw.pop('adwin_filter_params',{})\r\n if len(filter_params):\r\n old_params = analysis_params.SPSP_fltr_adwin_settings\r\n \r\n for setup_key,setup_dict in filter_params.iteritems():\r\n for key,params in setup_dict.iteritems():\r\n analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+setup_key][key] = params\r\n\r\n fltr = np.array([True]*len(a_lt3.agrp['ssro_results'].value)) ### initially everything true\r\n\r\n for a,suffix in zip([a_lt3,a_lt4],['lt3','lt4']): ### loop over both files\r\n for key,val in analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+suffix].iteritems(): ### loop over the list of filter parameters\r\n [filter_on,minimum,maximum] = val\r\n\r\n if filter_on:\r\n if key == 'repetition_number':\r\n values = np.array([i for i in range(len(fltr)/a.g.attrs['sweep_length']) for _ in range(a.g.attrs['sweep_length'])]) ### Make an array of values corresponding to the current rep\r\n else:\r\n values = a.agrp[key].value\r\n\r\n fltr = np.logical_and(fltr,(values >= minimum) & ( values <= maximum)) ### update filter\r\n\r\n if len(filter_params):\r\n analysis_params.SPSP_fltr_adwin_settings = old_params\r\n\r\n return fltr", "def adjust_daterange_filter_for_rolling_window(dimensions, operations, filters):\n has_datetime_dimension_in_first_dimension_pos = (\n not len(dimensions) or not dimensions[0].data_type == DataType.date\n )\n if has_datetime_dimension_in_first_dimension_pos:\n return filters\n\n has_rolling = any(\n [isinstance(operation, RollingOperation) for operation in operations]\n )\n if not has_rolling:\n return filters\n\n dim0 = dimensions[0]\n filters_on_dim0 = [\n filter_\n for filter_ in filters\n if isinstance(filter_, RangeFilter)\n and str(filter_.definition.term) == str(dim0.definition)\n ]\n if not 0 < len(filters_on_dim0):\n return filters\n\n max_rolling_period = max(\n operation.window\n for operation in operations\n if isinstance(operation, RollingOperation)\n )\n\n for filter_ in filters_on_dim0:\n # Monkey patch the update start date on the date filter\n print(\"stop\")\n args = (\n {dim0.interval_key + \"s\": max_rolling_period}\n if isinstance(dim0, DatetimeInterval) and \"quarter\" != dim0.interval_key\n else {\"months\": max_rolling_period * 3}\n )\n filter_.definition.start.value -= relativedelta(**args)\n\n return filters", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = 
[nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def get_rgb_bands(image, bands):\n if bands is not MONOCHROME:\n red = image[:, :, bands['red']]\n green = image[:, :, bands['green']]\n blue = image[:, :, bands['blue']]\n\n img = np.rollaxis(np.array([red, green, blue]), 0, 3)\n else:\n img = color.grey2rgb(image)\n\n return img", "def build_sea_data(\n start_year=1999,\n end_year=2016,\n netcdf_path=\"data/sea_level/netcdf/\",\n target_lon=175.8606890,\n target_lat=-36.993684,\n buffer_degrees=0.5,\n path_out=\".\",\n):\n # tairua_coords = (-36.993684, 175.8606890)\n df_sea_data = pd.DataFrame()\n\n for year in range(start_year, end_year + 1):\n ds_first = xr.open_mfdataset(\n os.path.join(netcdf_path, f\"dt_global_twosat_phy_l4_{year}*.nc\")\n )\n\n target_lon = xr.DataArray(\n list(target_lon + np.linspace(-buffer_degrees, buffer_degrees))\n )\n target_lat = xr.DataArray(\n list(target_lat + np.linspace(-buffer_degrees, buffer_degrees))\n )\n\n ds_tairua = ds_first[[\"adt\", \"ugos\", \"vgos\"]].sel(\n longitude=target_lon, latitude=target_lat, method=\"nearest\"\n )\n df_sealevel_pandas = (\n ds_tairua.resample(time=\"MS\")\n .mean()\n .mean(dim=\"dim_0\")\n .to_dataframe()\n )\n\n df_sea_data = pd.concat([df_sea_data, df_sealevel_pandas])\n\n print(\n f\"************************Done {year} ************************************\"\n )\n print(df_sea_data.tail(10))\n\n df_sea_data.to_csv(os.path.join(path_out, \"df_sea_data.csv\"))", "def _delayandsum5(data, offsets, ifactor2, steeramp, out, autopower):\n num, gridsize, numchannels = offsets.shape\n num = out.shape[0]\n for n in nb.prange(num):\n for gi in nb.prange(gridsize):\n out[n,gi] = 0\n autopower[n,gi] = 0\n for mi in range(numchannels):\n ind = 
offsets[n,gi,mi] + n\n r = (data[ind,mi] * (1-ifactor2[n,gi,mi]) \\\n + data[ind+1,mi] * ifactor2[n,gi,mi]) * steeramp[n,gi,mi]\n out[n,gi] += r\n autopower[n,gi] += r*r", "def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = [(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % 
v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def run(self,workspace):\n image_name = self.image_name.value\n cpimage = workspace.image_set.get_image(image_name)\n image = cpimage.pixel_data\n mask = cpimage.mask\n workspace.display_data.statistics = []\n level = int(self.atrous_level.value)\n\n wavelet = self.a_trous(1.0*image, level+1)\n wlevprod = wavelet[:,:,level-1] * 3.0\n\n spotthresh = wlevprod.mean() + float(self.noise_removal_factor.value) * wlevprod.std()\n tidx = wlevprod < spotthresh\n wlevprod[tidx] = 0\n\n wlevprod = self.circular_average_filter(wlevprod, int(self.smoothing_filter_size.value))\n wlevprod = self.smooth_image(wlevprod, mask)\n\n max_wlevprod = scipy.ndimage.filters.maximum_filter(wlevprod,3)\n maxloc = (wlevprod == max_wlevprod)\n twlevprod = max_wlevprod > float(self.final_spot_threshold.value)\n maxloc[twlevprod == 0] = 0\n \n labeled_image,object_count = scipy.ndimage.label(maxloc,\n np.ones((3,3),bool))\n\n unedited_labels = labeled_image.copy()\n # Filter out objects touching the border or mask\n border_excluded_labeled_image = labeled_image.copy()\n labeled_image = self.filter_on_border(image, labeled_image)\n border_excluded_labeled_image[labeled_image > 0] = 0\n \n # Relabel the image\n labeled_image,object_count = relabel(labeled_image)\n new_labeled_image, new_object_count = self.limit_object_count(\n labeled_image, object_count)\n if new_object_count < object_count:\n # Add the labels that were filtered out into the border\n # image.\n border_excluded_mask = ((border_excluded_labeled_image > 0) |\n ((labeled_image > 0) & \n (new_labeled_image == 0)))\n border_excluded_labeled_image = scipy.ndimage.label(border_excluded_mask,\n np.ones((3,3),bool))[0]\n object_count = new_object_count\n labeled_image = new_labeled_image\n \n # Make an outline image\n outline_image = cellprofiler.cpmath.outline.outline(labeled_image)\n outline_border_excluded_image = cellprofiler.cpmath.outline.outline(border_excluded_labeled_image)\n \n if self.show_window:\n statistics = workspace.display_data.statistics\n 
statistics.append([\"# of accepted objects\",\n \"%d\"%(object_count)])\n\n workspace.display_data.image = image\n workspace.display_data.labeled_image = labeled_image\n workspace.display_data.border_excluded_labels = border_excluded_labeled_image\n\n # Add image measurements\n objname = self.object_name.value\n measurements = workspace.measurements\n cpmi.add_object_count_measurements(measurements,\n objname, object_count)\n # Add label matrices to the object set\n objects = cellprofiler.objects.Objects()\n objects.segmented = labeled_image\n objects.unedited_segmented = unedited_labels\n objects.parent_image = image\n \n workspace.object_set.add_objects(objects,self.object_name.value)\n cpmi.add_object_location_measurements(workspace.measurements, \n self.object_name.value,\n labeled_image)\n if self.should_save_outlines.value:\n out_img = cpi.Image(outline_image.astype(bool),\n parent_image = image)\n workspace.image_set.add(self.save_outlines.value, out_img)", "def modifiedb(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=2**8-1,beta=.2):\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n fn=fn[0]\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx[0]))\r\n fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa.reshape(fn)\r\n fb=fa\r\n \r\n #sampling period\r\n df=float(df)\r\n dt=1./df\r\n \r\n tau=(nh-1)/2 #midpoint index of window h\r\n \r\n #create a time array such that the first point is centered on time window\r\n tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')\r\n\t\r\n #create an empty array to put the tf in \r\n tfarray=np.zeros((nfbins,len(tlst)),dtype='complex')\r\n \r\n #create a frequency array with just positive frequencies\r\n flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]\r\n \r\n #calculate pseudo WV\r\n for point,nn in enumerate(tlst):\r\n #calculate the smallest timeshift possible\r\n taun=min(nn,tau,fn-nn-1)\r\n #make a timeshift array\r\n taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')\r\n #create modified b window\r\n mbwin=np.cosh(taulst)**(-2*beta)\r\n mbwin=mbwin/sum(mbwin)\r\n MBwin=np.fft.fft(padzeros(mbwin,npad=nfbins))\r\n #calculate windowed correlation function of analytic function\r\n Rnn=np.conjugate(fa[nn-taulst])*fb[nn+taulst] \r\n #calculate fft of windowed correlation function\r\n FTRnn=MBwin*np.fft.fft(padzeros(Rnn,npad=nfbins))\r\n #put into tfarray\r\n tfarray[:,point]=FTRnn\r\n \r\n #need to cut the time frequency array in half due to the WVD assuming \r\n #time series sampled at twice nyquist.\r\n tfarray=tfarray\r\n \r\n return tfarray,tlst,flst", "def window_functions(array, window_function, window_length = 0, flag_plot = False):\n\n dim = len(numpy.shape(array))\n \n # for single dimensions\n if dim == 1:\n # the window function should end up with the same length as the array\n array_length = numpy.shape(array)[0]\n \n # if it is smaller than the length, make it that length\n if window_length > 0 and window_length < array_length:\n n_max = window_length\n zeros = numpy.zeros(array_length - window_length) \n else:\n n_max = array_length\n zeros = []\n \n # the windows\n if window_function == \"none\":\n window = numpy.ones(array_length)\n \n elif window_function == \"ones\":\n window = 
numpy.concatenate((numpy.ones(n_max).T, zeros)) \n \n elif window_function == \"triangle\":\n window = numpy.concatenate((numpy.linspace(1, 0, n_max).T, zeros)) \n\n elif window_function == \"gaussian\":\n window = numpy.exp(-(2.2*numpy.arange(0, array_length)/(n_max))**2)\n #window = numpy.exp(-numpy.arange(0, array_length)**2 / (n_max**1.7)).T \n \n elif window_function == \"experimental\": \n window = numpy.exp(-(2.2*numpy.arange(0, array_length)/(n_max))**2)\n\n else:\n print(\"ERROR (croc.Absorptive.window_functions): Unknown window function.\")\n window = numpy.ones(array_length)\n \n if flag_plot:\n m = numpy.max(array)\n \n plt.figure()\n plt.plot(array)\n plt.plot(window * m)\n plt.plot(array*window)\n plt.title(\"window function is scaled\")\n plt.show()\n \n return array * window\n\n # for higher dimensions\n else:\n print(\"ERROR (croc.Absorptive.window_functions): Not implemented yet for multiple dimensions.\")\n return 0", "def stack_layers(inDir, outPath, bands=None):\r\n # ONLY SUPPORTS inDir W/ LANDSAT BANDS ENDING /w '_B#.TIF'\r\n # band = ds.GetRasterBand(1)\r\n # band.GetStatistics(True,True) - returns min,max,mean,std\r\n # band.ReadAsArray()\r\n try:\r\n fns = []\r\n if bands is None:\r\n # process all bands in the directory.\r\n bandtypes = ('*_B*.TIF','*band*.tif')\r\n for bandtype in bandtypes:\r\n fns.extend(glob.glob(inDir + bandtype))\r\n else:\r\n # process the specified bands.\r\n blist = '[' + ','.join(str(i) for i in bands) + ']'\r\n bandtypes = ('*B' + blist + '.TIF', '*band' + blist + '.tif')\r\n for bandtype in bandtypes:\r\n fns.extend(glob.glob(inDir + bandtype))\r\n\r\n # Read the first raster & get its band.\r\n fns.sort()\r\n fn = fns.pop(0)\r\n\r\n ras = gdal.Open(fn)\r\n\r\n band = ras.GetRasterBand(1)\r\n\r\n # rows & cols\r\n cols = ras.RasterXSize\r\n rows = ras.RasterYSize\r\n\r\n # raster info\r\n geo = ras.GetGeoTransform()\r\n originX = geo[0]\r\n originY = geo[3]\r\n pixelWidth = geo[1]\r\n pixelHeight = geo[5]\r\n\r\n # Create the output raster\r\n driver = gdal.GetDriverByName('GTiff')\r\n outRas = driver.Create(outPath, cols, rows, len(fns) + 1, band.DataType)\r\n outRas.SetGeoTransform(geo)\r\n #outRas.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight)) # not sure what the zeros are\r\n\r\n # Get the spatial ref info\r\n outRasterSRS = osr.SpatialReference()\r\n outRasterSRS.ImportFromWkt(ras.GetProjectionRef())\r\n\r\n # Write the bands to the new file.\r\n outRas.GetRasterBand(1).WriteArray(band.ReadAsArray())\r\n\r\n # Loop thru any remaining files, adding them to the output.\r\n for i in range(0, len(fns)):\r\n ras = gdal.Open(fns[i])\r\n band = ras.GetRasterBand(1)\r\n outRas.GetRasterBand(i + 2).WriteArray(band.ReadAsArray())\r\n\r\n # Add the spatial ref info at the end.\r\n outRas.SetProjection(outRasterSRS.ExportToWkt())\r\n # write and close the output file.\r\n outRas.FlushCache()\r\n outRas = None\r\n except RuntimeError:\r\n print 'ERROR PROCESSING ' + fn\r\n traceback.print_exc()\r\n return", "def make_sourceframe_with_observations(\n n_observations: int,\n exposure_id: str = \"exposure\",\n obscode: str = \"obs\",\n exposure_mjd_start: float = 50000.0,\n exposure_mjd_mid: float = 50000.0,\n healpixel: int = 1,\n) -> SourceFrame:\n observations = [\n make_sourceobs(\n exposure_id=exposure_id,\n obscode=obscode,\n healpixel=healpixel,\n mjd=exposure_mjd_mid,\n exposure_mjd_start=exposure_mjd_start,\n exposure_mjd_mid=exposure_mjd_mid,\n )\n for _ in range(n_observations)\n ]\n\n return SourceFrame(\n 
exposure_id=exposure_id,\n obscode=obscode,\n filter=\"filter\",\n exposure_mjd_start=exposure_mjd_start,\n exposure_mjd_mid=exposure_mjd_mid,\n exposure_duration=30.0,\n healpixel=1,\n observations=observations,\n )", "def filter_show(filters, nx=8):\n FN, C, FH, FW = filters.shape\n ny = int(np.ceil(FN / nx))\n\n fig = plt.figure()\n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)\n\n for i in range(FN):\n ax = fig.add_subplot(ny, nx, i+1, xticks=[], yticks=[])\n ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')\n plt.show()", "def band_selector(image, colors):\n # convert band to list for downstream compatibilty, if necessary\n if len(colors) == 3: #then it's an RGB image\n\n #housekeeping\n try:\n nbands = len(colors['band'])\n except: \n colors['band'] = [colors['band']]\n nbands = len(colors['band'])\n\n try:\n len(colors['dark_on_light'])\n except:\n colors['dark_on_light'] = [colors['dark_on_light']]\n\n if colors['colorspace'] is 'gray' or colors['colorspace'] is 'grey':\n colors['band'] = [0]\n nbands = 1\n if len(colors['dark_on_light']) > 1:\n raise ValueError(\n \"\"\"Can't interpret multiple arguments for 'dark_on_light' when \n 'colorspace' is {}.\n \"\"\".format(colors['colorspace'])\n )\n \n if nbands != len(colors['dark_on_light']):\n raise ValueError(\n \"\"\"Number of items in `colors['dark_on_light']` doesn't\n equal the number of bands in `colors['band']`!\"\"\"\n )\n\n # convert colorspace if necessary\n try:\n working_image = getattr(color, \"rgb2\" + colors['colorspace'].lower())(image)\n except:\n working_image = image.copy()\n if colors['colorspace'].lower() != 'rgb':\n raise ValueError(\n \"\"\"Didn't recognize specified colorspace. \n See skimage.color.rgb2* for options.\"\"\"\n )\n \n # pull bands\n if len(working_image.shape) == 3: # excludes rgb2gray\n working_image = [img_split(working_image)[i] for i in colors['band']]\n else:\n working_image = [working_image]\n nbands = 1\n \n else: # it's a black and white image\n nbands = 1\n working_image = [image.copy()]\n if len(image.shape) != 2:\n raise ValueError(\n \"\"\"Your `color` argument suggested a grayscale image, but it has \\\n multiple bands!\"\"\"\n )\n \n return(working_image)", "def modify_bands(\n xraster: xr.core.dataarray.DataArray, input_bands: List[str],\n output_bands: List[str], drop_bands: List[str] = []):\n # Do not modify if image has the same number of output bands\n if xraster['band'].shape[0] == len(output_bands):\n return xraster\n\n # Drop any bands from input that should not be on output\n for ind_id in list(set(input_bands) - set(output_bands)):\n drop_bands.append(input_bands.index(ind_id)+1)\n return xraster.drop(dim=\"band\", labels=drop_bands, drop=True)", "def filters(im, filter_list=[\"MedianFilter\"]):\n out = im\n for filter_name in filter_list:\n out = out.filter(getattr(ImageFilter, filter_name))\n return out", "def read_concat_5dczi(czis):\n stacks = []\n for czi in czis:\n stacks.append(read_czi(czi, True))\n stack, frames = concatenate_5dstacks(stacks)\n return stack, frames", "def obtain_filters_mask(model, threshold, cba_index, prune_index):\n\n num_pruned_bn = 0\n num_total_bn = 0\n num_remain_filters = []\n mask_remain_filters = []\n\n # The number of filters reserved must be a multiple of 8\n int_multiple = 8\n filter_switch = list(range(0, 1024, int_multiple))\n\n # cba_index stores all convolution layers with BN layer (the previous layer of YOLO layer is without BN layer)\n for index in cba_index:\n bn_module = 
model.module_list[index][1]\n if index in prune_index:\n mask = obtain_bn_mask(bn_module, threshold).cpu().numpy()\n num_layer_remain_bn = int(mask.sum())\n if num_layer_remain_bn < 8:\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-8]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n else:\n for i, _ in enumerate(filter_switch):\n if num_layer_remain_bn < filter_switch[i]:\n num_layer_remain_bn = filter_switch[i - 1]\n break\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-num_layer_remain_bn]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n\n num_remain_bn = int(mask.sum())\n num_pruned_bn = num_pruned_bn + mask.shape[0] - num_remain_bn\n\n if num_remain_bn == 0:\n print(\"Channels would be all pruned!\")\n raise Exception\n\n logger.info('layer index: %d \\t total channel: %d \\t remaining channel: %d',\n index, mask.shape[0], num_remain_bn)\n else:\n mask = np.ones(bn_module.weight.data.shape)\n num_remain_bn = mask.shape[0]\n num_total_bn += mask.shape[0]\n num_remain_filters.append(num_remain_bn)\n mask_remain_filters.append(mask.copy())\n\n prune_ratio = num_pruned_bn / num_total_bn\n logger.info('Prune channels: %d \\t Prune ratio: %.3f', num_pruned_bn, prune_ratio)\n\n return num_remain_filters, mask_remain_filters", "def concatenate_5dstacks(stacks):\n def stack_channel(stacks, channel):\n \"\"\"Stack multiple 4d ndarrays\"\"\"\n cstack = stacks[0][channel].copy()\n frames = []\n for i in range(1, len(stacks)):\n frames.append(len(cstack))\n cstack = np.vstack([cstack, stacks[i][channel]])\n return cstack, frames\n c0_stack, frames = stack_channel(stacks, 0)\n c1_stack, _ = stack_channel(stacks, 1)\n return np.stack((c0_stack, c1_stack)), frames", "def bin_by_npixels(self, npix):\n\n disp = self.dispersion\n dbins = disp[1:] - disp[:-1]\n bin_boundary = disp[:-1] + 0.5 * dbins\n\n lbins = bin_boundary[:-1]\n rbins = bin_boundary[1:]\n mbins = disp[1:-1]\n dbins = rbins - lbins\n flux = self.flux[1:-1]\n flux_err = self.flux_err[1:-1]\n num_bins = len(mbins)\n\n num_new_bins = int((num_bins - (num_bins % npix)) / npix)\n\n new_wave = np.zeros(num_new_bins)\n new_flux = np.zeros(num_new_bins)\n new_flux_err = np.zeros(num_new_bins)\n\n for idx in range(num_new_bins):\n\n _new_flux = 0\n _new_flux_err = 0\n _new_dbin = 0\n\n for jdx in range(npix):\n _new_flux += flux[idx * npix + jdx] * dbins[idx * npix + jdx]\n _new_dbin += dbins[idx * npix + jdx]\n _new_flux_err += (flux_err[idx * npix + jdx] * dbins[\n idx * npix + jdx]) ** 2\n\n rbin = rbins[npix * idx + npix - 1]\n lbin = lbins[npix * idx]\n _new_wave = (rbin - lbin) * 0.5 + lbin\n\n new_wave[idx] = _new_wave\n new_flux[idx] = _new_flux / _new_dbin\n new_flux_err[idx] = np.sqrt(_new_flux_err) / _new_dbin\n\n return SpecOneD(dispersion=new_wave, flux=new_flux,\n flux_err=new_flux_err, unit='f_lam')", "def add_narrow_band_images(self, cube, z_desc, eml=None, size=None,\n unit_size=u.arcsec, width=8, is_sum=False,\n subtract_off=True, margin=10., fband=3.,\n median_filter=0,\n method=\"mean\"):\n if is_sum:\n warnings.warn(\n \"The 'is_sum' parameter is deprecated. Use method='sum' \"\n \"instead. 
Aggregation function set to sum.\", MpdafWarning)\n method = \"sum\"\n\n if self.z is None:\n self._logger.warning('Cannot generate narrow-band image if the '\n 'redshift is None.')\n return\n\n if size is None:\n size = self.default_size\n unit_size = u.arcsec\n\n subcub = cube.subcube(center=(self.dec, self.ra), size=size,\n unit_center=u.deg, unit_size=unit_size)\n\n z = self.z['Z'][self.z['Z_DESC'] == z_desc]\n\n if z > 0:\n if eml is None:\n all_lines = np.array([1216, 1908, 3727, 4863, 5007, 6564])\n all_tags = np.array(['LYALPHA', 'SUMCIII1907', 'SUMOII3726',\n 'HBETA', 'OIII5007', 'HALPHA'])\n else:\n all_lines = np.array(list(eml.keys()))\n all_tags = np.array(list(eml.values()))\n\n minl, maxl = subcub.wave.get_range(unit=u.angstrom) / (1 + z)\n useful = np.where((all_lines > minl) & (all_lines < maxl))\n nlines = len(useful[0])\n if nlines > 0:\n lambda_ranges = np.empty((2, nlines))\n lambda_ranges[0, :] = (1 + z) * all_lines[useful] - width / 2.0\n lambda_ranges[1, :] = (1 + z) * all_lines[useful] + width / 2.0\n tags = all_tags[useful]\n for l1, l2, tag in zip(lambda_ranges[0, :],\n lambda_ranges[1, :], tags):\n # self._logger.debug('Generate narrow band image for NB_%s'\n # ' with z=%s', tag, z[0])\n self.images['NB_' + tag] = subcub.get_image(\n wave=(l1, l2), method=method,\n subtract_off=subtract_off, margin=margin,\n fband=fband, median_filter=median_filter, unit_wave=u.angstrom)", "def get_bands(self, roi_dir, **kwargs):\n dset = self.dataset\n dset.load()\n band_kwargs = dict(emin=dset.emin, emax=dset.emax, minROI=dset.minROI, maxROI=dset.maxROI)\n band_kwargs.update(kwargs)\n radius = band_kwargs['minROI'] # fixed radius now\n bandlist = []\n for band in dset.dmap:\n emin,emax, event_type = band.emin(), band.emax(), band.event_class()&5\n if (emin + 1) < band_kwargs['emin'] or (emax - 1) >band_kwargs['emax']: continue\n #print (int(emin), event_class)\n energy= np.sqrt(emin*emax)\n bandlist.append( bands.BandSet(band, self.psfman(event_type,energy), self.exposureman(event_type,energy), \n roi_dir, radius))\n return np.asarray(bandlist)", "def plot_landings_quantiles(df):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n ax.set_position(default_timeseries_position) \n\n Fn = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg]).mean()\n grp = df['CatchMT'].groupby([df.Year, df.Reg, df.Sreg])\n\n qmean = grp.mean().loc[:, 'All', 'All'] \n q90 = grp.quantile(0.90).loc[:, 'All', 'All'] \n q75 = grp.quantile(0.75).loc[:, 'All', 'All'] \n q50 = grp.quantile(0.50).loc[:, 'All', 'All'] \n q25 = grp.quantile(0.25).loc[:, 'All', 'All'] \n q10 = grp.quantile(0.10).loc[:, 'All', 'All'] \n\n # Don't plot the first year. Also, the data is shifted by one year.\n # For some reason, restricting the year range above results in a series\n # that still have a multi-index. 
This seems like the cleanest way to do\n # that.\n qmean = qmean.iloc[2:]\n q90 = q90.iloc[2:]\n q75 = q75.iloc[2:]\n q50 = q50.iloc[2:]\n q25 = q25.iloc[2:]\n q10 = q10.iloc[2:]\n qmean.index = qmean.index - 1\n q90.index = q90.index - 1\n q75.index = q75.index - 1\n q50.index = q50.index - 1\n q25.index = q25.index - 1\n q10.index = q10.index - 1\n \n colors = seaborn.color_palette(n_colors=3);\n\n q90.plot(ax=ax, color=colors[0], linestyle='--', label='90%') \n q75.plot(ax=ax, color=colors[1], linestyle='--', label='75%') \n qmean.plot(ax=ax, color='black', label='Mean') \n q50.plot(ax=ax, color=colors[2], linestyle='--', label='50%') \n q25.plot(ax=ax, color=colors[1], linestyle='--', label='25%') \n q10.plot(ax=ax, color=colors[0], linestyle='--', label='10%') \n\n ax.legend(loc='best')\n\n content = io.BytesIO()\n plt.savefig(content, format='png')\n content.seek(0)\n image_cache['landings']['quantiles'] = content\n\n plt.close()", "def resample(wrksppath, region, model):\n logging.info('\\nResampling the rasters for ' + region)\n # Define app workspace and sub-paths\n tiffs = os.path.join(wrksppath, region, model + '_GeoTIFFs')\n resampleds = os.path.join(wrksppath, region, model + '_GeoTIFFs_resampled')\n\n # Create directory for the resampled GeoTIFFs\n if not os.path.exists(tiffs):\n logging.info('There is no tiffs folder. You must have already resampled them. Skipping resampling')\n return\n\n # List all Resampled GeoTIFFs\n files = os.listdir(tiffs)\n files = [tif for tif in files if tif.endswith('.tif')]\n files.sort()\n\n # Read raster dimensions\n path = os.path.join(tiffs, files[0])\n raster_dim = rasterio.open(path)\n width = raster_dim.width\n height = raster_dim.height\n lon_min = raster_dim.bounds.left\n lon_max = raster_dim.bounds.right\n lat_min = raster_dim.bounds.bottom\n lat_max = raster_dim.bounds.top\n\n # Geotransform for each resampled raster (east, south, west, north, width, height)\n geotransform_res = rasterio.transform.from_bounds(lon_min, lat_min, lon_max, lat_max, width * 100, height * 100)\n\n # Resample each GeoTIFF\n for file in files:\n path = os.path.join(tiffs, file)\n logging.info(path)\n with rasterio.open(path) as dataset:\n data = dataset.read(\n out_shape=(int(dataset.height * 100), int(dataset.width * 100)),\n # Reduce 100 to 10 if using the whole globe\n resampling=Resampling.nearest\n )\n\n # Convert new resampled array from 3D to 2D\n data = numpy.squeeze(data, axis=0)\n\n # Specify the filepath of the resampled raster\n resample_filename = file.replace('.tif', '_resampled.tif')\n resample_filepath = os.path.join(resampleds, resample_filename)\n\n # Save the GeoTIFF\n with rasterio.open(\n resample_filepath,\n 'w',\n driver='GTiff',\n height=data.shape[0],\n width=data.shape[1],\n count=1,\n dtype=data.dtype,\n nodata=numpy.nan,\n crs='+proj=latlong',\n transform=geotransform_res,\n ) as dst:\n dst.write(data, 1)\n\n # delete the non-resampled tiffs now that we dont need them\n shutil.rmtree(tiffs)\n\n return", "def folding(eventfile,Porb,nbins):\n times = fits.open(eventfile)[1].data['TIME'] #getting array of times\n gtis_data = fits.open(eventfile)[2].data #getting GTIs\n T = sum([ gtis_data[i]['STOP']-gtis_data[i]['START'] for i in range(len(gtis_data)) ]) #exposure time\n\n gtis_conform = []\n for i in range(len(gtis_data)):\n gtis_conform.append([gtis_data[i][0],gtis_data[i][1]]) #conform to the input that Stingray uses\n\n phase_sr,prof_sr,err_sr = fold_events(times,1/Porb,gtis=np.array(gtis_conform),ref_time=times[0],nbin=nbins)\n 
phase_sr_expo,prof_sr_expo,err_sr_expo = fold_events(times,1/Porb,gtis=np.array(gtis_conform),ref_time=times[0],expocorr=True,nbin=nbins)\n\n total_phase_sr = list(phase_sr) + list(phase_sr+1)\n total_prof_sr = list(prof_sr)*2\n total_err_sr = list(err_sr)*2\n\n total_phase_sr_expo = list(phase_sr_expo) + list(phase_sr_expo+1)\n total_prof_sr_expo = list(prof_sr_expo)*2\n total_err_sr_expo = list(err_sr_expo)*2\n\n plt.figure()\n plt.errorbar(x=total_phase_sr,y=total_prof_sr/T,yerr=total_err_sr/T,color='r',drawstyle='steps-mid')\n plt.errorbar(x=total_phase_sr_expo,y=total_prof_sr_expo/T,yerr=total_err_sr_expo/T,color='b',drawstyle='steps-mid')\n plt.legend(('Folded profile','Exposure-corrected'),loc='best',fontsize=12)\n plt.title(str(pathlib.Path(eventfile).name) +', exposure-corrected (using Stingray fold_events)',fontsize=12)\n plt.xlabel('Phase',fontsize=12)\n plt.ylabel('Counts/s',fontsize=12)\n\n return total_phase_sr_expo,total_prof_sr_expo/T,total_err_sr_expo/T", "def flattenFrames(stack):\n \n maxHeight=0\n frameList=[]\n \n \n print('\\n')\n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting shifts {:.2f}% done'.format(100.0*((i+1)/len(stack))),end='', flush=True)\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def filter_img(inarr, data_resolution):\n outt = inarr.copy()\n print('outmin', np.nanmin(outt), np.nanmax(outt))\n\n t_thresh_size = -40\n t_thresh_cut = -50\n\n outt[outt >= t_thresh_size] = 0\n outt[np.isnan(outt)] = 0\n\n labels, numL = label(outt)\n\n u, inv = np.unique(labels, return_inverse=True)\n n = np.bincount(inv)\n\n pix_nb = 700/data_resolution**2\n\n badinds = u[(n < pix_nb)]\n # all blobs with more than 1000 pixels = 25,000km2 (meteosat regridded 5km), 200pix = 5000km2, 8pix = 200km2\n # scale 30km, radius 15km ca. 700km2 circular area equals 28 pix\n\n for bi in badinds:\n inds = np.where(labels == bi)\n outt[inds] = 0\n\n outt[outt >= t_thresh_cut] = 150\n\n grad = np.gradient(outt)\n outt[outt == 150] = np.nan\n\n nogood = np.isnan(outt) # filters edge maxima later, no maxima in -40 edge area by definition!\n\n # tdiff = np.nanmax(outt) - np.nanmin(outt) # define background temperature for image\n # if tdiff > 28: # temp difference of 28 degrees\n # xmin = 15\n # else:\n # xmin = 10\n\n xmin = 10\n outt[nogood] = t_thresh_cut - xmin\n nok = np.where(abs(grad[0]) > 80)\n d = 2\n i = nok[0]\n j = nok[1]\n # edge smoothing for wavelet application\n for ii, jj in zip(i, j):\n kern = outt[ii - d:ii + d + 1, jj - d:jj + d + 1]\n outt[ii - d:ii + d + 1, jj - d:jj + d + 1] = ndimage.gaussian_filter(kern, 3, mode='nearest')\n\n return outt, nogood, t_thresh_size, t_thresh_cut, pix_nb" ]
[ "0.80519295", "0.7068984", "0.69225854", "0.57098013", "0.5579009", "0.55375785", "0.5408724", "0.5229599", "0.50659686", "0.5013549", "0.494298", "0.49226463", "0.48234645", "0.4740639", "0.46834993", "0.46194592", "0.46066454", "0.45539072", "0.45349857", "0.45121947", "0.45062637", "0.44996592", "0.4427564", "0.44237077", "0.44009677", "0.4390605", "0.4386547", "0.4376775", "0.43743333", "0.43699837", "0.43677163", "0.43658012", "0.43553653", "0.43503332", "0.43419296", "0.43419296", "0.43334955", "0.43329638", "0.4310008", "0.43075743", "0.43038926", "0.42898938", "0.42832208", "0.4273657", "0.42696375", "0.42607778", "0.4256779", "0.4230962", "0.42281407", "0.42251158", "0.42242107", "0.42234787", "0.422288", "0.42191693", "0.42105874", "0.42086008", "0.4203612", "0.42021665", "0.41723335", "0.41642737", "0.41630194", "0.4160556", "0.41492337", "0.41298175", "0.41290793", "0.41124162", "0.4105143", "0.4102885", "0.40835267", "0.40825367", "0.4078864", "0.4075689", "0.40719795", "0.4065303", "0.40553793", "0.404835", "0.40443453", "0.40423766", "0.4040471", "0.403998", "0.4036449", "0.403424", "0.40312245", "0.40310588", "0.40282086", "0.40266702", "0.4015847", "0.40156963", "0.40156323", "0.40108803", "0.4010241", "0.40102276", "0.4002642", "0.40006104", "0.40002158", "0.39986855", "0.39941877", "0.39941317", "0.3993474", "0.39920703" ]
0.5577494
5
Function to perform a 5-year moving window filter for a single land cover value (such as Forest, coded as 1) across all years in an image. Calls the function mask5. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central positions of five consecutive years: if the two end years hold the target value but the central years do not, the central pixels are reclassified to match their temporal neighbours' class. This function can be applied to whichever land cover values the user chooses, whether all of the land cover values or a select few.
def applyWindow5years(imagem, value, bandNames):
    img_out = imagem.select(bandNames[0])
    for i in np.arange(1, len(bandNames)-3):
        img_out = img_out.addBands(mask5(imagem, value, bandNames[(i-1):(i+4)]))
    img_out = img_out.addBands(imagem.select(bandNames[-3]))
    img_out = img_out.addBands(imagem.select(bandNames[-2]))
    img_out = img_out.addBands(imagem.select(bandNames[-1]))
    return img_out
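A minimal usage sketch (not part of the original record), assuming an authenticated Earth Engine Python session; the asset ID and band-name pattern below are hypothetical placeholders, and mask5 is the helper shown among the negatives further down.

import ee
import numpy as np  # applyWindow5years iterates with np.arange

ee.Initialize()

# Hypothetical annual classification image: one band per year, in chronological order.
classified = ee.Image('users/example/annual_land_cover')  # placeholder asset ID
band_names = ['classification_{}'.format(year) for year in range(2001, 2011)]

# Reclassify short interruptions of the Forest class (value 1) inside 5-year windows.
filtered = applyWindow5years(classified, 1, band_names)
print(filtered.bandNames().getInfo())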
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def prepare_ERA5_moisture_flux(era5_path=era5_path):\n import xarray as xr\n from aux_gps import save_ncfile\n from aux_gps import anomalize_xr\n import numpy as np\n from aux_gps import convert_wind_direction\n from dask.diagnostics import ProgressBar\n ds = xr.open_dataset(\n era5_path / 'ERA5_UVQ_4xdaily_israel_1996-2019.nc', chunks={'level': 5})\n # ds = ds.resample(time='D', keep_attrs=True).mean(keep_attrs=True)\n # ds.attrs['action'] = 'resampled to 1D from 12:00UTC data points'\n mf = (ds['q'] * ds['u']).to_dataset(name='qu')\n mf.attrs = ds.attrs\n mf['qu'].attrs['units'] = ds['u'].attrs['units']\n mf['qu'].attrs['long_name'] = 'U component of moisture flux'\n mf['qu'].attrs['standard_name'] = 'eastward moisture flux'\n mf['qv'] = ds['q'] * ds['v']\n mf['qv'].attrs['units'] = ds['v'].attrs['units']\n mf['qv'].attrs['long_name'] = 'V component moisture flux'\n mf['qv'].attrs['standard_name'] = 'northward moisture flux'\n mf['qf'], mf['qfdir'] = convert_wind_direction(u=mf['qu'], v=mf['qv'])\n mf['qf'].attrs['units'] = ds['v'].attrs['units']\n mf['qf'].attrs['long_name'] = 'moisture flux magnitude'\n # mf['qfdir'] = 270 - np.rad2deg(np.arctan2(mf['qv'], mf['qu']))\n mf['qfdir'].attrs['units'] = 'deg'\n mf['qfdir'].attrs['long_name'] = 'moisture flux direction (meteorological)'\n mf = mf.sortby('latitude')\n mf = mf.sortby('level', ascending=False)\n comp = dict(zlib=True, complevel=9)\n encoding_mf = {var: comp for var in mf}\n mf_delayed = mf.to_netcdf(era5_path / 'ERA5_MF_4xdaily_israel_1996-2019.nc',\n 'w', encoding=encoding_mf, compute=False)\n mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n encoding_mf_anoms = {var: comp for var in mf_anoms}\n mf_anoms_delayed = mf_anoms_mean.to_netcdf(era5_path / 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc',\n 'w', encoding=encoding_mf_anoms, compute=False)\n with ProgressBar():\n results = mf_delayed.compute()\n with ProgressBar():\n results1 = mf_anoms_delayed.compute()\n # save_ncfile(mf, era5_path, 'ERA5_MF_4xdaily_israel_1996-2019.nc')\n # mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n # mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n # 
save_ncfile(mf_anoms_mean, era5_path,\n # 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')\n return", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], 
scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with 
frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def calculate_daily_climatology(\n 
pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def system_5(in_dir, out_dir, threshold, num_frames=150, num_prev_frames=10, blur=(3,3), as_numeric=True, stretched=True):\n filenames = _prepare_filenames(in_dir, num_frames=150)\n initial_background_model = np.array([cv2.imread(f) for f in filenames[0:num_prev_frames]])\n seed_img = mode(initial_background_model)\n previous_frames = deque(initial_background_model, maxlen=num_prev_frames)\n\n for i, f in tqdm(enumerate(filenames[num_prev_frames:])):\n img = lm(cv2.imread(f))", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def computeCloudMasking(image_name, numberOfTrees=NUMBER_TREES, threshold=CUTTOF):\n\n # Import training data as GEE object\n # Build randomForest model at each run\n fc_training = ee.FeatureCollection(\n 'ft:1XzZPz8HZMARKQ9OPTWvfuRkPaGIASzkRYMfhKT8H')\n\n # Use these methods for prediction.\n methods_name = ee.List(['percentile1', 'percentile5', 'tree2', 'tree3'])\n\n # Random Forest model\n randomForest = 
ee.Classifier.randomForest(numberOfTrees=numberOfTrees)\n randomForest = randomForest.train(fc_training, 'cloud', methods_name)\n\n # Image + region of interest\n image = ee.Image(image_name)\n roi = getGeometryImage(image)\n\n # UK BORDER <=> mask sea\n land_geometry = ee.FeatureCollection(parameters.land_geometry)\n # image = image.clip(land_geometry)\n\n # Apply the different methods\n # tree1 = getMaskTree1(image, roi)\n tree2 = getMaskTree2(image, roi)\n tree3 = getMaskTree3(image, roi)\n percentile1, percentile5 = CloudClusterScore(image, roi)\n\n # Add each result as a band of the final image\n final_image = tree3.addBands([tree2, percentile1, percentile5]) \\\n .clip(land_geometry)\n\n # Apply the random Forest classification\n masked_image = final_image.classify(randomForest) \\\n .gt(threshold)\n\n # Add meta data: geometry + date\n masked_image = masked_image.set(\"system:footprint\", image.get('system:footprint'))\n masked_image = masked_image.set(\"system:time_start\", image.get('system:time_start'))\n masked_image = masked_image.set(\"system:time_end\", image.get('system:time_end'))\n\n return masked_image", "def _simulate_flux(self, times, five_sigma_mag, band):\n magnification = self._calculate_magnification(times)\n \n source_flux = self.source_flux[band]\n blending_flux = self.blending_flux[band]\n model_flux = source_flux * magnification + blending_flux\n model_mag = MM.Utils.get_mag_from_flux(model_flux)\n sigma_mag = self._LSST_uncertainties(model_mag, five_sigma_mag, band)\n temp = MM.Utils.get_flux_and_err_from_mag(model_mag, sigma_mag)\n sigma_flux = temp[1]\n \n simulated = model_flux + np.random.normal(scale=sigma_flux)\n simulated[simulated < 0.] = 0.\n \n if self._model.n_lenses == 2:\n diff = (model_flux - simulated) / sigma_flux\n self._binary_chi2_sum += np.sum(diff**2)\n\n return (simulated, sigma_flux)", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. 
Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def five_years_avg_dividend(self, five_years_avg_dividend: float):\n\n self._five_years_avg_dividend = five_years_avg_dividend", "def testMask5D(self):\n mask = np.ones((3, 3, 3, 5, 1), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5))\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def main_bf_MISR(h5f, output_folder, SPATIAL_RESOLUTION=0.5, VZA_MAX=18, CAMERA='AN'):\n\n # =============================================================================\n # 1. Initialization\n # calculate constant parameters\n # initialize output arrays and output hdf5 file\n # check the number of CERES granules \n # =============================================================================\n\n print(\"-------MISR----->\", h5f)\n print(\"-------FID------<>\", h5f.fid)\n print(\"---->\", type(h5f))\n if type(h5f.fid) is str:\n output_nc_name = h5f.fid.split('/')[-1].replace('TERRA_BF_L1B', 'CLIMARBLE')\n else:\n output_nc_name = h5f.fid.name. \\\n decode(\"utf-8\").split('/')[-1]. \\\n replace('TERRA_BF_L1B', 'CLIMARBLE')\n\n output_nc_name = output_nc_name.replace('.h5', '.nc')\n\n # \n NUM_POINTS = 1 / SPATIAL_RESOLUTION\n NUM_LATS = int(180 / SPATIAL_RESOLUTION)\n NUM_LONS = int(360 / SPATIAL_RESOLUTION)\n\n LAT_EDGES = np.arange(-90.0, 90.0001, SPATIAL_RESOLUTION)\n LON_EDGES = np.arange(-180.0, 180.0001, SPATIAL_RESOLUTION)\n\n # \n orbit_radiance_sum = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_radiance_num = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_nc_out = os.path.join(output_folder, output_nc_name)\n\n\n # =============================================================================\n # 2. 
Main processing\n # Loop through each CERES granule and sort radiances into the corresponding lat/lon bins\n # When encounters an asceding granule, script will move to the next granule\n # =============================================================================\n\n # USE MODIS granules to match first and last time of the descending node\n MISR_blocks = get_descending(h5f, 'MISR.{}'.format(CAMERA))\n if MISR_blocks[0] == 0:\n print(\">> IOError( no available MODIS granule in orbit {} )\".format(bf_file))\n return\n\n # LOAD lat/lon here\n lat = h5f['MISR/Geolocation/GeoLatitude'][:]\n lon = h5f['MISR/Geolocation/GeoLongitude'][:]\n\n # LOAD radiance here\n MISR_bands = ['Blue', 'Green', 'Red', 'NIR']\n rads_all = []\n for iband in MISR_bands:\n rads_all.append(h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, iband)][:])\n\n # SPECIFY data dimension to interpolate SZA/VZA\n rad_shape = (128, 512)\n \n\n # LOOP through MISR blocks (starts from 0)\n for iblk in MISR_blocks:\n\n # INTERPOLATE sza and vza (this part can be replaced by a more accurate function)\n raw_sza = h5f['MISR/Solar_Geometry/SolarZenith'][iblk]\n raw_vza = h5f['MISR/{}/Sensor_Geometry/{}Zenith'.format(CAMERA, ''.join(c.lower() if i==1 else c for i,c in enumerate(CAMERA)))][iblk]\n np.place(raw_sza, raw_sza<0, np.nan)\n np.place(raw_vza, raw_vza<0, np.nan)\n blk_sza = resize(raw_sza, rad_shape)\n blk_vza = resize(raw_vza, rad_shape)\n\n\n # SELECT lat/lon\n idx_geometry = np.where((blk_sza<89.0) & (blk_vza<VZA_MAX))\n select_lat = lat[iblk][idx_geometry]\n select_lon = lon[iblk][idx_geometry]\n\n\n # SELECT spectral radiances here\n # Aggregate 275-m res data to 1.1-km when necessary\n # Separate band by band to allow one (or more) band(s) failure\n for iband, band_name in enumerate(MISR_bands, start=0):\n blk_rad = rads_all[iband][iblk]\n # blk_rad = h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, band_name)][iblk]\n\n if blk_rad.shape == (512, 2048): \n # 275-m res band\n np.place(blk_rad, blk_rad<0, np.nan)\n fnl_blk_rad = np.nanmean(np.reshape(blk_rad, (blk_rad.shape[0]//4, 4, blk_rad.shape[1]//4,4)), axis=(1,3))\n else:\n fnl_blk_rad = blk_rad\n\n\n select_rad = np.nan_to_num(fnl_blk_rad[idx_geometry])\n fnl_idx = np.where((select_rad>0)&(select_rad<1000))[0]\n\n fnl_lat = select_lat[fnl_idx] * -1\n fnl_lon = select_lon[fnl_idx]\n fnl_rad = select_rad[fnl_idx]\n\n try:\n rad_sum, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='sum')\n rad_cnt, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='count')\n\n orbit_radiance_sum[:, :, iband] += rad_sum\n orbit_radiance_num[:, :, iband] += rad_cnt\n except ValueError:\n continue\n\n # =============================================================================\n # 3. 
Save results\n # =============================================================================\n orbit_radiance_num = np.array(orbit_radiance_num, dtype='int16')\n\n coords_lats = np.linspace(90-SPATIAL_RESOLUTION/2, -90+SPATIAL_RESOLUTION/2, NUM_LATS)\n coords_lons = np.linspace(-180+SPATIAL_RESOLUTION/2, 180-SPATIAL_RESOLUTION/2, NUM_LONS)\n\n xr_rad_sum = xr.DataArray(orbit_radiance_sum, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_num = xr.DataArray(orbit_radiance_num, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_sum.encoding['_FillValue'] = 0\n xr_rad_num.encoding['_FillValue'] = 0\n xr_rad_sum.name = 'MISR spec rad sum'\n xr_rad_num.name = 'MISR spec rad num'\n xr_rad_sum.to_netcdf(orbit_nc_out, 'a')\n xr_rad_num.to_netcdf(orbit_nc_out, 'a')\n return orbit_nc_out", "def ls5_sr_corr(img):\n return img.select(['B1'], ['BLUE']).float().multiply(0.91996).add(37).int16()\\\n .addBands(img.select(['B2'], ['GREEN']).float().multiply(0.92764).add(84).int16())\\\n .addBands(img.select(['B3'], ['RED']).float().multiply(0.8881).add(98).int16())\\\n .addBands(img.select(['B4'], ['NIR']).float().multiply(0.95057).add(38).int16())\\\n .addBands(img.select(['B5'], ['SWIR1']).float().multiply(0.96525).add(29).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.99601).add(20).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])", "def at_rSNR(h5):\n ses = h5['SES'][:]['ses'].copy()\n ses.sort()\n h5.attrs['clipSNR'] = np.mean(ses[:-3]) / h5.attrs['noise'] *np.sqrt(ses.size)\n x = np.median(ses) \n h5.attrs['medSNR'] = np.median(ses) / h5.attrs['noise'] *np.sqrt(ses.size)", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, 
:]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def determine_exposure_time(cn, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate varies SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. 
continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def test_matched_filter5():\n x_size = 80\n y_size = 90\n\n objects = numpy.zeros((1, 5))\n\n # Make filter with unit sum.\n objects[0,:] = [x_size/2, y_size/2, 1.0, 1.0, 1.0]\n psf = dg.drawGaussians((x_size, y_size), objects)\n psf = psf/numpy.sum(psf)\n flt = matchedFilterC.MatchedFilter(psf)\n\n # Make test image.\n image = numpy.zeros((x_size, y_size))\n image[int(x_size/2), int(y_size/2)] = float(100)\n\n mf_conv = flt.convolve(image)\n\n t1 = numpy.fft.fft2(recenterPSF.recenterPSF(psf))\n t2 = numpy.fft.fft2(image)\n np_conv = numpy.real(numpy.fft.ifft2(t1*t2))\n\n assert(numpy.allclose(mf_conv, np_conv))\n\n flt.cleanup()", "def fig_5():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig5.png'\n gn = 'm3'\n 
glob_cmd(model, gn, features, filters, figname)", "def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n #iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n #read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist", "def smoothen_image(image):\n window = square(3)\n def _replace_center_with_one_if_five_neighbors_are_different_than_0(values):\n \"\"\"\n For each location in the input image, the value returned by the function is the value assigned to that location.\n That's why, naturally, the function needs to return a scalar.\n :param values:\n :return: a scalar representing the value to be set at the current location in the input image\n \"\"\"\n greater_than_0 = 0\n for entry in values:\n if entry > 0:\n greater_than_0 += 1\n if greater_than_0 >= 5:\n return 1\n else:\n return 0\n\n \"\"\"\n This call will take windows of the shape given by the footprint, send them as an 1D array to the _replace function\n and return the value that is to be set in the center of the window. 
The edges are ignored (for now)\n \"\"\"\n new_image = generic_filter(image, _replace_center_with_one_if_five_neighbors_are_different_than_0, footprint = window)\n return new_image", "def calcM5(hardware, system, atmos, title='m5'):\n # photParams stores default values for the exposure time, nexp, size of the primary,\n # readnoise, gain, platescale, etc.\n # See https://github.com/lsst/sims_photUtils/blob/master/python/lsst/sims/photUtils/PhotometricParameters.py\n effarea = np.pi * (6.423/2.*100.)**2\n photParams_zp = PhotometricParameters(exptime=1, nexp=1, gain=1, effarea=effarea,\n readnoise=8.8, othernoise=0, darkcurrent=0.2)\n photParams = PhotometricParameters(gain=1.0, effarea=effarea, readnoise=8.8, othernoise=0, darkcurrent=0.2)\n photParams_infinity = PhotometricParameters(gain=1.0, readnoise=0, darkcurrent=0,\n othernoise=0, effarea=effarea)\n # lsstDefaults stores default values for the FWHMeff.\n # See https://github.com/lsst/sims_photUtils/blob/master/python/lsst/sims/photUtils/LSSTdefaults.py\n lsstDefaults = LSSTdefaults()\n darksky = Sed()\n darksky.readSED_flambda(os.path.join('../siteProperties', 'darksky.dat'))\n flatSed = Sed()\n flatSed.setFlatSED()\n m5 = {}\n Tb = {}\n Sb = {}\n kAtm = {}\n Cm = {}\n dCm_infinity = {}\n sourceCounts = {}\n skyCounts = {}\n skyMag = {}\n gamma = {}\n for f in system:\n m5[f] = SignalToNoise.calcM5(darksky, system[f], hardware[f], photParams,\n FWHMeff=lsstDefaults.FWHMeff(f))\n fNorm = flatSed.calcFluxNorm(m5[f], system[f])\n flatSed.multiplyFluxNorm(fNorm)\n sourceCounts[f] = flatSed.calcADU(system[f], photParams=photParams)\n # Calculate the Skycounts expected in this bandpass.\n skyCounts[f] = (darksky.calcADU(hardware[f], photParams=photParams)\n * photParams.platescale**2)\n # Calculate the sky surface brightness.\n skyMag[f] = darksky.calcMag(hardware[f])\n # Calculate the gamma value.\n gamma[f] = SignalToNoise.calcGamma(system[f], m5[f], photParams)\n # Calculate the \"Throughput Integral\" (this is the hardware + atmosphere)\n dwavelen = np.mean(np.diff(system[f].wavelen))\n Tb[f] = np.sum(system[f].sb / system[f].wavelen) * dwavelen\n # Calculate the \"Sigma\" 'system integral' (this is the hardware only)\n Sb[f] = np.sum(hardware[f].sb / hardware[f].wavelen) * dwavelen\n # Calculate km - atmospheric extinction in a particular bandpass\n kAtm[f] = -2.5*np.log10(Tb[f] / Sb[f])\n # Calculate the Cm and Cm_Infinity values.\n # m5 = Cm + 0.5*(msky - 21) + 2.5log10(0.7/FWHMeff) + 1.25log10(t/30) - km(X-1.0)\n # Exptime should be 30 seconds and X=1.0\n exptime = photParams.exptime * photParams.nexp\n if exptime != 30.0:\n print \"Whoa, exposure time was not as expected - got %s not 30 seconds. 
Please edit Cm calculation.\" %(exptime)\n # Assumes atmosphere used in system throughput is X=1.0\n X = 1.0\n Cm[f] = (m5[f] - 0.5*(skyMag[f] - 21) - 2.5*np.log10(0.7/lsstDefaults.FWHMeff(f)))\n # Calculate Cm_Infinity by setting readout noise to zero.\n m5inf = SignalToNoise.calcM5(darksky, system[f], hardware[f], photParams_infinity,\n FWHMeff=lsstDefaults.FWHMeff(f))\n Cm_infinity = (m5inf - 0.5*(skyMag[f] - 21)\n - 2.5*np.log10(0.7/lsstDefaults.FWHMeff(f)))\n dCm_infinity[f] = Cm_infinity - Cm[f]\n print title\n print 'Filter FWHMeff FWHMgeom SkyMag SkyCounts Tb Sb kAtm Gamma Cm dCm_infinity m5 SourceCounts'\n for f in ('u', 'g' ,'r', 'i', 'z', 'y'):\n print '%s %.2f %.2f %.2f %.1f %.3f %.3f %.4f %.6f %.2f %.2f %.2f %.2f'\\\n %(f, lsstDefaults.FWHMeff(f),\n SignalToNoise.FWHMeff2FWHMgeom(lsstDefaults.FWHMeff(f)),\n skyMag[f], skyCounts[f], Tb[f], Sb[f], kAtm[f],\n gamma[f], Cm[f], dCm_infinity[f], m5[f], sourceCounts[f])\n\n # Show what these look like individually (add sky & m5 limits on throughput curves)\n plt.figure()\n for f in filterlist:\n plt.plot(system[f].wavelen, system[f].sb, color=filtercolors[f], linewidth=2, label=f)\n plt.plot(atmosphere.wavelen, atmosphere.sb, 'k:', label='X=1.0')\n plt.legend(loc='center right', fontsize='smaller')\n plt.xlim(300, 1100)\n plt.ylim(0, 1)\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Throughput')\n plt.title('System Throughputs')\n plt.grid(True)\n\n plt.figure()\n ax = plt.gca()\n # Add dark sky\n ax2 = ax.twinx()\n plt.sca(ax2)\n skyab = -2.5*np.log10(darksky.fnu) - darksky.zp\n ax2.plot(darksky.wavelen, skyab,\n 'k-', linewidth=0.8, label='Dark sky mags')\n ax2.set_ylabel('AB mags')\n ax2.set_ylim(24, 14)\n plt.sca(ax)\n # end of dark sky\n handles = []\n for f in filterlist:\n plt.plot(system[f].wavelen, system[f].sb, color=filtercolors[f], linewidth=2)\n myline = mlines.Line2D([], [], color=filtercolors[f], linestyle='-', linewidth=2,\n label = '%s: m5 %.1f (sky %.1f)' %(f, m5[f], skyMag[f]))\n handles.append(myline)\n plt.plot(atmos.wavelen, atmos.sb, 'k:', label='Atmosphere, X=1.0')\n # Add legend for dark sky.\n myline = mlines.Line2D([], [], color='k', linestyle='-', label='Dark sky AB mags/arcsec^2')\n handles.append(myline)\n # end of dark sky legend line\n plt.legend(loc=(0.01, 0.69), handles=handles, fancybox=True, numpoints=1, fontsize='small')\n plt.ylim(0, 1)\n plt.xlim(300, 1100)\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Fractional Throughput Response')\n plt.title('System total response curves %s' %(title))\n return m5", "def _delayandsum5(data, offsets, ifactor2, steeramp, out, autopower):\n num, gridsize, numchannels = offsets.shape\n num = out.shape[0]\n for n in nb.prange(num):\n for gi in nb.prange(gridsize):\n out[n,gi] = 0\n autopower[n,gi] = 0\n for mi in range(numchannels):\n ind = offsets[n,gi,mi] + n\n r = (data[ind,mi] * (1-ifactor2[n,gi,mi]) \\\n + data[ind+1,mi] * ifactor2[n,gi,mi]) * steeramp[n,gi,mi]\n out[n,gi] += r\n autopower[n,gi] += r*r", "def maskClouds(self,img):\n\t\t\n\t\tscore = ee.Image(1.0);\n\t\t# Clouds are reasonably bright in the blue band.\n\t\tblue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))\n\t\tscore = score.min(blue_rescale);\n\n\t\t# Clouds are reasonably bright in all visible bands.\n\t\tvisible = img.select('red').add(img.select('green')).add(img.select('blue'))\n\t\tvisible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))\n\t\tscore = score.min(visible_rescale);\n\n\t\t# 
Clouds are reasonably bright in all infrared bands.\n\t\tinfrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))\n\t\tinfrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))\n\t\tscore = score.min(infrared_rescale);\n\n\t\t# Clouds are reasonably cool in temperature.\n\t\ttemp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))\n\t\tscore = score.min(temp_rescale);\n\n\t\t# However, clouds are not snow.\n\t\tndsi = img.normalizedDifference(['green', 'swir1']);\n\t\tndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))\n\t\tscore = score.min(ndsi_rescale).multiply(100).byte();\n\t\tmask = score.lt(self.env.cloudThreshold).rename(['cloudMask']);\n\t\timg = img.updateMask(mask);\n \n\t\treturn img;", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] 
* yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def ros_forest_mk5(weather_df: DataFrame, params: Dict) -> DataFrame:\n ros_df = weather_df[DATETIME].to_frame(name='DateTime')\n ros_df[FROS_DIR] = spread_direction(weather_df)\n ros_df[FFDI] = get_FFDI(weather_df, params['wind_reduction'])\n\n ros_df[FROS] = 0.0012*ros_df[FFDI]*params['fuel_load']\n ros_df[ROS] = 0.0012*get_FFDI(weather_df, flank=True)*params['fuel_load']\n\n return post_process(ros_df)", "def psdf_5(**kwargs):\n\n # fqlag parameters #\n n = 2**8\n dt = 1.0\n fql = np.array([.5/(dt*n), 1./dt])\n\n lc, extra = simulate_light_curves(n=n, dt=dt, nsim=100,\n input_psd=['bending_powerlaw', [1e-4, -2, 3e-3]])\n\n model = ['bpl', [-5, -2, -5]]\n inP = extra['input_psd'][1]\n inP = [np.log(inP[0]), inP[1], np.log(inP[2])]\n fit_psdf(fql, model, lc, extra, '5', input_pars=inP)", "def determine_exposure_time(self, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate 
various SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n 
ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def SclMasks(self, image, renamed=False):\n if self.process == 'SR':\n if renamed:\n scl = image.select('scene_classification_map')\n else:\n scl = image.select('SCL')\n\n data = ee.Dictionary(self.SclData)\n\n def wrap(band_value, name):\n band_value = ee.Number.parse(band_value)\n name = ee.String(name)\n mask = scl.eq(band_value).rename(name)\n return mask\n\n newbands = ee.Dictionary(data.map(wrap))\n bandslist = tools.dictionary.extractList(newbands,\n [str(i) for i in range(1, 12)])\n\n image = tools.image.addMultiBands(bandslist)\n return image", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, 
Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def _one_octave(self, shrink=True, refine=True, n_5=False):\n x = []\n y = []\n dx = []\n dy = []\n if not self.sigmas:\n self._calc_sigma()\n if self.do_mask and (self.cur_mask is None):\n self._init_mask()\n if self.do_mask and (numpy.logical_not(self.cur_mask).sum(dtype=int) == 0):\n return\n\n previous = self.data\n dog_shape = (len(self.sigmas) - 1,) + self.data.shape\n self.dogs = numpy.zeros(dog_shape, dtype=numpy.float32)\n\n idx = 0\n i = 0\n for sigma_abs, sigma_rel in self.sigmas:\n# if self.already_blurred != [] and i < 3:\n# sigma_rel = 0\n# if i > 0 : previous = self.already_blurred[i-1]\n if sigma_rel == 0:\n self.blurs.append(previous)\n else:\n new_blur = gaussian_filter(previous, sigma_rel)\n self.blurs.append(new_blur)\n self.dogs[idx] = previous - new_blur\n previous = new_blur\n idx += 1\n i += 1\n\n\n if self.dogs[0].shape == self.raw.shape:\n self.dogs_init = self.dogs\n\n if _blob:\n valid_points = _blob.local_max(self.dogs, self.cur_mask, n_5)\n else:\n valid_points = local_max(self.dogs, self.cur_mask, n_5)\n kps, kpy, kpx = numpy.where(valid_points)\n self.raw_kp.append((kps, kpy, kpx))\n\n if refine:\n if \"startswith\" in dir(refine) and refine.startswith(\"SG\"):\n kpx, kpy, kps, delta_s = self.refine_Hessian_SG(kpx, kpy, kps)\n l = kpx.size\n peak_val = self.dogs[(numpy.around(kps).astype(int),\n numpy.around(kpy).astype(int),\n numpy.around(kpx).astype(int))]\n valid = numpy.ones(l, dtype=bool)\n else:\n kpx, kpy, kps, peak_val, valid = self.refine_Hessian(kpx, kpy, kps)\n l = valid.sum()\n self.ref_kp.append((kps, kpy, kpx))\n print('After refinement : %i keypoints' % l)\n else:\n peak_val = self.dogs[kps, kpy, kpx]\n l = kpx.size\n valid = numpy.ones(l, bool)\n\n keypoints = numpy.recarray((l,), dtype=self.dtype)\n\n\n if l != 0:\n keypoints[:].x = (kpx[valid] + 0.5) * self.curr_reduction - 0.5 # Place ourselves at the center of the pixel, and back\n keypoints[:].y = (kpy[valid] + 0.5) * self.curr_reduction - 0.5 # Place ourselves at the center of the pixel, and back\n sigmas = self.init_sigma * (self.dest_sigma / self.init_sigma) ** ((kps[valid]) / (self.scale_per_octave))\n keypoints[:].sigma = (self.curr_reduction * sigmas)\n keypoints[:].I = peak_val[valid]\n\n\n if shrink:\n #shrink data so that they can be treated by next octave\n logger.debug(\"In shrink\")\n last = self.blurs[self.scale_per_octave]\n ty, tx = last.shape\n if ty % 2 != 0 or tx % 2 != 0:\n new_tx = 2 * ((tx + 1) // 2)\n new_ty = 2 * ((ty + 1) // 2)\n new_last = numpy.zeros((new_ty, new_tx), last.dtype)\n new_last[:ty, :tx] = last\n last = new_last\n if self.do_mask:\n new_msk = numpy.ones((new_ty, new_tx), numpy.int8)\n new_msk[:ty, :tx] = self.cur_mask\n self.cur_mask = new_msk\n self.data = 
binning(last, 2) / 4.0\n self.curr_reduction *= 2.0\n self.octave += 1\n self.blurs = []\n if self.do_mask:\n self.cur_mask = (binning(self.cur_mask, 2) > 0).astype(numpy.int8)\n self.cur_mask = morphology.binary_dilation(self.cur_mask, self.grow)\n\n\n if len(self.keypoints) == 0 :\n self.keypoints = keypoints\n else:\n old_size = self.keypoints.size\n new_size = old_size + l\n new_keypoints = numpy.recarray(new_size, dtype=self.dtype)\n new_keypoints[:old_size] = self.keypoints\n new_keypoints[old_size:] = keypoints\n self.keypoints = new_keypoints", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def make_animation_subset_levels(X, fixed_axes, fixed_value_1, fixed_value_2,\n filtration_size):\n # Create the array indexes\n obj = [slice(None, None, None)] * 4\n obj[fixed_axes[0]] = fixed_value_1\n obj[fixed_axes[1]] = fixed_value_2\n # print obj\n\n # Create sequence of threshold values\n thresholds = np.linspace(start=np.amin(X[obj]), stop=np.amax(X[obj]), num=filtration_size)\n # print thresholds\n # TEST PLOT\n # fig, ax = plt.subplots()\n # # interp = kwargs.get('interpolation', 'none')\n # # colors = kwargs.get('colormap', 'seismic')\n # img0 = ax.imshow(X[obj], cmap='Blues', interpolation='none')\n # fig.colorbar(img0, ax=ax, fraction=0.022, pad=0.01)\n # ax.invert_yaxis()\n # # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n # fig.tight_layout()\n # fig.show()\n\n # def get_middle(xx):\n # return 1 - (float(np.amax(xx)) / 
(np.amax(xx) + abs(np.amin(xx))))\n\n def init():\n global fig, ax, im, tx\n fig = plt.figure()\n ax = plt.axes()\n # idx = list(obj)\n # idx[sweep_axis] = slice(None, None, None)\n # middle = get_middle(X[idx])\n # print obj\n im = ax.imshow(X[obj] < thresholds[2], cmap='Blues',#cmap=shiftedColorMap(cm.seismic, midpoint=middle),\n interpolation='none', aspect='auto')\n # vmin=np.amin(X[idx]), vmax=np.amax(X[idx]))\n ax.invert_yaxis()\n # cb = fig.colorbar(im)\n # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n return\n\n def animate(n):\n # update indexes\n # obj[sweep_axis] = n\n # vmax = np.max(X[obj])\n # vmin = np.min(X[obj])\n im.set_data(X[obj] < thresholds[n])\n # im.set_clim(vmin, vmax)\n # tx.set_text('%s = %d' % (X.dimensions[sweep_axis], n))\n return\n\n init()\n anim = animation.FuncAnimation(fig, animate, frames=np.arange(filtration_size), interval=100, blit=False)\n return anim", "def retrieve_cloudmask(\n self, output_binary=True, include_thermal_test=True, include_channel_r5=True\n ):\n\n # Read visual near infrared (VNIR) channels at 15m resolution.\n r1 = self.get_reflectance(channel=\"1\")\n r2 = self.get_reflectance(channel=\"2\")\n r3N = self.get_reflectance(channel=\"3N\")\n\n # Read short-wave infrared (SWIR) channels at 30m resolution and match\n # VNIR resolution.\n r5 = self.get_reflectance(channel=\"5\")\n if self.datetime > datetime.datetime(2007, 5, 1) or not include_channel_r5:\n # The SWIR sensor suffered from temperature problems after May\n # 2007. Images later on are set to a dummy value \"1\", which won't\n # influence the following thresholding tests. Swath edge NaN pixels\n # stay NaN.\n r5[~np.isnan(r5)] = 1\n r5 = np.repeat(np.repeat(r5, 2, axis=0), 2, axis=1)\n\n # Read thermal (TIR) channel at 90m resolution and match VNIR\n # resolution.\n bt14 = self.get_brightnesstemperature(channel=\"14\")\n bt14 = np.repeat(np.repeat(bt14, 6, axis=0), 6, axis=1)\n\n # Ratios for clear-cloudy-tests.\n r3N2 = r3N / r2\n r12 = r1 / r2\n\n ### TEST 1-4 ###\n # Set cloud mask to default \"confidently clear\".\n clmask = np.ones(r1.shape, dtype=np.float) * 2\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"invalid value encountered\")\n\n # Set \"probably clear\" pixels.\n clmask[\n multiple_logical(\n r3N > 0.03,\n r5 > 0.01,\n 0.7 < r3N2,\n r3N2 < 1.75,\n r12 < 1.45,\n func=np.logical_and,\n )\n ] = PROBABLY_CLEAR\n\n # Set \"probably cloudy\" pixels.\n clmask[\n multiple_logical(\n r3N > 0.03,\n r5 > 0.015,\n 0.75 < r3N2,\n r3N2 < 1.75,\n r12 < 1.35,\n func=np.logical_and,\n )\n ] = PROBABLY_CLOUDY\n\n # Set \"confidently cloudy\" pixels\n clmask[\n multiple_logical(\n r3N > 0.065,\n r5 > 0.02,\n 0.8 < r3N2,\n r3N2 < 1.75,\n r12 < 1.2,\n func=np.logical_and,\n )\n ] = CONFIDENTLY_CLOUDY\n\n # Combine swath edge pixels.\n clmask[\n multiple_logical(\n np.isnan(r1),\n np.isnan(r2),\n np.isnan(r3N),\n np.isnan(r5),\n func=np.logical_or,\n )\n ] = np.nan\n\n if include_thermal_test:\n ### TEST 5 ###\n # Uncertain warm ocean pixels, higher than the 5th percentile of\n # brightness temperature values from all \"confidently clear\"\n # labeled pixels, are overwritten with \"confidently clear\".\n\n # Check for available \"confidently clear\" pixels.\n nc = np.sum(clmask == 2) / np.sum(~np.isnan(clmask))\n if nc > 0.03:\n bt14_p05 = np.nanpercentile(bt14[clmask == 2], 5)\n else:\n # If less than 3% of pixels are \"confidently clear\", test 5\n # cannot be applied according to Werner et al., 2016. 
However,\n # a sensitivity study showed that combining \"probably clear\"\n # and \"confidently clear\" pixels in such cases leads to\n # plausible results and we derive a threshold correspondingly.\n bt14_p05 = np.nanpercentile(\n bt14[np.logical_or(clmask == 2, clmask == 3)], 5\n )\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"invalid value encountered\")\n # Pixels with brightness temperature values above the 5th\n # percentile of clear ocean pixels are overwritten with\n # \"confidently clear\".\n clmask[np.logical_and(bt14 > bt14_p05, ~np.isnan(clmask))] = 2\n\n # Combine swath edge pixels.\n clmask[np.logical_or(np.isnan(clmask), np.isnan(bt14))] = np.nan\n\n if output_binary:\n clmask[np.logical_or(clmask == 2, clmask == 3)] = 0 # clear\n clmask[np.logical_or(clmask == 4, clmask == 5)] = 1 # cloudy\n\n return clmask", "def add_ska1_v5(self, r_min=None, r_max=None):\n # Load the station coordinates.\n path = os.path.dirname(os.path.abspath(__file__))\n coords = np.loadtxt(join(path, 'data', 'v5_enu.txt'))\n x, y, z = coords[:, 0], coords[:, 1], coords[:, 2]\n r = (x**2 + y**2)**0.5\n\n cluster_radius = 90 # This just seems to work (not confirmed)\n\n if r_min and r_max:\n idx = np.where(np.logical_and(r >= r_min, r <= r_max))\n x, y, z = x[idx], y[idx], z[idx]\n elif r_min:\n idx = np.where(r >= r_min)\n x, y, z = x[idx], y[idx], z[idx]\n elif r_max:\n idx = np.where(r <= r_max)\n x, y, z = x[idx], y[idx], z[idx]\n\n # Get the cluster centres within the given range.\n cluster_x, cluster_y, _ = \\\n TelescopeLayout.cluster_centres_ska_v5(r_min, r_max)\n\n # Loop over clusters and extract stations within a 90 m radius.\n for cx, cy in zip(cluster_x, cluster_y):\n dr = ((x - cx)**2 + (y - cy)**2)**0.5\n idx = np.where(dr <= cluster_radius)\n tx, ty, tz = x[idx], y[idx], z[idx]\n # num_clusters += tx.size\n # r_ = (tx**2 + ty**2)**0.5\n # r_min = min(r_min, r_.min())\n # r_max = max(r_max, r_.max())\n if tx.size > 0:\n cluster_count = 0\n for name in self.layouts:\n if name.startswith('ska1_v5_cluster'):\n cluster_count += 1\n self.layouts['ska1_v5_cluster_%03i' % cluster_count] = {\n 'x': tx, 'y': ty, 'z': tz, 'cx': cx, 'cy': cy,\n 'cr': cluster_radius, 'r_min': r_min, 'r_max': r_max}\n x = np.delete(x, idx)\n y = np.delete(y, idx)\n z = np.delete(z, idx)\n if x.size > 0:\n # Add any remaining stations that were not assigned to a cluster.\n count = 0\n for name in self.layouts:\n if name.startswith('ska1_v5') and not '_cluster' in name:\n count += 1\n key_ = 'ska1_v5_%03i' % count\n self.layouts[key_] = dict(x=x, y=y, z=z, r_min=r_min, r_max=r_max)", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = 
np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = 
[]\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. 
Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). 
Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del 
data['{}_var_'.format(filt.lower())]\n\n return data", "def frame_fix_badpix_isolated(array, bpm_mask=None, sigma_clip=3, num_neig=5,\n size=5, protect_mask=0, cxy=None, mad=False, \n ignore_nan=True, verbose=True, full_output=False):\n if array.ndim != 2:\n raise TypeError('Array is not a 2d array or single frame')\n if size % 2 == 0:\n raise TypeError('Size of the median blur kernel must be an odd integer')\n\n if bpm_mask is not None:\n bpm_mask = bpm_mask.astype('bool')\n\n if verbose: start = time_ini()\n\n if num_neig > 0:\n neigh = True\n else:\n neigh = False\n\n frame = array.copy()\n if cxy is None:\n cy, cx = frame_center(frame)\n else:\n cx, cy = cxy\n \n if bpm_mask is None:\n ori_nan_mask = np.where(np.isnan(frame))\n ind = clip_array(frame, sigma_clip, sigma_clip, neighbor=neigh,\n num_neighbor=num_neig, mad=mad)\n bpm_mask = np.zeros_like(frame)\n bpm_mask[ind] = 1\n if ignore_nan:\n bpm_mask[ori_nan_mask] = 0\n if protect_mask:\n cir = disk((cy, cx), protect_mask, shape=bpm_mask.shape)\n bpm_mask[cir] = 0\n bpm_mask = bpm_mask.astype('bool')\n\n smoothed = median_filter(frame, size, mode='mirror')\n frame[np.where(bpm_mask)] = smoothed[np.where(bpm_mask)]\n array_out = frame\n count_bp = np.sum(bpm_mask)\n \n if verbose:\n msg = \"/nDone replacing {} bad pixels using the median of neighbors\"\n print(msg.format(count_bp))\n timing(start)\n \n if full_output:\n return array_out, bpm_mask\n else:\n return array_out", "def test_thresh_color(images):\n for img in images:\n # Get the stack bounds to draw onto the main image\n stack_bounds = get_stack_bounds(img)\n\n # Get all the sub-images for each stack\n stack_images = get_stack_images(img)\n\n SIZE = (200, 300)\n filtered_imgs = []\n\n # Loop through all the stacks\n for stack_bound, stack_img in zip(stack_bounds, stack_images):\n #Draw the rectangle for the current stack\n disp = deepcopy(img)\n located_stacks_img = draw_rect(np.copy(disp), stack_bound, [0,0,0])\n cv2.imshow('Filtering stack', located_stacks_img)\n\n # Convert the current stack image into hsv\n stack_img_hsv = cv2.cvtColor(stack_img, cv2.COLOR_BGR2HSV)\n for i, color in enumerate(COLORS):\n contours = thresh_color(stack_img, stack_img_hsv, COLORS[color])\n\n # Draw the contours\n stack2 = deepcopy( stack_img)\n cont_img = cv2.drawContours(stack2, contours, -1, (255,255,255), 2)\n # cont_img = cv2.resize(cont_img, SIZE)\n\n # Put the number of contours as text\n txt = '{}:{}'.format(color, len(contours))\n print(txt)\n\n # Display the contour information to the screen\n cv2.imshow(txt, scale_image(cont_img, 9))\n filtered_imgs.append(cont_img)\n cv2.moveWindow(txt, 180*i, 600)\n # cv2.imshow('filtered_images', np.hstack(filtered_imgs))\n print()\n # Skip to the next image\n if cv2.waitKey(0) == ord('1'):\n break\n cv2.destroyAllWindows()", "def flattenFrames(stack):\n \n maxHeight=0\n frameList=[]\n \n \n print('\\n')\n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting shifts {:.2f}% done'.format(100.0*((i+1)/len(stack))),end='', flush=True)\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def 
masked(months=range(1, 13), years=[2009], folder=\"data/\", layer=\"BHR_VIS\"):\n data = []\n file_template = 'NETCDF:\"{:s}\":{:s}' # Template for the Netcdf path\n # the actual filename\n fname_template = '{:s}/GlobAlbedo.merge.albedo.05.{:d}{:02d}.nc'\n for year in years:\n for month in months:\n fname = fname_template.format(folder, year, month)\n netcdf_fname = file_template.format(fname, layer)\n g = gdal.Open(netcdf_fname)\n if g is None:\n raise IOError(\"Problem with reading file {}\".format(fname))\n the_data = g.ReadAsArray()\n masked_data = np.ma.array(the_data,mask=np.isnan(the_data))\n data.append(masked_data)\n output_data = np.ma.array(data)\n return output_data", "def optimizeThresholds(band=0, tmo=7) :\n # Optimize thresholds should take <5 seconds, so set tmo to 7\n bandlist = helpers.makeList(band)\n cblist = makeCorrBandList(band)\n # If band is offline, don't wait for it.\n if ( len( cblist ) != 0 ) :\n rangedCb = helpers.formatAsRanges(cblist)\n c1 = \"Waiting for astrobands %s before turning noise on\" % rangedCb\n # previous command was probably configastroband, which could take \n # 30 seconds if loading a new FPGA configuration.\n wait(CORR, cblist, 40, ALL, precomment=c1)\n\n noiseon()\n\n if ( band == 0 ) :\n cstr = \"Optimizing thresholds on all Astrobands\"\n else :\n rangedAb = helpers.formatAsRanges(bandlist)\n cstr = \"Optimizing thresholds on Astroband(s) %s \" % ( rangedAb )\n rtdComment( cstr )\n s.optimizeThresholds( bandlist );\n if ( len( cblist ) != 0 ) :\n rangedCb = helpers.formatAsRanges(cblist)\n c1 = \"Waiting for astrobands %s before turning noise off\" % rangedCb\n wait(CORR, cblist, tmo, ALL, precomment=c1)\n\n noiseoff()", "def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n 
datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, debug, subbands_ifft, return_subbands_only, rectify_and_lowpass_subbands, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function, include_subbands_noise, subbands_noise_mean, subbands_noise_stddev):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n if debug: # return the real and imaginary parts of the subbands separately -- use if matching to their output\n nets['subbands_r'] = tf.real(nets['subbands'])\n nets['subbands_i'] = tf.imag(nets['subbands'])\n\n # TODO: with using subbands_ifft is redundant. \n # make the time subband operations if we are returning the subbands or if we want to include all of the keys in the graph\n if subbands_ifft or return_subbands_only or include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n if return_subbands_only or include_all_keys:\n nets['subbands_time'] = nets['subbands_ifft']\n if rectify_and_lowpass_subbands: # TODO: the subband operations are hard coded in?\n nets['subbands_time_relu'] = tf.nn.relu(nets['subbands_time'], name='rectified_subbands')\n nets['subbands_time_lowpassed'] = hanning_pooling_1d_no_depthwise(nets['subbands_time_relu'], downsample=2, length_of_window=2*4, make_plots=False, data_format='NCW', normalize=True, sqrt_window=False)\n\n # TODO: noise is only added in the case when we are calcalculating the time subbands, but we might want something similar for the cochleagram\n if return_subbands_only or include_all_keys:\n # Compress subbands if specified and add noise. 
\n nets = compression_function(nets, input_node_name='subbands_time_lowpassed', output_node_name='subbands_time_lowpassed_compressed')\n if include_subbands_noise:\n nets = add_neural_noise(nets, subbands_noise_mean, subbands_noise_stddev, input_node_name='subbands_time_lowpassed_compressed', output_node_name='subbands_time_lowpassed_compressed_with_noise')\n nets['subbands_time_lowpassed_compressed_with_noise'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed_with_noise'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed_with_noise']\n else:\n nets['subbands_time_lowpassed_compressed'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed']\n\n return nets", "def boxcar_filter(time_series, lb=0, ub=0.5, n_iterations=2):\r\n\r\n n = time_series.shape[-1]\r\n\r\n len_boxcar_ub = np.ceil(1 / (2.0 * ub))\r\n boxcar_ub = np.empty(len_boxcar_ub)\r\n boxcar_ub.fill(1.0 / len_boxcar_ub)\r\n boxcar_ones_ub = np.ones_like(boxcar_ub)\r\n\r\n if lb == 0:\r\n lb = None\r\n else:\r\n len_boxcar_lb = np.ceil(1 / (2.0 * lb))\r\n boxcar_lb = np.empty(len_boxcar_lb)\r\n boxcar_lb.fill(1.0 / len_boxcar_lb)\r\n boxcar_ones_lb = np.ones_like(boxcar_lb)\r\n\r\n #If the time_series is a 1-d, we add a dimension, so that we can iterate\r\n #over 2-d inputs:\r\n if len(time_series.shape) == 1:\r\n time_series = np.array([time_series])\r\n for i in range(time_series.shape[0]):\r\n if ub:\r\n #Start by applying a low-pass to the signal. Pad the signal on\r\n #each side with the initial and terminal signal value:\r\n pad_s = np.hstack((boxcar_ones_ub *\r\n time_series[i, 0], time_series[i]))\r\n pad_s = np.hstack((pad_s, boxcar_ones_ub * time_series[i, -1]))\r\n\r\n #Filter operation is a convolution with the box-car(iterate,\r\n #n_iterations times over this operation):\r\n for iteration in range(n_iterations):\r\n conv_s = np.convolve(pad_s, boxcar_ub)\r\n\r\n #Extract the low pass signal by excising the central\r\n #len(time_series) points:\r\n time_series[i] = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):\r\n conv_s.shape[-1] / 2 + np.ceil(n / 2.)]\r\n\r\n #Now, if there is a high-pass, do the same, but in the end subtract out\r\n #the low-passed signal:\r\n if lb:\r\n pad_s = np.hstack((boxcar_ones_lb *\r\n time_series[i, 0], time_series[i]))\r\n pad_s = np.hstack((pad_s, boxcar_ones_lb * time_series[i, -1]))\r\n\r\n #Filter operation is a convolution with the box-car(iterate,\r\n #n_iterations times over this operation):\r\n for iteration in range(n_iterations):\r\n conv_s = np.convolve(pad_s, boxcar_lb)\r\n\r\n #Extract the low pass signal by excising the central\r\n #len(time_series) points:\r\n s_lp = conv_s[conv_s.shape[-1] / 2 - np.floor(n / 2.):\r\n conv_s.shape[-1] / 2 + np.ceil(n / 2.)]\r\n\r\n #Extract the high pass signal simply by subtracting the high pass\r\n #signal from the original signal:\r\n time_series[i] = time_series[i] - s_lp + np.mean(s_lp) # add mean\r\n #to make sure that there are no negative values. 
This also seems to\r\n #make sure that the mean of the signal (in % signal change) is\r\n #close to 0\r\n\r\n return time_series.squeeze()", "def summaryD5(self):\r\n\r\n if self.window.diff_tabs.tempruns_D45_set==3:\r\n\r\n dfD5_temp1_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp1_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n D5summary_temp1=pd.concat([dfD5_temp1_pos1,dfD5_temp1_pos2],axis=1)\r\n D5summary_temp1.to_csv('D5summary_temp1.txt',index=False)\r\n\r\n dfD5_temp2_pos1= pd.read_csv('raw_text_D5_1_1.txt')\r\n dfD5_temp2_pos2= pd.read_csv('raw_text_D5_1_2.txt')\r\n \r\n\r\n D5summary_temp2=pd.concat([dfD5_temp2_pos1,dfD5_temp2_pos2],axis=1)\r\n D5summary_temp2.to_csv('D5summary_temp2.txt',index=False)\r\n\r\n dfD5_temp3_pos1= pd.read_csv('raw_text_D5_3_1.txt')\r\n dfD5_temp3_pos2= pd.read_csv('raw_text_D5_3_2.txt')\r\n \r\n\r\n D4summary_temp3=pd.concat([dfD5_temp3_pos1,dfD5_temp3_pos2],axis=1)\r\n D4summary_temp3.to_csv('D5summary_temp3.txt',index=False)", "def mask_init (data, header, filt, imgtype):\n\n if get_par(set_zogy.timing,tel):\n t = time.time()\n\n fits_bpm = (get_par(set_bb.bad_pixel_mask,tel)\n .replace('bpm', 'bpm_{}'.format(filt)))\n\n bpm_present, fits_bpm = already_exists (fits_bpm, get_filename=True)\n if bpm_present:\n # if it exists, read it\n data_mask = read_hdulist(fits_bpm)\n else:\n # if not, create uint8 array of zeros with same shape as\n # [data]\n log.info('Warning: bad pixel mask {} does not exist'.format(fits_bpm))\n data_mask = np.zeros(np.shape(data), dtype='uint8')\n\n\n # create initial mask header\n header_mask = fits.Header()\n\n\n if imgtype == 'object':\n\n # mask of pixels with non-finite values in [data]\n mask_infnan = ~np.isfinite(data)\n # replace those pixel values with zeros\n data[mask_infnan] = 0\n # and add them to [data_mask] with same value defined for 'bad' pixels\n # unless that pixel was already masked\n mask_value = get_par(set_zogy.mask_value,tel)\n data_mask[(mask_infnan) & (data_mask==0)] += mask_value['bad']\n\n\n # identify saturated pixels; saturation level (ADU) is taken from\n # blackbox settings file, which needs to be mulitplied by the gain\n # and have the mean biaslevel subtracted\n satlevel_electrons = (get_par(set_bb.satlevel,tel) *\n np.mean(get_par(set_bb.gain,tel))\n - header['BIASMEAN'])\n mask_sat = (data >= satlevel_electrons)\n # add them to the mask of edge and bad pixels\n data_mask[mask_sat] += mask_value['saturated']\n\n\n # determining number of saturated objects; 2 saturated pixels are\n # considered from the same object also if they are only connected\n # diagonally\n struct = np.ones((3,3), dtype=bool)\n __, nobj_sat = ndimage.label(mask_sat, structure=struct)\n\n\n # and pixels connected to saturated pixels\n struct = np.ones((3,3), dtype=bool)\n mask_satcon = ndimage.binary_dilation(mask_sat, structure=struct,\n iterations=1)\n # add them to the mask\n mask_satcon2add = (mask_satcon & ~mask_sat)\n data_mask[mask_satcon2add] += mask_value['saturated-connected']\n\n\n # fill potential holes using function [fill_sat_holes]\n fill_sat_holes (data_mask, mask_value)\n\n\n header_mask['SATURATE'] = (satlevel_electrons, '[e-] adopted saturation '\n 'threshold')\n header['NOBJ-SAT'] = (nobj_sat, 'number of saturated objects')\n # also add these to the header of image itself\n header['SATURATE'] = (satlevel_electrons, '[e-] adopted saturation threshold')\n header['NOBJ-SAT'] = (nobj_sat, 'number of saturated objects')\n # rest of the mask header entries are added in one go using\n # function 
[mask_header] once all the reduction steps have\n # finished\n\n\n if get_par(set_zogy.timing,tel):\n log_timing_memory (t0=t, label='mask_init')\n\n return data_mask.astype('uint8'), header_mask", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def broaden_mask(img, threshold=0.05, qual=None):\n if not np.any(qual):\n qual = DerivativeVariance(img.phase)\n qual = qual[img.mask==True].max()*1.1 - qual\n max_value = qual[img.mask==True].max()\n img['mask'][qual<max_value*threshold] = False", "def oneFifthRule(self, t):\n\n # Only adapt every n evaluations\n if t % self.n != 0:\n return\n\n if t < self.N:\n success = mean(self.success_history[:t])\n else:\n success = mean(self.success_history)\n\n if success < 1/5:\n self.sigma *= self.c\n elif success > 1/5:\n self.sigma /= self.c\n\n self.sigma_mean = self.sigma", "def runBrighterFatter():\n RunData([getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/')[0],], out='I800nmlow',\n wavelength='l800l')\n RunData([getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/')[2],], out='I800nmmed',\n wavelength='l800m')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[4],], out='I800nmhigh',\n wavelength='l800h')", "def find_arms(path,fr_nb):\n im=open_frame(path,fr_nb)\n img=im.copy()\n im=img_as_ubyte(im)\n mask_h = hysteresis_thresholding(img,6,10)\n \n ksize=5\n kernel = np.ones((ksize,ksize),dtype = np.uint8)\n kernel = skimage.morphology.disk(ksize)\n \n mask = cv2.morphologyEx(mask_h, cv2.MORPH_OPEN, kernel,iterations=2)\n \n arms = mask_h-mask\n \"\"\"\n lab,_ = ndi.label(diff)\n \n arms = skimage.morphology.remove_small_objects(lab,60)\"\"\" #Only temporary, to track only the biggest\n return mask,arms", "def clip_at_nth(infits, cut=10):\n\n#\n#--- trim the extreme values\n#\n upper = find_nth(infits, cut)\n\n cmd1 = \"/usr/bin/env PERL5LIB=\"\n cmd2 = ' dmimgthresh infile=' + infits+ ' outfile=zout.fits cut=\"0:' + str(upper) + '\" value=0 clobber=yes'\n cmd = cmd1 + cmd2\n bash(cmd, env=ascdsenv)\n\n outfile = infits.replace('.fits','_full.fits')\n cmd = 'mv ' + infits + ' ' + outfile\n os.system(cmd)\n \n m = re.search('gz', infits)\n if m is not None:\n os.system('gzip zout.fits')\n cmd = 'mv zout.fits.gz ' + infits\n os.system(cmd)\n else:\n cmd = 'mv zout.fits ' + infits\n os.system(cmd)", "def conv5x5(self, in_planes, out_planes, stride=1):\n c = self.conv(5, in_planes, out_planes, stride=stride)\n return c", "def top5_bottom5_image(tissue, model, patchsize, feature):\n\n from openslide import open_slide\n features, expression, donorIDs, transcriptIDs, technical_factors, technical_headers, technical_idx = extract_final_layer_data(tissue, model, 'mean', patchsize)\n\n sorted_idx = np.argsort(features[:,feature - 1])\n donorIDs_ordered = donorIDs[sorted_idx]\n\n tissue_filepath = os.path.join(GTEx_directory,'data','raw',tissue)\n\n LungGTExIDs = os.listdir(tissue_filepath)\n LungdonorIDs = [x.split('.')[0].split('-')[1] for x in LungGTExIDs]\n\n ordered_GTExIDs = np.array(LungGTExIDs)[[LungdonorIDs.index(x.decode('utf-8')) for x in donorIDs_ordered]]\n\n topIDs = ordered_GTExIDs[-5:]\n bottomIDs = ordered_GTExIDs[:5]\n\n top_five_images = []\n bottom_five_images = []\n\n for (k,ID) in 
enumerate(topIDs):\n image_filepath = os.path.join(GTEx_directory,'data','raw','Lung', ID)\n slide = open_slide(image_filepath)\n x = slide.get_thumbnail(size=(400,400))\n top_five_images.append(x)\n\n\n for (k,ID) in enumerate(bottomIDs):\n image_filepath = os.path.join(GTEx_directory,'data','raw','Lung', ID)\n slide = open_slide(image_filepath)\n x = slide.get_thumbnail(size=(400,400))\n bottom_five_images.append(x)\n\n return top_five_images, bottom_five_images", "def concatenate_5dstacks(stacks):\n def stack_channel(stacks, channel):\n \"\"\"Stack multiple 4d ndarrays\"\"\"\n cstack = stacks[0][channel].copy()\n frames = []\n for i in range(1, len(stacks)):\n frames.append(len(cstack))\n cstack = np.vstack([cstack, stacks[i][channel]])\n return cstack, frames\n c0_stack, frames = stack_channel(stacks, 0)\n c1_stack, _ = stack_channel(stacks, 1)\n return np.stack((c0_stack, c1_stack)), frames", "def filter_img(inarr, data_resolution):\n outt = inarr.copy()\n print('outmin', np.nanmin(outt), np.nanmax(outt))\n\n t_thresh_size = -40\n t_thresh_cut = -50\n\n outt[outt >= t_thresh_size] = 0\n outt[np.isnan(outt)] = 0\n\n labels, numL = label(outt)\n\n u, inv = np.unique(labels, return_inverse=True)\n n = np.bincount(inv)\n\n pix_nb = 700/data_resolution**2\n\n badinds = u[(n < pix_nb)]\n # all blobs with more than 1000 pixels = 25,000km2 (meteosat regridded 5km), 200pix = 5000km2, 8pix = 200km2\n # scale 30km, radius 15km ca. 700km2 circular area equals 28 pix\n\n for bi in badinds:\n inds = np.where(labels == bi)\n outt[inds] = 0\n\n outt[outt >= t_thresh_cut] = 150\n\n grad = np.gradient(outt)\n outt[outt == 150] = np.nan\n\n nogood = np.isnan(outt) # filters edge maxima later, no maxima in -40 edge area by definition!\n\n # tdiff = np.nanmax(outt) - np.nanmin(outt) # define background temperature for image\n # if tdiff > 28: # temp difference of 28 degrees\n # xmin = 15\n # else:\n # xmin = 10\n\n xmin = 10\n outt[nogood] = t_thresh_cut - xmin\n nok = np.where(abs(grad[0]) > 80)\n d = 2\n i = nok[0]\n j = nok[1]\n # edge smoothing for wavelet application\n for ii, jj in zip(i, j):\n kern = outt[ii - d:ii + d + 1, jj - d:jj + d + 1]\n outt[ii - d:ii + d + 1, jj - d:jj + d + 1] = ndimage.gaussian_filter(kern, 3, mode='nearest')\n\n return outt, nogood, t_thresh_size, t_thresh_cut, pix_nb", "def stack_tir(scene_urls,cloud_mask_bits,aoi,aoi_crs,\n subtract_median_lst=True,subtract_air_temp=False):\n if subtract_air_temp:\n ceda_password = get_ceda_password()\n at = met_climate.access_ukcp09(cf.ceda_username,ceda_password)\n\n \n # with rasterio.open(scene_bqa) as bqa:\n # with rasterio.open(scene_tir) as tir:\n\n # bqa_data,bqa_trans = ru.read_in_aoi(bqa,**aoi_kwargs)\n # tir_data,tir_trans = ru.read_in_aoi(tir,**aoi_kwargs)\n \n # bqa_data = bqa_data[0,:,:]\n # tir_data = tir_data[0,:,:]\n # tir_data = ma.array(tir_data,dtype=float,\n # mask=ru.mask_qa(bqa_data,bitmask=0b1))\n\n # (ymin,ymax) = (0, tir_data.shape[0])\n # (xmin,xmax) = (0, tir_data.shape[1])\n \n counter=-1\n for scene_url in scene_urls:\n counter+=1\n scene_tir = scene_url\n scene_bqa = scene_url.replace('B'+tirband,'B'+qaband)\n scene_red = scene_url.replace('B'+tirband,'B'+rband)\n scene_nir = scene_url.replace('B'+tirband,'B'+nband)\n scene_metadata = scene_url.replace('B'+tirband+'.TIF','MTL.txt')\n\n print('Reading scene {}'.format(counter+1))\n try:\n with rasterio.open(scene_bqa) as bqa:\n #print(scene_bqa)\n bqa_data,bqa_trans = ru.read_in_aoi(bqa,aoi=aoi,aoi_crs=aoi_crs)\n\n with rasterio.open(scene_tir) as tir:\n 
#print(scene_tir)\n tir_data,tir_trans = ru.read_in_aoi(tir,aoi=aoi,aoi_crs=aoi_crs)\n tir_crs = tir.crs\n tir_profile = tir.profile\n\n with rasterio.open(scene_red) as red:\n #print(scene_red)\n red_data,red_trans = ru.read_in_aoi(red,aoi=aoi,aoi_crs=aoi_crs)\n red_crs = red.crs\n\n with rasterio.open(scene_nir) as nir:\n #print(scene_nir)\n nir_data,nir_trans = ru.read_in_aoi(nir,aoi=aoi,aoi_crs=aoi_crs)\n \n except OSError as e:\n print('ERROR',e)\n print('skipping scene')\n counter = counter-1\n continue\n \n # Determine size of stack allowing for AoI to extend outside of scene\n if counter == 0:\n aoi_box = rasterio.warp.transform_bounds(aoi_crs,tir_crs,*aoi.values())\n aoi_left, aoi_bottom, aoi_right, aoi_top = aoi_box\n aoi_box = dict(zip(('minx','miny','maxx','maxy'),aoi_box))\n # rowmin,colmin = (bqa.index(aoi_left,aoi_top)) #,op=round))\n # rowmax,colmax = (bqa.index(aoi_right,aoi_bottom)) #,op=round))\n # The above two lines are fine but the following does not \n # require the rasterio dataset to be kept open\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,aoi_left,aoi_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,aoi_right,aoi_bottom)\n stack_height,stack_width = (rowmax-rowmin,colmax-colmin)\n lst_stack = (ma.zeros((len(scene_urls),stack_height,stack_width),\n dtype=np.float,fill_value=np.nan\n )+np.nan) \n \n # Determine size of intersect in THIS scene\n intersect = ru.aoi_scene_intersection(aoi_box,bqa)\n ins_left, ins_bottom, ins_right, ins_top = intersect.bounds\n #rowmin,colmin = (bqa.index(ins_left,ins_top,op=round))\n #rowmax,colmax = (bqa.index(ins_right,ins_bottom,op=round))\n # The above two lines are incorrect now that we read a window:\n # We need to transform the coordinates into the row,col of \n # the window, not the original file.\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,ins_left,ins_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,ins_right,ins_bottom)\n\n try:\n # Subset data \n bqa_data = ma.array(bqa_data[0,rowmin:rowmax,colmin:colmax])\n tir_data = ma.array(tir_data[0,rowmin:rowmax,colmin:colmax])\n red_data = ma.array(red_data[0,rowmin:rowmax,colmin:colmax])\n nir_data = ma.array(nir_data[0,rowmin:rowmax,colmin:colmax])\n assert tir_data.shape == lst_stack.shape[1:]\n except (IndexError,AssertionError) as e:\n print('ERROR:',e)\n print('loop count',counter)\n print(tir_data.shape, lst_stack.shape)\n print(rowmin,rowmax,colmin,colmax)\n import pdb; pdb.set_trace()\n\n lst_data = lst.calculate_land_surface_temperature_NB(\n red_data, nir_data, tir_data,\n red_trans, tir_trans, \n red_crs, tir_crs, scene_metadata\n )\n \n # Masks\n smw = 11\n mask_all = filters.maximum_filter(\n ru.mask_qa(bqa_data,bits=cloud_mask_bits),size=smw\n )\n\n lst_data_mask_all = ma.array(lst_data,\n mask=mask_all,\n dtype=np.float,\n fill_value=np.nan) #.filled()\n\n # After masking, reproject\n # not necessary if they share a CRS\n if counter > 0:\n assert tir_crs == prev_crs\n prev_crs = tir_crs\n\n # Now do some normalisation\n if subtract_air_temp:\n filename = scene_tir.split('/')[-1]\n datestring = filename.split('_')[3]\n\n atscene = met_climate.dummy_scene( \n tir_crs, tir_trans, aoi_box,(stack_height,stack_width))\n\n # import pdb; pdb.set_trace()\n # If the following fails, it may mean there was a problem setting up the session\n atdata = at.grid_temp_over_scene(\n atscene, datestring, interpolation='linear')\n atdata = atdata[rowmin:rowmax,colmin:colmax]\n assert lst_data_mask_all.shape == atdata.shape\n lst_data_mask_all = 
ma.array(\n lst_data_mask_all - atdata,\n mask=mask_all,\n fill_value=np.nan)\n \n if subtract_median_lst:\n # ALSO subtract median xLST\n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n elif subtract_median_lst:\n # Subtract median LST from scene (within QA mask) \n \n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n # Then add to stack\n lst_stack[counter,:,:] = lst_data_mask_all\n\n # Make profile for file output\n N_layers = counter+1\n tir_profile.update(\n dtype=rasterio.float64,\n width=stack_width,\n height=stack_height,\n transform=tir_trans,\n count=N_layers,\n compress='lzw'\n )\n\n\n return lst_stack, tir_profile", "def compress_netcfd(folder_path, start_date, out_folder, file_name, num_of_rivids):\n\n # Based on 15 day forecast\n forecast_day_indices = np.array([0, 8, 16, 24, 32, 40, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84], dtype=np.int8)\n\n # Based on 10 day forecast\n # Excluding the first day because we already have initialization from the normal forecasts\n high_res_forecast_day_indices = np.array([24, 48, 72, 92, 100, 108, 112, 116, 120, 124])\n\n start_datetime = to_datetime(start_date, infer_datetime_format=True)\n dates = date_range(start_datetime + DateOffset(1), periods=15)\n high_res_dates = date_range(start_datetime + DateOffset(1), periods=10)\n\n # Ensemble Dimensions\n # 1) Rivid\n # 2) Number of forecast days (i.e. 15 in a 15 day forecast)\n # 3) Number of ensembles\n\n ensembles = np.zeros((num_of_rivids, 15, 51), dtype=np.float32)\n initialization = np.zeros((num_of_rivids,), dtype=np.float32)\n\n for forecast_number in range(1, 52):\n file = os.path.join(folder_path, \"{}_{}.nc\".format(file_name, forecast_number))\n\n tmp_dataset = xr.open_dataset(file)\n streamflow = tmp_dataset['Qout'].data\n streamflow = streamflow[:, forecast_day_indices]\n\n if forecast_number == 1:\n initialization[:] = streamflow[:, 0]\n rivids = tmp_dataset['rivid'].data\n lat = tmp_dataset['lat'].data\n lon = tmp_dataset['lon'].data\n z = tmp_dataset['z'].data\n\n ensembles[:, :, forecast_number - 1] = streamflow[:, 1:]\n\n tmp_dataset.close()\n\n # High Res Forecast\n file = os.path.join(folder_path, \"{}_52.nc\".format(file_name))\n\n tmp_dataset = xr.open_dataset(file)\n\n high_res_forecast_data = tmp_dataset[\"Qout\"].data\n high_res_forecast_data = high_res_forecast_data[:, high_res_forecast_day_indices]\n\n tmp_dataset.close()\n\n data_variables = {\n \"Qout\": (['rivid', 'date', 'ensemble_number'], ensembles),\n \"Qout_high_res\": (['rivid', 'date_high_res'], high_res_forecast_data)\n }\n\n coords = {\n 'rivid': rivids,\n 'date': dates,\n 'date_high_res': high_res_dates,\n 'ensemble_number': np.arange(1, 52, dtype=np.uint8),\n 'initialization_values': ('rivid', initialization),\n 'lat': ('rivid', lat),\n 'lon': ('rivid', lon),\n 'z': ('rivid', z),\n 'start_date': start_datetime\n }\n\n xarray_dataset = xr.Dataset(data_variables, coords)\n xarray_dataset.to_netcdf(path=os.path.join(out_folder, '{}.nc'.format(start_date)), format='NETCDF4')", "def dynamic_masking(image):\n image = img_as_float(image)\n background = gaussian_filter(median_filter(image,3),1)\n image[background > threshold_otsu(background)/5.0] = 0.0\n \n return image", "def read_concat_5dczi(czis):\n stacks = []\n for czi in czis:\n stacks.append(read_czi(czi, True))\n stack, frames = concatenate_5dstacks(stacks)\n return 
stack, frames", "def roi_to_wm(img,brain_wm,nth):\n \n data = img.get_data()\n wmdata = brain_wm.get_data()\n shape = data.shape\n\n roi_ids = np.unique(data)\n roi = roi_ids[1:]\n roi = [int(i) for i in roi]\n print roi\n \n wmdata = wmdata!=0\n result_mask = np.zeros(data.shape)\n #print wmdata \n \n #First, get the nonzero voxel index in image data.\n #Here image data is a label volume.\n #ROIs is in it\n for roi_id in roi:\n #print roi_id\n tmp_mask = data==roi_id\n #print tmp_mask\n indexs = np.transpose(tmp_mask.nonzero())\n #print indexs\n \n #Second, find the nearest wm voxel for each indexs.\n print indexs.shape\n for coor in indexs:\n #print coor\n x = coor[0]\n y = coor[1]\n z = coor[2]\n \n if wmdata[x,y,z]==1:\n result_mask[x,y,z] = roi_id\n else:\n #find the nearest neighbor.\n flag = False\n radius = 1\n mindist_voxel = []\n mindist = 1000 \n while radius<100: \n neigh_list = get_neighbors(coor,radius,shape)\n radius += 1\n #find the nearest white matter voxel.\n for n in neigh_list:\n #print n\n if wmdata[n[0],n[1],n[2]]==1:\n flag = True\n dist = np.sqrt((n[0]-x)**2+(n[1]-y)**2+(n[2]-z)**2)\n # if the distance is smaller than tag, choose it to be nearest.\n \n if dist < mindist:\n mindist = dist\n mindist_voxel = n\n \n if flag:\n break\n #print mindist_voxel\n if mindist_voxel!=[]:\n result_mask[mindist_voxel[0],mindist_voxel[1],mindist_voxel[2]] = roi_id \n for roi_id in roi:\n tmp_mask = result_mask==roi_id\n roi_size = tmp_mask.sum() \n print roi_id, roi_size\n result = img\n result._data = result_mask\n #roi_name = os.path.join(mkdir,'roi_%s.nii.gz'%i)\n nib.save(result,\"test_regroi.nii.gz\")\n \n return True", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! 
so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp", "def crop_acc_mask(images_dir, images_output_dir, masks_dir, mask_suffix=None, masks_output_dir=None): \n image_suffix_list = [\"C0\", \"DE\", \"T2\"]\n if not os.path.exists(images_output_dir):\n os.makedirs(images_output_dir)\n if masks_output_dir is not None and (not os.path.exists(masks_output_dir)):\n os.makedirs(masks_output_dir)\n margin = [0, 30, 30]\n masks_list = os.listdir(masks_dir)\n masks_list.sort()\n json_dict = OrderedDict()\n for mask in masks_list:\n mask_path = os.path.join(masks_dir, mask)\n if mask.endswith(\".nii.gz\"):\n print(\"#\" * 11 *11)\n print(mask_path)\n mask_sitk = sitk.ReadImage(mask_path)\n mask_npy = sitk.GetArrayFromImage(mask_sitk)\n mask_shape = mask_npy.shape\n crop_bbox_min, crop_bbox_max = get_ND_bounding_box(mask_npy, margin=margin)\n # do not crop along depth dimension\n crop_bbox_min[0] = 0\n crop_bbox_max[0] = mask_shape[0]\n print(crop_bbox_min, crop_bbox_max)\n json_dict[mask_path] = {\"crop_bbox_min\": crop_bbox_min, \"crop_bbox_max\": crop_bbox_max}\n mask_output_npy = crop_ND_volume_with_bounding_box(mask_npy, crop_bbox_min, crop_bbox_max)\n if mask_suffix is not None:\n mask = mask.replace(\"_\" + mask_suffix + \".nii.gz\", \".nii.gz\")\n if masks_output_dir is not None:\n save_cropped_array_as_nifty_volume(mask_output_npy, os.path.join(masks_output_dir, mask), mask_sitk)\n save_cropped_array_as_nifty_volume(convert_label(mask_output_npy, [1, 2, 3, 4, 5], [1, 2, 3, 1, 1]), \\\n os.path.join(images_output_dir, mask.replace(\".nii.gz\", \"_{0:04d}.nii.gz\".format(len( \\\n image_suffix_list)))), mask_sitk)\n for i, image_suffix in enumerate(image_suffix_list):\n image = mask.replace(\".nii.gz\", \"_{}.nii.gz\".format(image_suffix))\n image_path = os.path.join(images_dir, image)\n print(image_path)\n image_sitk = sitk.ReadImage(image_path)\n image_npy = sitk.GetArrayFromImage(image_sitk)\n image_output_npy = crop_ND_volume_with_bounding_box(image_npy, crop_bbox_min, crop_bbox_max)\n save_cropped_array_as_nifty_volume(image_output_npy, os.path.join(images_output_dir, mask.replace( \\\n \".nii.gz\", \"_{0:04d}.nii.gz\".format(i))), image_sitk)\n save_json(json_dict, os.path.join(images_output_dir, \"crop_information.json\"))\n if masks_output_dir is not None:\n save_json(json_dict, os.path.join(masks_output_dir, \"crop_information.json\"))", "def 
countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def build_sea_data(\n start_year=1999,\n end_year=2016,\n netcdf_path=\"data/sea_level/netcdf/\",\n target_lon=175.8606890,\n target_lat=-36.993684,\n buffer_degrees=0.5,\n path_out=\".\",\n):\n # tairua_coords = (-36.993684, 175.8606890)\n df_sea_data = pd.DataFrame()\n\n for year in range(start_year, end_year + 1):\n ds_first = xr.open_mfdataset(\n os.path.join(netcdf_path, f\"dt_global_twosat_phy_l4_{year}*.nc\")\n )\n\n target_lon = xr.DataArray(\n list(target_lon + np.linspace(-buffer_degrees, buffer_degrees))\n )\n target_lat = xr.DataArray(\n list(target_lat + np.linspace(-buffer_degrees, buffer_degrees))\n )\n\n ds_tairua = ds_first[[\"adt\", \"ugos\", \"vgos\"]].sel(\n longitude=target_lon, latitude=target_lat, method=\"nearest\"\n )\n df_sealevel_pandas = (\n ds_tairua.resample(time=\"MS\")\n .mean()\n .mean(dim=\"dim_0\")\n .to_dataframe()\n )\n\n df_sea_data = pd.concat([df_sea_data, df_sealevel_pandas])\n\n print(\n f\"************************Done {year} ************************************\"\n )\n print(df_sea_data.tail(10))\n\n df_sea_data.to_csv(os.path.join(path_out, \"df_sea_data.csv\"))", "def smoothen_cy5(data, f2a):\n\n global traces \n traces = data\n\n global frames2avg\n frames2avg = f2a\n\n start = time()\n\n n_traces = traces.shape[0]\n n_frames = traces.shape[2]\n\n smoothened = np.zeros((n_traces, n_frames-(f2a-1)))\n pool = mp.Pool(mp.cpu_count())\n smoothened = np.array(pool.map(smoothen_cy5_single, iter(range(n_traces))))\n\n print(\"Time passed: \" + str(time() - start))\n\n return smoothened", "def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent 
shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def smooth_mask ( gray , blur = 1 , threshold = 128 ) :\n\n assert gray is not None\n\n gray_markers = nd.median_filter(gray, blur)\n _ , gray_markers = cv2.threshold ( gray_markers , threshold , 255 , cv2.THRESH_BINARY )\n\n return gray_markers", "def F_subset_S5PCH4(self,path,if_trop_xch4=False,s5p_product='RPRO'): \n from scipy.interpolate import interp1d\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_'+s5p_product+'_L2__CH4____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n \n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n #maxsza = self.maxsza \n #maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. 
should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n if if_trop_xch4:\n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/dry_air_subcolumns',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_pressure',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/pressure_interval',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/methane_profile_apriori',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','dry_air_subcolumns','surface_pressure','pressure_interval',\n 'methane_profile_apriori','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n if if_trop_xch4:\n sounding_interp = F_interp_geos_mat(outp_nc['lonc'],outp_nc['latc'],outp_nc['UTC_matlab_datenum'],\\\n geos_dir='/mnt/Data2/GEOS/s5p_interp/',\\\n interp_fields=['TROPPT'])\n outp_nc['TROPPT'] = sounding_interp['TROPPT']\n #f1 = outp_nc['SolarZenithAngle'] <= maxsza\n #f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n if np.sum(validmask) == 0:\n continue\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n if if_trop_xch4:\n # calculate trop xch4 using l2g_data0\n l2g_data0['air_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['air_column_total'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['methane_ap_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n for il2 in range(len(l2g_data0['latc'])):\n cum_air = np.concatenate(([0.],np.cumsum(l2g_data0['dry_air_subcolumns'][il2,].squeeze())))\n cum_methane = np.concatenate(([0.],np.cumsum(l2g_data0['methane_profile_apriori'][il2,].squeeze())))\n # model top is 10 Pa, 12 layers, 13 levels\n plevel = 10.+np.arange(0,13)*l2g_data0['pressure_interval'][il2]\n tropp = l2g_data0['TROPPT'][il2]\n l2g_data0['air_column_total'][il2] = np.sum(l2g_data0['dry_air_subcolumns'][il2,])\n f = interp1d(plevel,cum_air)\n l2g_data0['air_column_strat'][il2] = f(tropp)\n f = interp1d(plevel,cum_methane)\n l2g_data0['methane_ap_column_strat'][il2] = f(tropp)\n del l2g_data0['dry_air_subcolumns']\n del l2g_data0['methane_profile_apriori'] \n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def step_5_inequalities(z_k, nu_k):\n\n c1 = 1./(400*np.sqrt(z_k.C)); c2 = 2./(4*np.pi) # todo: should not multiply by 2\n P_size = z_k.P_size\n\n lhs = z_k.integral_absolute()\n\n rhs = z_k.integral_absolute_prev()\n rhs += c1**2*c2**2/(2*P_size)*(z_k.C*P_size - z_k.integral_absolute_prev())**2\n\n inequality1 = lhs > rhs\n\n print \"Inequality 5a:\"\n print \"|v_tilde-v^*|_L2^2 = %g\" % lhs\n print \"|v_tilde-v^k|_L2^2 + beta(C|P| - |v_tilde+v^k|_L2^2)^2 = %g\" % rhs\n\n # convolution inequality\n k = len(z_k.r_values)\n z_star_z_k_int = z_k.integral_absolute_last_step()\n inequality2 = z_star_z_k_int < 2**(-k) # todo: maybe multiply k by 2?\n print \"ineq5.2: %g < %g\" % (z_star_z_k_int, 2**(-2*k))\n\n return inequality1 and inequality2", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi 
= ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def ls_sr_band_correction(self,\n img):\n return \\\n ee.Algorithms.If(\n ee.String(img.get('SATELLITE')).compareTo('LANDSAT_8'),\n ee.Algorithms.If(ee.String(img.get('SATELLITE')).compareTo('LANDSAT_5'),\n ee.Image(img.select(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint'])),\n ee.Algorithms.If(ee.Number(int(self.auto_ls5_correction)),\n ee.Image(EEHelper.ls5_sr_corr(img)),\n ee.Image(img.select(\n ['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint']))\n )\n ),\n ee.Algorithms.If(ee.Number(int(self.auto_ls8_correction)),\n ee.Image(EEHelper.ls8_sr_corr(img)),\n ee.Image(img.select(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa', 'radsat_qa'],\n ['BLUE', 'GREEN', 'RED', 'NIR', 'SWIR1', 'SWIR2', 'PIXEL_QA',\n 'RADSAT_QA'])\n .int16()\n .copyProperties(img)\n .copyProperties(img,\n ['system:time_start',\n 'system:time_end',\n 'system:index',\n 'system:footprint']))\n )\n )", "def extract_roi(h5series, dir_out, size_m, size_var=.5, max_ecc=.7,\n dist_border=10, pad_border=40, exclude_overlap=30.,\n bg_amp_kw=BG_DEFAULT_KW, bg_amp_bin=np.nan,\n bg_pha_kw=BG_DEFAULT_KW, bg_pha_bin=np.nan,\n search_enabled=True, ret_roimgr=False):\n h5in = pathlib.Path(h5series)\n dout = pathlib.Path(dir_out)\n\n h5out = dout / FILE_ROI_DATA_H5\n imout = dout / FILE_ROI_DATA_TIF\n slout = dout / FILE_SLICES\n\n # Determine ROI location\n with qpimage.QPSeries(h5file=h5in, h5mode=\"r\") as qps:\n rmgr = ROIManager(qps.identifier)\n if search_enabled:\n for ii in range(len(qps)):\n qpi = qps[ii]\n # find objects\n slices = search.search_phase_objects(\n qpi=qpi,\n size_m=size_m,\n size_var=size_var,\n max_ecc=max_ecc,\n dist_border=dist_border,\n pad_border=pad_border,\n exclude_overlap=exclude_overlap)\n for jj, sl in enumerate(slices):\n slident = \"{}.{}\".format(qpi[\"identifier\"], jj)\n rmgr.add(roislice=sl, image_index=ii,\n roi_index=jj, identifier=slident)\n rmgr.save(slout)\n else:\n rmgr.load(slout)\n\n # Extract ROI images\n with qpimage.QPSeries(h5file=h5in, h5mode=\"r\") as qps, \\\n qpimage.QPSeries(h5file=h5out, h5mode=\"w\") as qps_roi, \\\n tifffile.TiffWriter(str(imout), imagej=True) as tf:\n for ii in range(len(qps)):\n # image to analyze\n qpi = qps[ii]\n # available ROIs\n rois = rmgr.get_from_image_index(ii)\n for jj, (rid, sl) in enumerate(rois):\n # Extract the ROI\n qpisl = qpi.__getitem__(sl)\n # amplitude bg correction\n if bg_amp_kw:\n amp_mask = get_binary(\n qpisl.amp, value_or_method=bg_amp_bin)\n 
qpisl.compute_bg(which_data=\"amplitude\",\n from_binary=amp_mask,\n **bg_amp_kw)\n # phase bg correction\n if bg_pha_kw:\n pha_mask = get_binary(\n qpisl.pha, value_or_method=bg_pha_bin)\n qpisl.compute_bg(which_data=\"phase\",\n from_binary=pha_mask,\n **bg_pha_kw)\n slident = \"{}.{}\".format(qpi[\"identifier\"], jj)\n if rid != slident:\n # This might happen if the user does not know the\n # image identifier and builds his own `FILE_SLICES`.\n msg = \"Mismatch of slice and QPImage identifiers: \" \\\n + \"{} vs {}!\".format(rid, slident)\n warnings.warn(msg)\n # override `slident` with user identifier\n slident = rid\n qps_roi.add_qpimage(qpisl, identifier=slident)\n\n if len(qps_roi):\n # Write TIF\n # determine largest image\n sxmax = np.max([qq.shape[0] for qq in qps_roi])\n symax = np.max([qq.shape[1] for qq in qps_roi])\n dummy = np.zeros((2, sxmax, symax), dtype=np.float32)\n for qpir in qps_roi:\n dummy[0, :, :] = 0\n dummy[1, :, :] = 1\n res = 1 / qpir[\"pixel size\"] * 1e-6 # use µm\n sx, sy = qpir.shape\n dummy[0, :sx, :sy] = qpir.pha\n dummy[1, :sx, :sy] = qpir.amp\n tf.save(data=dummy, resolution=(res, res, None))\n\n ret = h5out\n if ret_roimgr:\n ret = ret, rmgr\n return ret", "def conv5x5(in_planes, out_planes, stride=1, groups=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride, groups=groups,\n padding=2, dilation=1, bias=False)", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if 
peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def make_skydark(files, ext=1, nproc=6, title='ext_1', overwrite=False):\n\n # See if outfile already exists\n outfile = 'skydark_{}.fits'.format(title)\n if (os.path.exists(outfile)) & (overwrite is False):\n print('{} already exists, stopping...'.format(outfile))\n\n else:\n print('Making a stack of the input files...')\n stack = np.zeros((len(files), 2051, 4096))\n for i,f in enumerate(files):\n h = fits.open(f)\n data = h[ext].data\n #dq = h[ext+2].data\n\n # Get the segmap for this file\n segmap_file = f.replace('.fits', '_seg_ext_{}.fits'.format(ext))\n if not os.path.isfile(segmap_file): # sometimes input files are medsub/equalized\n segmap_file = f.replace('_medsub', '').replace('_eq', '').replace('.fits', '_seg_ext_{}.fits'.format(ext))\n segmap = fits.getdata(segmap_file)\n\n # Mask bad pixels and sources\n #data[dq!=0] = np.nan\n data[segmap>0] = np.nan\n stack[i] = data\n h.close()\n\n # Make the skydark\n print('Calculating the median through the stack of input files...')\n if nproc==1:\n skydark = np.nanmedian(stack, axis=0)\n else:\n stacks = np.split(stack, 16, axis=2) # split stack into 16 2048x256 sections\n p = Pool(nproc)\n results = p.map(med_stack, stacks)\n skydark = np.concatenate(results, axis=1)\n\n # Write out the sky dark\n fits.writeto(outfile, skydark, overwrite=True)\n print('Sky dark generated.')\n\n # Make a filtered version of the skydark\n print('Filtering the sky dark...')\n amp1, amp2 = np.split(skydark, 2, axis=1) # treat amps separately\n sigma_clip = SigmaClip(sigma=3.)\n bkg_estimator = 
MedianBackground()\n bkg1 = Background2D(amp1, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n bkg2 = Background2D(amp2, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n filtered = np.concatenate((bkg1.background, bkg2.background), axis=1)\n fits.writeto('{}_filtered.fits'.format(outfile.replace('.fits','')), \n filtered, overwrite=True)\n print('Filtered sky dark generated.')", "def bin_isca_exp_nd_data(ds, s_lat=-30, n_lat=30, bin_var_nm='omega500', bin_var=None,\n grp_time_var='year', bins=np.arange(0,1.1,0.1), land_sea='global', land_mask_dir='./data',\n nd_varnames=None, nd=4):\n ds_m = ds.where(np.logical_and(ds.lat>=s_lat, ds.lat<=n_lat), drop=True)\n\n ds_mask = xr.open_dataset(os.path.join(land_mask_dir, 'era_land_t42.nc'), decode_times=False)\n ds_mask = ds_mask.where(np.logical_and(ds_mask.lat>=s_lat,ds_mask.lat<=n_lat), drop=True)\n #ds_m.coords['mask'] = (('lat', 'lon'), ds_mask.land_mask.values)\n\n omega_coeff = 3600. * 24. / 100.\n try:\n omega500_m = ds_m.omega500 * omega_coeff\n except:\n omega_m = ds_m.omega * omega_coeff\n\n try:\n omega500_m = omega_m.sel(pfull=500)\n except:\n omega500_m = omega_m.interp(pfull=500)\n else:\n fint = interpolate.interp1d(np.log(ds_m.pfull), omega_m, kind='linear', axis=1)\n omega500_m = fint(np.log(np.array([500])))\n omega500_m = xr.DataArray(omega500_m[:,0,:,:], coords=[ds_m.time, ds_m.lat, ds_m.lon],\n dims=['time', 'lat', 'lon'])\n\n bin_data_dict = {}\n\n # Add area info to dataset\n lats = ds_m.lat\n nlon = len(ds_m.lon)\n coslat = np.cos(np.deg2rad(lats))\n coslat2 = coslat / np.sum(coslat) / nlon\n # summing this over lat and lon = 1\n area_wts = np.moveaxis(np.tile(coslat2, [nlon, 1]), 0, 1)\n\n latlon_dims = ('lat', 'lon')\n latlon_coords = {}\n for d in latlon_dims:\n latlon_coords[d] = ds_m[d]\n area_wts = xr.DataArray(area_wts, dims=latlon_dims, coords=latlon_coords)\n bin_data_dict['area_weight'] = area_wts\n\n if bin_var is None:\n bin_data_dict['omega500'] = omega500_m\n #if 'lts' in bin_var_nm.lower():\n try:\n bin_data_dict['lts'] = ds_m.lts\n except:\n print('No LTS')\n #exit\n #if 'eis' in bin_var_nm.lower():\n try:\n bin_data_dict['eis'] = ds_m.eis\n except:\n print('No EIS')\n #exit\n #if 'elf' in bin_var_nm.lower():\n try:\n bin_data_dict['ELF'] = ds_m.ELF\n except:\n print('No ELF')\n #exit\n else:\n omega500_obs_t = np.ones_like(omega500_m) * np.nan\n omega500_obs_lat_range = bin_var.where(np.logical_and(bin_var.lat>=s_lat, bin_var.lat<=n_lat), drop=True)\n for t in range(len(ds_m.time)):\n omega500_obs_t[t,:,:] = omega500_obs_lat_range\n omega500_obs_t = xr.DataArray(omega500_obs_t, coords=[ds_m.time, ds_m.lat, ds_m.lon],\n dims=['time', 'lat', 'lon'])\n bin_data_dict['omega500'] = omega500_obs_t\n\n # Add percentile for each variable\n bin_data_dict_tmp = copy.deepcopy(bin_data_dict)\n for key, val in bin_data_dict_tmp.items():\n if 'area' not in key:\n val_percentile = get_monthly_percentile(val) #get_percentile(val)\n bin_data_dict[key + '_percentile'] = val_percentile\n\n bin_data_dict2 = copy.deepcopy(bin_data_dict)\n\n ## ====================== 4d variables ====================== ##\n if nd == 4:\n if nd_varnames is None:\n nd_varnames = ['cf', 'rh', 'sphum', 'qcl_rad', 'omega', 'temp'], # 'theta',\n #'soc_tdt_lw', 'soc_tdt_sw', 'soc_tdt_rad', ] # 'diff_m', 'diff_t'\n pdf_m, ds_bin_mean_m, dims, coords2 = select_4d_data(ds_m, bin_data_dict2, ds_mask,\n bins, bin_var_nm=bin_var_nm, land_sea=land_sea, 
grp_time_var=grp_time_var,\n four_d_varnames=nd_varnames)\n\n ## ====================== 3d variables ====================== ##\n if nd == 3:\n if nd_varnames is None:\n nd_varnames = ['soc_olr', 'soc_olr_clr', #'flux_lhe', 'flux_t',\n 'toa_sw_cre', 'toa_lw_cre', 'toa_net_cre',\n 'tot_cld_amt', 'low_cld_amt', 'mid_cld_amt', 'high_cld_amt',\n 'temp_2m', 't_surf', 'cwp'] #, 'soc_tot_cloud_cover','z_pbl'\n pdf_m, ds_bin_mean_m, dims, coords2 = select_3d_data(ds_m, bin_data_dict2, ds_mask,\n bins, bin_var_nm=bin_var_nm, land_sea=land_sea, grp_time_var=grp_time_var,\n three_d_varnames=nd_varnames)\n \n vars_dict = {}\n\n for key, val in ds_bin_mean_m.items():\n if 'area' in key:\n if 'sum' in key:\n if len(dims)>1:\n #vars_dict[key] = (dims[-1], val)\n vars_dict[key] = ((dims[0], dims[-1]), val)\n else:\n vars_dict[key] = ((dims[-1]), val)\n else: # neglect the area_weight\n pass\n else:\n vars_dict[key] = (dims, val)\n\n # print(dims)\n # for key, val in ds_bin_mean_m.items():\n # if 'area' in key:\n # vars_dict[key] = ((dims[0], dims[-1]), val)\n # else:\n # vars_dict[key] = (dims, val)\n\n dims2 = tuple([d for d in dims if d != 'pfull'])\n vars_dict['pdf'] = (dims2, pdf_m)\n # print(coords2)\n ds_bin_mean_m_array = xr.Dataset(vars_dict, coords=coords2)\n\n return ds_bin_mean_m_array", "def sim_filtered_brown_noise(T, Fs, f_range, N):\n\n if f_range is None:\n # Do not filter\n # Generate 1/f^2 noise\n brownN = simbrown(int(T * Fs))\n return brownN\n elif f_range[1] is None:\n # Make filter order odd if necessary\n nyq = Fs / 2.\n if N % 2 == 0:\n print('NOTE: Increased high-pass filter order by 1 in order to be odd')\n N += 1\n\n # Generate 1/f^2 noise\n brownN = sim_brown_noise(int(T * Fs + N * 2))\n\n # High pass filter\n taps = signal.firwin(N, f_range[0] / nyq, pass_zero=False)\n brownNf = signal.filtfilt(taps, [1], brownN)\n return brownNf[N:-N]\n\n else:\n # Bandpass filter\n # Generate 1/f^2 noise\n brownN = simbrown(int(T * Fs + N * 2))\n # Filter\n nyq = Fs / 2.\n taps = signal.firwin(N, np.array(f_range) / nyq, pass_zero=False)\n brownNf = signal.filtfilt(taps, [1], brownN)\n return brownNf[N:-N]", "def pil_image_mask_by_band_value(img, band, val, cval=0):\n # type: (PImage.Image, int, int) -> PImage.Image\n\n num_bands = len(img.getbands())\n\n if band >= num_bands:\n raise ValueError('Cannot get band with index {} from image with {} bands'.format(band, num_bands))\n\n # Create a look up table where only one value maps to itself and everything else to cval\n other_band_lut = [cval] * 256\n target_band_lut = [cval] * 256\n target_band_lut[val] = val\n lut = []\n\n for i in range(num_bands):\n if i == band:\n lut += target_band_lut\n else:\n lut += other_band_lut\n\n img = img.point(lut)\n return img", "def layers_from_h5(ctx, out_dir, layers, hsds):\n excl_h5 = ctx.obj['EXCL_H5']\n if layers is not None:\n layers = {layer: os.path.join(out_dir, \"{}.tif\".format(layer))\n for layer in layers}\n ExclusionsConverter.extract_layers(excl_h5, layers, hsds=hsds)\n else:\n ExclusionsConverter.extract_all_layers(excl_h5, out_dir, hsds=hsds)", "def five_min_ticker(*args):\n markets = fetch_markets()\n map(populate_five_min_data, markets)\n return", "def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):\r\n\t\r\n\twork_arr = np.ravel(input_arr)\r\n\told_sky = np.median(work_arr)\r\n\toldStaDesviation = work_arr.std()\r\n\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\tif 
low_cut and high_cut:\r\n\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\telse:\r\n\t\tif low_cut:\r\n\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tindices = np.where((work_arr < upper_limit))\r\n\twork_arr = work_arr[indices]\r\n\tnew_sky = np.median(work_arr)\r\n\titeration = 0\r\n\twhile ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :\r\n\t\titeration += 1\r\n\t\told_sky = new_sky\r\n\t\toldStaDesviation = work_arr.std()\r\n\t\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\t\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\t\tif low_cut and high_cut:\r\n\t\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tif low_cut:\r\n\t\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\t\telse:\r\n\t\t\t\tindices = np.where((work_arr < upper_limit))\r\n\t\twork_arr = work_arr[indices]\r\n\t\tnew_sky = np.median(work_arr)\r\n\treturn (new_sky, iteration)" ]
[ "0.66861373", "0.65514684", "0.6478541", "0.5613955", "0.54316807", "0.5428405", "0.5313811", "0.520505", "0.51899797", "0.50904", "0.50412196", "0.5036899", "0.49864736", "0.47982645", "0.4767842", "0.47645608", "0.4758746", "0.47383195", "0.46717232", "0.46011153", "0.4570777", "0.45688832", "0.45655566", "0.45477608", "0.45297286", "0.44968966", "0.44782507", "0.44734576", "0.4399", "0.4386791", "0.4371137", "0.4370821", "0.43552357", "0.4335485", "0.4321996", "0.43213207", "0.4310818", "0.43019348", "0.42992353", "0.42989433", "0.42904896", "0.42898458", "0.42895308", "0.42832762", "0.42713168", "0.4263367", "0.42611855", "0.42606252", "0.42600292", "0.42484784", "0.42484397", "0.42261335", "0.42222974", "0.4211644", "0.42094156", "0.42090616", "0.42037085", "0.41939148", "0.4187401", "0.4183722", "0.4174783", "0.41730204", "0.41714883", "0.41693622", "0.4168003", "0.41565004", "0.41543728", "0.41420916", "0.41188413", "0.41173095", "0.4107627", "0.41062453", "0.41009265", "0.40999597", "0.40964976", "0.40928853", "0.4087095", "0.4087095", "0.4086952", "0.40797374", "0.40795085", "0.40763682", "0.40762898", "0.40760294", "0.4072676", "0.4072001", "0.40705267", "0.40571058", "0.40486845", "0.40482196", "0.4048043", "0.40419307", "0.40345767", "0.40325812", "0.40267757", "0.40252012", "0.40241066", "0.40198696", "0.40194592", "0.401611" ]
0.80881137
0
Function to perform a 4-year moving window filter for a single land cover value (such as Forest as 1) across all years in an image. Calls the function mask4. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects each window of four consecutive years: if the first and last years hold the target class but the central years do not, the central pixels are reclassified to match their temporal neighbours' class. This function can be applied to whichever land cover values the user decides, whether all of the land cover values or a select few.
def applyWindow4years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-2): img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)])) img_out = img_out.addBands(imagem.select(bandNames[-2])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
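As a rough, framework-free sketch of the rule described in the query (assuming a per-pixel list of integer class codes and a hypothetical helper name window4_filter_series, neither of which appears in the original Earth Engine code), the same logic can be written in plain Python:

def window4_filter_series(series, value):
    # Hypothetical per-pixel illustration of the 4-year rule described in the query
    # (not the Earth Engine implementation): for every window of four consecutive
    # years, if both extremes equal `value` but the two central years do not,
    # the central years are reset to `value`. Decisions always read the original
    # series, mirroring how the Earth Engine version always reads `imagem`.
    out = list(series)
    for t in range(1, len(series) - 2):
        y0, y1, y2, y3 = series[t - 1:t + 3]
        if y0 == value and y3 == value and y1 != value and y2 != value:
            out[t] = value
            out[t + 1] = value
    return out

# A Forest pixel (class 1) briefly mis-classified as class 2 in two middle years:
print(window4_filter_series([1, 2, 2, 1, 3], value=1))  # -> [1, 1, 1, 1, 3]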
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the 
third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n 
winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= 
connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n 
lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n verbose = kw.pop('verbose',False)\r\n ### prepare RO results and sort them according to sweep point\r\n for a in [a_lt3,a_lt4]:\r\n a.pts = a.g.attrs['sweep_length']\r\n a.ssros = a.agrp['ssro_results'].value\r\n a.readouts = a.g.attrs['nr_of_ROsequences']\r\n # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n ### correlate the ROs with each other by making a boolean filter:\r\n ### variables here are described in terms of spin states!\r\n m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n \r\n ### now define unique identifiers for each Ro correlation and recast the correlations into a single array.\r\n ### As identifieres I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... 
and so forth\r\n RO_correlators = np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n + np.array(len(a_lt3.ssros)*[4])*m00 \r\n ### PH - added to make sure that has a full set of repetitions\r\n RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n \r\n ### now sort the correlators and the adwin fltr according to the sweep pts\r\n sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n ### from now on: no numpy magic anymore. from here it is brutforce 'for-looping'\r\n ### (all conceived arrays will have different lengths due to temporal filtering. this break most np methods)\r\n ### although vstack and hstack would probably work...\r\n \r\n return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurence\r\n for i in range(a_lt3.pts): \r\n correlators_at_sweep_pt = [0,0,0,0]\r\n for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = 
img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def cut4(image):\r\n i, j = image.shape\r\n a1 = image[:i // 2, :j // 2]\r\n a2 = image[i // 2:, :j // 2]\r\n a3 = image[:i // 2, j // 2:]\r\n a4 = image[i // 2:, j // 2:]\r\n return a1, a2, a3, a4", "def mask(mode: str = 'illuminated', band: str = '78') -> np.ndarray:\n if band in ('7', '8'):\n res = np.full((256, 500), False)\n else:\n res = np.full((256, 1000), False)\n\n res[coords(mode, band)] = True\n\n return res", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic 
Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog 
entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. 
Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. 
Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). 
Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del 
data['{}_var_'.format(filt.lower())]\n\n return data", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = 
connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def _filter_images(data, hmin):\n #Laziest way to get a circle mask\n fp = CircularAperture((0,0), r=hmin).to_mask().data>.1\n fp = fp.astype(bool)\n\n # Apply maximum filter, flux filter\n filt_image = maximum_filter(data, footprint=fp,\n mode='constant', cval=0)\n origins = product([0,-1], [0,-1])\n max_4sum = np.amax([_conv_origin(data, o) for o in origins], axis=0)\n return(filt_image, max_4sum)", "def masked(months=range(1, 13), years=[2009], folder=\"data/\", layer=\"BHR_VIS\"):\n data = []\n file_template = 'NETCDF:\"{:s}\":{:s}' # Template for the Netcdf path\n # the actual filename\n fname_template = '{:s}/GlobAlbedo.merge.albedo.05.{:d}{:02d}.nc'\n for year in years:\n for month in months:\n fname = fname_template.format(folder, year, month)\n netcdf_fname = file_template.format(fname, layer)\n g = gdal.Open(netcdf_fname)\n if g is None:\n raise IOError(\"Problem with reading file {}\".format(fname))\n the_data = g.ReadAsArray()\n masked_data = np.ma.array(the_data,mask=np.isnan(the_data))\n data.append(masked_data)\n output_data = np.ma.array(data)\n return output_data", "def compute_stage4(lon, lat, year):\n nc = netCDF4.Dataset(\"/mesonet/data/stage4/%s_stage4_hourly.nc\" % (year,))\n lons = nc.variables[\"lon\"][:]\n lats = nc.variables[\"lat\"][:]\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n (yidx, xidx) = np.unravel_index(dist.argmin(), dist.shape)\n print(\n (\"Computed stage4 nclon:%.2f nclat:%.2f yidx:%s xidx:%s \")\n % (lons[yidx, xidx], lats[yidx, xidx], yidx, xidx)\n )\n p01i = mm2inch(nc.variables[\"p01m\"][:, yidx, xidx])\n nc.close()\n df = pd.DataFrame(\n {\"precip\": 0.0},\n index=pd.date_range(\n \"%s-01-01\" % (year,), \"%s-12-31\" % (year,), tz=\"America/Chicago\"\n ),\n )\n for date in df.index.values:\n date2 = datetime.datetime.utcfromtimestamp(date.tolist() / 1e9)\n ts = datetime.datetime(date2.year, date2.month, date2.day, 6)\n ts = ts.replace(tzinfo=pytz.utc)\n ts = ts.astimezone(pytz.timezone(\"America/Chicago\"))\n ts = ts.replace(hour=0)\n ts = ts.astimezone(pytz.utc)\n tidx = hourly_offset(ts)\n # values are in the rears\n val = np.ma.sum(p01i[tidx + 1 : tidx + 25])\n if val > 0:\n df.at[date, \"precip\"] = val # close enough\n return df", "def n4_bias_correction(mri, mask_image=None, shrink_factor=(4, 4, 4)):\n from tinycat.label import gen_mask\n import SimpleITK as sitk\n\n mri_data = mri.get_data()\n mri_image = sitk.GetImageFromArray(mri_data)\n mri_image = sitk.Cast(mri_image, sitk.sitkFloat32)\n\n if mask_image is None:\n mask_image = sitk.OtsuThreshold(mri_image, 1)\n else:\n mask_image = sitk.GetImageFromArray(mask_image)\n\n # Shrink image to minimize computation cost\n mri_image_sh = sitk.Shrink(mri_image, shrink_factor)\n mask_image_sh = sitk.Shrink(mask_image, shrink_factor)\n corrector = sitk.N4BiasFieldCorrectionImageFilter()\n\n # Default parameters for slicer 3D\n corrector.SetSplineOrder = 3\n corrector.SetConvergenceThreshold = 0.0001\n corrector.SetMaximumNumberOfIterations = [50, 50, 50]\n corrector.SetWienerFilterNoise = 0\n corrector.SetNumberOfHistogramBins = 0\n 
corrector.SetBiasFieldFullWidthAtHalfMaximum = 0.15\n\n # Calculate bias-field filter\n n4_output = corrector.Execute(mri_image_sh, mask_image_sh)\n n4_filter = sitk.Subtract(n4_output, mri_image_sh)\n\n # Apply bias-field filter to masked original data\n n4_array = ndimage.interpolation.zoom(\n sitk.GetArrayFromImage(n4_filter), zoom=shrink_factor, order=3\n )\n mri_data = sitk.GetArrayFromImage(mri_image)\n semi_mask = mri_data >= mri_data.mean()\n mask = gen_mask(semi_mask)\n mri_data[mask] = mri_data[mask] - n4_array[mask]\n\n return cat.Nifti1Image(mri_data, mri.affine, mri.header)", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def get_2D_nighttime_mask4date_pd(date=None, ncfile=None, res='4x5',\n mask_daytime=False, buffer_hours=0,\n debug=False):\n # Astronomical math\n import ephem\n from ephem import AlwaysUpError, NeverUpError\n # And functions in other AC_tools modules\n from .AC_time import add_days, add_hrs\n logging.info('get_2D_nighttime_mask4date_pd called for {}'.format(date))\n\n # Profile function...\n if debug:\n start_time = time.time()\n\n # --- Local variables?\n # reference data for ephem (number of days since noon on 1899 December 31)\n ref_date = datetime.datetime(1899, 12, 31, 12)\n\n # --- Get LON and LAT variables\n if isinstance(ncfile, type(None)):\n # extract from refence files\n lons, lats, alts = get_latlonalt4res(res=res)\n else:\n # TODO - allow any lat, lon grid to be used by taking input lats and\n # lons from ncfile file/arguments.\n print('Not implemented')\n sys.exit()\n if debug:\n print((\"--- (start-1) %s seconds ---\" % (time.time() - start_time)))\n\n # --- setup function to mask based on date, lat and lon\n def mask_nighttime(lon, lat, date=date, mask_daytime=mask_daytime,\n ref_date=datetime.datetime(1899, 12, 31, 12),\n buffer_hours=buffer_hours, debug=False):\n \"\"\"\n sub-function to mask if nightime for a given date at a specific lat/lon\n \"\"\"\n # --- get lat and lon values from columns\n if debug:\n print((\"--- (s4-1) %s seconds ---\" % (time.time() - start_time)))\n # --- get sunrise and sunset for location\n o = ephem.Observer()\n # set lat (decimal?), lon (decimal?), and date (UTC)\n o.lat = str(lat)\n o.long = str(lon)\n o.date = date\n # planetary body\n s = ephem.Sun()\n if debug:\n print((\"--- (s4-2) %s seconds ---\" % (time.time() - start_time)))\n\n # Compute sun vs observer\n s.compute()\n if debug:\n print((\"--- (s4-3) %s seconds ---\" % (time.time() - start_time)))\n\n # Work out if day or night based on sunrises and sunsets\n mask_value = 0\n try:\n\n # get sunrise time and date\n next_rising = o.next_rising(s)\n next_setting = o.next_setting(s)\n\n # convert to datetime.datetime\n next_rising = add_days(ref_date, next_rising)\n next_setting = add_days(ref_date, next_setting)\n\n # did the sun last rise or set? (inc. 
any adjustments)\n sun_last_rose = False\n if next_setting < next_rising:\n sun_last_rose = True\n\n # Add buffer to rising/setting if provided with buffer_hours\n if buffer_hours != 0:\n\n # Calculate last rise\n previous_rising = o.previous_rising(s)\n # convert to datetime.datetime\n previous_rising = add_days(ref_date, previous_rising)\n # Calculate last setting\n previous_setting = o.previous_setting(s)\n # convert to datetime.datetime\n previous_setting = add_days(ref_date, previous_setting)\n\n # Calculate absolute difference\n time_from_rise = (date-previous_rising).total_seconds()\n time_till_set = (date-next_setting).total_seconds()\n time_from_set = (date-previous_setting).total_seconds()\n time_till_rise = (date-next_rising).total_seconds()\n\n # If absolutely difference less than buffer\n if abs(time_from_rise)/60./60. <= buffer_hours:\n mask_value = 1\n elif abs(time_till_set)/60./60. <= buffer_hours:\n mask_value = 1\n elif abs(time_from_set)/60./60. < buffer_hours:\n mask_value = 1\n elif abs(time_till_rise)/60./60. < buffer_hours:\n mask_value = 1\n\n # --- Check if daytime or nighttime and mask if condition met.\n if sun_last_rose:\n if mask_daytime:\n # ... and has not set yet, it must be daytime\n if (date < next_setting):\n mask_value = 1\n\n # if the sun last set... (mask nighttime is default)\n else:\n # if mask nighttime (aka not mask_daytime)\n if not mask_daytime:\n # ... and has not risen yet, it must be nighttime\n if (date < next_rising):\n mask_value = 1\n\n # Add gotcha for locations where sun is always up.\n except AlwaysUpError:\n if mask_daytime:\n mask_value = 1\n\n # Add gotcha for locations where sun is always down.\n except NeverUpError:\n if not mask_daytime:\n mask_value = 1\n\n except:\n print('FAIL')\n sys.exit()\n\n # Mask value in array\n return mask_value\n\n # --- Setup an unstack(ed) pandas dataframe to contain masked values\n if debug:\n print((\"--- (2) %s seconds ---\" % (time.time() - start_time)))\n # Use list comprehension to setup list of indices for lat and lon\n # Better way of doing this? (e.g. 
pd.melt?)\n ind_lat_lons_list = [[lon_, lat_] for lat_ in lats for lon_ in lons]\n if debug:\n print((\"--- (3) %s seconds ---\" % (time.time() - start_time)))\n # Make this into a pd.DataFrame and label columns.\n df = pd.DataFrame(ind_lat_lons_list)\n df.columns = ['lons', 'lats']\n if debug:\n print((\"--- (4) %s seconds ---\" % (time.time() - start_time)))\n # Apply function to calculate mask value\n# df['mask'] = df.apply(mask_nighttime, axis=1)\n df['mask'] = df.apply(lambda x: mask_nighttime(\n x['lons'], x['lats']), axis=1)\n if debug:\n print((\"--- (5) %s seconds ---\" % (time.time() - start_time)))\n # Re-index by lat and lon\n df = pd.DataFrame(df['mask'].values, index=[df['lats'], df['lons']])\n if debug:\n print((\"--- (6) %s seconds ---\" % (time.time() - start_time)))\n # Unstack and return just as array\n df = df.unstack()\n marr = df.values\n if debug:\n print((\"--- (end-7) %s seconds ---\" % (time.time() - start_time)))\n\n return marr", "def scale_sky_spectrum(wlm, sky_spectrum, spectra, cut_sky=4., fmax=10, fmin=1, valid_wave_min=0, valid_wave_max=0, \n fibre_list=[100,200,300,400,500,600,700,800,900], plot=True, verbose=True, warnings=True): \n \n# # Read sky lines provided by 2dFdr\n# sky_line_,flux_sky_line_ = read_table(\"sky_lines_2dfdr.dat\", [\"f\", \"f\"] )\n# # Choose those lines in the range\n# sky_line=[]\n# flux_sky_line=[]\n# valid_wave_min = 6240\n# valid_wave_max = 7355\n# for i in range(len(sky_line_)):\n# if valid_wave_min < sky_line_[i] < valid_wave_max:\n# sky_line.append(sky_line_[i])\n# flux_sky_line.append(flux_sky_line_[i])\n \n \n if valid_wave_min == 0: valid_wave_min = wlm[0]\n if valid_wave_max == 0: valid_wave_max = wlm[-1]\n \n if verbose: print(\"\\n> Identifying sky lines using cut_sky =\",cut_sky,\", allowed SKY/OBJ values = [\",fmin,\",\",fmax,\"]\")\n if verbose: print(\" Using fibres = \",fibre_list)\n\n peaks,peaks_name,peaks_rest,continuum_limits=search_peaks(wlm,sky_spectrum, plot=plot, cut=cut_sky, fmax=fmax, only_id_lines=False, verbose=False) \n\n ratio_list=[]\n valid_peaks=[]\n \n if verbose: print(\"\\n Sky line Gaussian ratio Flux ratio\")\n n_sky_lines_found=0\n for i in range(len(peaks)):\n sky_spectrum_data=fluxes(wlm,sky_spectrum, peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n \n sky_median_continuum = np.nanmedian(sky_spectrum_data[11])\n \n object_spectrum_data_gauss=[]\n object_spectrum_data_integrated=[] \n median_list=[]\n for fibre in fibre_list: \n object_spectrum_flux=fluxes(wlm, spectra[fibre], peaks[i], fcal=False, lowlow=50,highhigh=50, plot=False, verbose=False, warnings=False)\n object_spectrum_data_gauss.append(object_spectrum_flux[3]) # Gaussian flux is 3\n object_spectrum_data_integrated.append(object_spectrum_flux[7]) # integrated flux is 7\n median_list.append(np.nanmedian(object_spectrum_flux[11]))\n object_spectrum_data=np.nanmedian(object_spectrum_data_gauss)\n object_spectrum_data_i=np.nanmedian(object_spectrum_data_integrated)\n \n object_median_continuum=np.nanmin(median_list) \n \n if fmin < object_spectrum_data/sky_spectrum_data[3] * sky_median_continuum/object_median_continuum < fmax :\n n_sky_lines_found = n_sky_lines_found + 1\n valid_peaks.append(peaks[i])\n ratio_list.append(object_spectrum_data/sky_spectrum_data[3])\n if verbose: print(\"{:3.0f} {:5.3f} {:2.3f} {:2.3f}\".format(n_sky_lines_found,peaks[i],object_spectrum_data/sky_spectrum_data[3], object_spectrum_data_i/sky_spectrum_data[7])) \n\n\n #print \"ratio_list =\", ratio_list\n #fit = 
np.polyfit(valid_peaks, ratio_list, 0) # This is the same that doing an average/mean\n #fit_line = fit[0]+0*wlm\n fit_line =np.nanmedian(ratio_list) # We just do a median\n #fit_line = fit[1]+fit[0]*wlm\n #fit_line = fit[2]+fit[1]*wlm+fit[0]*wlm**2\n #fit_line = fit[3]+fit[2]*wlm+fit[1]*wlm**2+fit[0]*wlm**3\n \n \n if plot:\n plt.plot(valid_peaks,ratio_list,\"+\")\n #plt.plot(wlm,fit_line)\n plt.axhline(y=fit_line, color='k', linestyle='--')\n plt.xlim(valid_wave_min-10, valid_wave_max+10) \n #if len(ratio_list) > 0:\n plt.ylim(np.nanmin(ratio_list)-0.2,np.nanmax(ratio_list)+0.2)\n plt.title(\"Scaling sky spectrum to object spectra\")\n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n plt.ylabel(\"OBJECT / SKY\")\n plt.minorticks_on()\n plt.show()\n plt.close()\n \n if verbose: print(\" Using this fit to scale sky spectrum to object, the median value is \",np.round(fit_line,3),\"...\") \n \n sky_corrected = sky_spectrum * fit_line\n\n# plt.plot(wlm,sky_spectrum, \"r\", alpha=0.3)\n# plt.plot(wlm,sky_corrected, \"g\", alpha=0.3)\n# plt.show()\n# plt.close()\n \n return sky_corrected, np.round(fit_line,3)", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = 
get_stamps(full_image, Args)\n return image", "def prepare_ERA5_moisture_flux(era5_path=era5_path):\n import xarray as xr\n from aux_gps import save_ncfile\n from aux_gps import anomalize_xr\n import numpy as np\n from aux_gps import convert_wind_direction\n from dask.diagnostics import ProgressBar\n ds = xr.open_dataset(\n era5_path / 'ERA5_UVQ_4xdaily_israel_1996-2019.nc', chunks={'level': 5})\n # ds = ds.resample(time='D', keep_attrs=True).mean(keep_attrs=True)\n # ds.attrs['action'] = 'resampled to 1D from 12:00UTC data points'\n mf = (ds['q'] * ds['u']).to_dataset(name='qu')\n mf.attrs = ds.attrs\n mf['qu'].attrs['units'] = ds['u'].attrs['units']\n mf['qu'].attrs['long_name'] = 'U component of moisture flux'\n mf['qu'].attrs['standard_name'] = 'eastward moisture flux'\n mf['qv'] = ds['q'] * ds['v']\n mf['qv'].attrs['units'] = ds['v'].attrs['units']\n mf['qv'].attrs['long_name'] = 'V component moisture flux'\n mf['qv'].attrs['standard_name'] = 'northward moisture flux'\n mf['qf'], mf['qfdir'] = convert_wind_direction(u=mf['qu'], v=mf['qv'])\n mf['qf'].attrs['units'] = ds['v'].attrs['units']\n mf['qf'].attrs['long_name'] = 'moisture flux magnitude'\n # mf['qfdir'] = 270 - np.rad2deg(np.arctan2(mf['qv'], mf['qu']))\n mf['qfdir'].attrs['units'] = 'deg'\n mf['qfdir'].attrs['long_name'] = 'moisture flux direction (meteorological)'\n mf = mf.sortby('latitude')\n mf = mf.sortby('level', ascending=False)\n comp = dict(zlib=True, complevel=9)\n encoding_mf = {var: comp for var in mf}\n mf_delayed = mf.to_netcdf(era5_path / 'ERA5_MF_4xdaily_israel_1996-2019.nc',\n 'w', encoding=encoding_mf, compute=False)\n mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n encoding_mf_anoms = {var: comp for var in mf_anoms}\n mf_anoms_delayed = mf_anoms_mean.to_netcdf(era5_path / 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc',\n 'w', encoding=encoding_mf_anoms, compute=False)\n with ProgressBar():\n results = mf_delayed.compute()\n with ProgressBar():\n results1 = mf_anoms_delayed.compute()\n # save_ncfile(mf, era5_path, 'ERA5_MF_4xdaily_israel_1996-2019.nc')\n # mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n # mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n # save_ncfile(mf_anoms_mean, era5_path,\n # 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')\n return", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def skycombine(dir = 'Objects'):\n \n if dir ==\"Objects\":\n dir = 'Objects/*/*/flat_corrected/'\n \n for d in glob(dir):\n \n directory = \"/\".join(d.split('/')[0:2]) + '/swarped'\n if not os.path.exists(directory):\n os.makedirs(directory)\n \n keys = ['OBJECTS', 'ITIME', 'FWINAME', 'OBSDATE', 'CAMNAME', 'HISTORY', 'FLSPECTR']\n images = ImageFileCollection(d, keywords = keys, glob_include = 'f*.fits')\n \n swarpfilter(d, dir, directory, images, keys, filter='H', lamp = '*', camera = 'narrow',\n done='Dark Subtracted', output='cKSkyNarrowH', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='H',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideH', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='J',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowJ', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='J', lamp = '*',camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideJ', type='EQUATORIAL') \n swarpfilter(d, dir, directory, images, keys, 
filter='Ks',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowKs', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Ks',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyWideKs', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Lp',lamp = '*', camera = 'narrow', \n done='Dark Subtracted', output='cKSkyNarrowLp', type='EQUATORIAL')\n swarpfilter(d, dir, directory, images, keys, filter='Lp',lamp = '*', camera = 'wide', \n done='Dark Subtracted', output='cKSkyNarrowLp', type='EQUATORIAL')", "def scaled_octave_noise_4d(octaves, persistence, scale, loBound, hiBound, x, y, z, w):\n return octave_noise_4d(octaves, persistence, scale, x, y, z, w)*(hiBound - loBound)/2 + (hiBound + loBound)/2", "def broaden_mask(img, threshold=0.05, qual=None):\n if not np.any(qual):\n qual = DerivativeVariance(img.phase)\n qual = qual[img.mask==True].max()*1.1 - qual\n max_value = qual[img.mask==True].max()\n img['mask'][qual<max_value*threshold] = False", "def f4(\n self, sample_sets, indexes=None, windows=None, mode=\"site\", span_normalise=True\n ):\n return self.__k_way_sample_set_stat(\n self._ll_tree_sequence.f4,\n 4,\n sample_sets,\n indexes=indexes,\n windows=windows,\n mode=mode,\n span_normalise=span_normalise,\n )", "def butterworth_filter(dft4img, stopband2=10, order=3, showdft=False):\n h, w = dft4img.shape[0], dft4img.shape[1]\n P = h / 2\n Q = w / 2\n dst = np.zeros((h, w, 3), np.float64)\n for i in range(h):\n for j in range(w):\n r2 = float((i - P) ** 2 + (j - Q) ** 2)\n if r2 == 0:\n r2 = 1.0\n dst[i, j] = 1 / (1 + (r2 / stopband2) ** order)\n dst = np.float64(dst)\n if showdft:\n cv2.imshow(\"butterworth\", cv2.magnitude(dst[:, :, 0], dst[:, :, 1]))\n return dst", "def preprocess_land_cover(\n src_files, dst_raster, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n if os.path.isfile(dst_raster) and not overwrite:\n log.info(\"Land cover data already preprocessed. 
Skipping.\")\n return\n log.info(\"Starting preprocessing of land cover data.\")\n LC_CLASSES = [\n \"bare\",\n \"crops\",\n \"grass\",\n \"moss\",\n \"shrub\",\n \"tree\",\n \"urban\",\n \"water-permanent\",\n \"water-seasonal\",\n ]\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n reprojected_files = []\n tile_names = unique_tiles(tmpdir)\n\n if not tile_names:\n raise MissingDataError(\"Land cover data not found.\")\n\n for lc_class in LC_CLASSES:\n tiles = [\n p.as_posix()\n for p in tmpdir.glob(f\"*{lc_class}-coverfraction-layer*.tif\")\n ]\n if len(tiles) > 1:\n src_file = merge_tiles(\n tiles, os.path.join(tmpdir, f\"{lc_class}_mosaic.tif\"), nodata=255,\n )\n else:\n src_file = tiles[0]\n reprojected_files.append(\n reproject(\n src_raster=src_file,\n dst_raster=os.path.join(tmpdir, f\"{lc_class}.tif\"),\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=255,\n dst_nodata=255,\n dst_dtype=\"Byte\",\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n )\n\n if len(reprojected_files) > 1:\n raster = concatenate_bands(\n src_files=reprojected_files,\n dst_file=dst_raster,\n band_descriptions=LC_CLASSES,\n )\n else:\n raster = reprojected_files[0]\n\n if geom:\n mask_raster(raster, geom)", "def test_nib_resample_image_4d(fake_4dimage_nib):\n img_r = resampling.resample_nib(fake_4dimage_nib, new_size=[2, 2, 1, 1], new_size_type='factor', interpolation='nn')\n assert img_r.get_data().shape == (18, 18, 9, 3)\n assert img_r.get_data()[8, 8, 4, 0] == 1.0 # make sure there is no displacement in world coordinate system\n assert img_r.get_data()[8, 8, 4, 1] == 0.0\n assert img_r.header.get_zooms() == (0.5, 0.5, 1.0, 1.0)", "def build_sea_data(\n start_year=1999,\n end_year=2016,\n netcdf_path=\"data/sea_level/netcdf/\",\n target_lon=175.8606890,\n target_lat=-36.993684,\n buffer_degrees=0.5,\n path_out=\".\",\n):\n # tairua_coords = (-36.993684, 175.8606890)\n df_sea_data = pd.DataFrame()\n\n for year in range(start_year, end_year + 1):\n ds_first = xr.open_mfdataset(\n os.path.join(netcdf_path, f\"dt_global_twosat_phy_l4_{year}*.nc\")\n )\n\n target_lon = xr.DataArray(\n list(target_lon + np.linspace(-buffer_degrees, buffer_degrees))\n )\n target_lat = xr.DataArray(\n list(target_lat + np.linspace(-buffer_degrees, buffer_degrees))\n )\n\n ds_tairua = ds_first[[\"adt\", \"ugos\", \"vgos\"]].sel(\n longitude=target_lon, latitude=target_lat, method=\"nearest\"\n )\n df_sealevel_pandas = (\n ds_tairua.resample(time=\"MS\")\n .mean()\n .mean(dim=\"dim_0\")\n .to_dataframe()\n )\n\n df_sea_data = pd.concat([df_sea_data, df_sealevel_pandas])\n\n print(\n f\"************************Done {year} ************************************\"\n )\n print(df_sea_data.tail(10))\n\n df_sea_data.to_csv(os.path.join(path_out, \"df_sea_data.csv\"))", "def mask_obs_for_division(obswvl, obsflux, ivar, temp=None, logg=None, fe=None, alpha=None, dlam=None, lines='new', hires=False):\n\n\t# Get smoothed synthetic spectrum and (NOT continuum-normalized) observed spectrum\n\tsynthflux = get_synth(obswvl, obsflux, ivar, dlam, synth=None, temp=temp, logg=logg, fe=fe, alpha=alpha)\n\n\t# Make a mask\n\tmask = np.zeros(len(synthflux), dtype=bool)\n\n\t# Mask out first and last five pixels\n\tmask[:5] = True\n\tmask[-5:] = True\n\n\t# Mask out more of ends of spectra\n\tmask[np.where(obswvl < 4650)] = True\n\tmask[np.where(obswvl > 6550)] = True\n\n\t# Mask out Halpha, Hbeta, 
Hgamma\n\tmask[np.where((obswvl > 4340 - 5) & (obswvl < 4340 + 5))] = True #Hgamma\n\tmask[np.where((obswvl > 4862 - 5) & (obswvl < 4862 + 5))] = True #Hbeta\n\tmask[np.where((obswvl > 6563 - 5) & (obswvl < 6563 + 5))] = True #Halpha\n\n\tif hires==False:\n\t\t# Mask out pixels near chip gap\n\t\tchipgap = int(len(mask)/2 - 1)\n\t\tprint('wavelength of chip gap: ', obswvl[chipgap])\n\t\t#print('Chip gap: ', chipgap)\n\t\t#mask[(chipgap - 10): (chipgap + 10)] = True\n\t\tmask[np.where((obswvl > (obswvl[chipgap] - 20)) & (obswvl < (obswvl[chipgap] + 20)))] = True\n\n\t# Mask out any bad pixels\n\tmask[np.where(synthflux <= 0.)] = True\n\t#print('Where synthflux < 0: ', obswvl[np.where(synthflux <=0.)])\n\n\tmask[np.where(ivar <= 0.)] = True\n\t#print('Where ivar < 0: ', obswvl[np.where(ivar <=0.)])\n\n\t# Mask out pixels around Na D doublet (5890, 5896 A)\n\tmask[np.where((obswvl > 5884.) & (obswvl < 5904.))] = True\n\n\t# Mask out pixels in regions around Mn lines\n\tmnmask = np.zeros(len(synthflux), dtype=bool)\n\n\t# For med-res spectra, mask out pixels in regions around Mn lines\n\tif hires==False:\n\t\tif lines == 'old':\n\t\t\tlines = np.array([[4744.,4772.],[4773.,4793.],[4813.,4833.],[5384,5404.],[5527.,5547.],[6003.,6031.]])\n\t\telif lines=='new':\n\t\t\t#with resonance lines: lines = np.array([[4729.,4793.],[4813.,4833.],[5384.,5442.],[5506.,5547.],[6003.,6031.],[6374.,6394.],[6481.,6501.]])\n\t\t\t#+/- 10A regions around Mn lines: lines = np.array([[4729.,4793.],[4813.,4833.],[5400.,5430.],[5506.,5547.],[6003.,6031.],[6374.,6394.],[6481.,6501.]])\n\t\t\t#+/- 5A regions around Mn lines: lines = np.array([[4734.1,4744.1],[4749.0,4770.8],[4778.4,4788.4],[4818.5,4828.5],[5402.3,5412.3],[5415.3,5425.3],[5511.8,5521.8],[5532.7,5542.7],[6008.3,6026.8],[6379.7,6389.7],[6486.7,6496.7]])\n\t\t\t#+/- 1A regions around Mn lines:\n\t\t\tlines = np.array([[4738.1,4740.1],[4753.0,4755.0],[4760.5,4763.3],[4764.8,4766.8],[4782.4,4784.4],\n\t\t\t\t\t\t\t [4822.5,4824.5],[5406.3,5408.3],[5419.3,5421.3],[5515.8,5517.8],[5536.7,5538.7],\n\t\t\t\t\t\t\t [6012.3,6014.3],[6015.6,6017.6],[6020.8,6022.8],[6383.7,6385.7],[6490.7,6492.7]])\n\n\t# For hi-res spectra, mask out pixels in +/- 1A regions around Mn lines\n\telse:\n\t\tlines = np.array([[4738.1,4740.1],[4753.,4755.],[4760.5,4763.3],[4764.8,4767.8],[4782.4,4784.4],[4822.5,4824.5]])\n\t\t\t\t\t\t#,[5402.,5412.],[5415.,5425.],[5511.,5521.],[5532.,5542.],[6008.,6026.],[6379.,6389.],[6486.,6596.]])\n\n\tfor line in range(len(lines)):\n\t\tmnmask[np.where((obswvl > lines[line][0]) & (obswvl < lines[line][1]))] = True\n\tmask[mnmask] = True\n\n\t# Create masked arrays\n\tsynthfluxmask \t= ma.masked_array(synthflux, mask)\n\tobsfluxmask \t= ma.masked_array(obsflux, mask)\n\tobswvlmask\t \t= ma.masked_array(obswvl, mask)\n\tivarmask\t \t= ma.masked_array(ivar, mask)\n\n\t# Split spectra into blue (index 0) and red (index 1) parts\n\tif hires==False:\n\t\tsynthfluxmask \t= [synthfluxmask[:chipgap], synthfluxmask[chipgap:]]\n\t\tobsfluxmask\t\t= [obsfluxmask[:chipgap], obsfluxmask[chipgap:]]\n\t\tobswvlmask \t\t= [obswvlmask[:chipgap], obswvlmask[chipgap:]]\n\t\tivarmask \t\t= [ivarmask[:chipgap], ivarmask[chipgap:]]\n\t\tmask \t\t\t= [mask[:chipgap], mask[chipgap:]]\n\n\treturn synthfluxmask, obsfluxmask, obswvlmask, ivarmask, mask", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = 
df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def mkNetCDF_mask4CVAO_African_nest(res='2x2.5', invert_mask=False,\n regrid21x1=True):\n import cartopy.crs as ccrs\n import matplotlib.pyplot as plt\n import xesmf as xe\n ds = AC.get_LWI_map(res=res, rtn_ds=True)\n ds = ds.transpose('lon', 'lat')\n var2use = 'LWI'\n # Get mask\n m = AC.get_CVAO_Africa_nest_Masked(res=res)\n # Name to save as\n if regrid21x1:\n res = '1x1'\n savename = 'ARNA_CVAO_African_nest_{}'.format(res)\n vals = ds[var2use].values\n # Change mask values\n if invert_mask:\n vals = np.where(m, vals, 0)\n vals = np.where(~m, vals, 1)\n ds[var2use].values = vals\n savename += '_inverted'\n ExtraStr = ' inverted'\n else:\n vals = np.where(~m, vals, 0)\n vals = np.where(m, vals, 1)\n ds[var2use].values = vals\n ExtraStr = ''\n # Only consider variable of interest\n ds = ds[[var2use]]\n\n # Plot up without mask present\n AC.quick_map_plot(ds, var2plot=var2use, savename=savename)\n plt.title(savename)\n\n # --- Add time to ds\n # Include a time dimension to meet COARDS\n try:\n ds = ds.expand_dims('time', axis=0)\n except ValueError:\n pass\n # Update Namings\n Var2SaveAs = 'MASK'\n ds = ds.rename({var2use: Var2SaveAs})\n var2use = Var2SaveAs\n # make sure ordering meets COARDS\n ds = ds.transpose('time', 'lat', 'lon')\n # --- Regrid\n if regrid21x1:\n # Regrid to use the same grid as the EMEP data (if the 2x2.5 fails)\n Fstr = get_local_folder('HEMCO_data')\n Fstr += '/MASKS/v2018-09/EMEP_mask.geos.1x1.20151222.nc'\n dsREF = xr.open_dataset(Fstr)\n # Create a dataset to re-grid into\n ds_out = xr.Dataset({\n 'lat': (['lat'], 
dsREF['lat']),\n 'lon': (['lon'], dsREF['lon']),\n })\n # Create a regidder (to be reused )\n regridder = xe.Regridder(ds, ds_out, 'bilinear', reuse_weights=True)\n # Loop and regrid variables\n ds_l = []\n for var_ in [var2use]:\n # Create a dataset to re-grid into\n ds_out = xr.Dataset({\n 'lat': (['lat'], dsREF['lat']),\n 'lon': (['lon'], dsREF['lon']),\n })\n # Get a DataArray\n dr = ds[var_] # .to_array()\n # build regridder\n dr_out = regridder(dr)\n # Important note: Extra dimensions must be on the left, i.e. (time, lev, lat, lon) is correct but (lat, lon, time, lev) would not work. Most data sets should have (lat, lon) on the right (being the fastest changing dimension in the memory). If not, use DataArray.transpose or numpy.transpose to preprocess the data.\n # exactly the same as input\n# xr.testing.assert_identical(dr_out['time'], dr['time'])\n # Save variable\n ds_l += [dr_out]\n # Combine variables\n dsN = xr.Dataset()\n for n, var2use in enumerate([var2use]):\n dsN[var2use] = ds_l[n]\n dsN[var2use].attrs = ds[var2use].attrs\n # Clean up\n regridder.clean_weight_file()\n # Parse the lat and lon attrs from original array\n dsN['lat'].attrs = ds['lat'].attrs\n dsN['lon'].attrs = ds['lon'].attrs\n # Now use re-gridded file\n del ds\n ds = dsN\n\n # - Plot up\n # Select data to plot\n a = ds[var2use].mean(dim='time')\n # Plot up map with mask present\n fig = plt.figure(figsize=(10, 6))\n ax = fig.add_subplot(111, projection=ccrs.Robinson(), aspect='auto')\n a.plot.imshow(x='lon', y='lat', ax=ax, transform=ccrs.PlateCarree())\n plt.title(savename)\n plt.savefig(savename+'.png')\n plt.close()\n\n # - Update attrs etc\n try:\n ds = ds.expand_dims('time', axis=0)\n except ValueError:\n pass\n ds = ds.assign_coords(time=[0])\n attrs = ds['time'].attrs\n attrs['standard_name'] = 'Time'\n attrs['long_name'] = attrs['standard_name']\n attrs['axis'] = 'T'\n attrs['units'] = 'hours since 2000-01-01 00:00:00'\n attrs['calendar'] = 'standard'\n ds['time'].attrs = attrs\n # Save array as float32 instead of float 64\n ds[Var2SaveAs].values = ds[Var2SaveAs].astype(np.float32).values\n # Update MASK attributes\n MainVarTitle = 'MASK of African high-res model nest '+ExtraStr\n attrs = ds[var2use].attrs\n attrs['standard_name'] = Var2SaveAs\n attrs['long_name'] = MainVarTitle\n# attrs['units'] = int(1)\n attrs['units'] = 'unitless'\n attrs['add_offset'] = int(0)\n attrs['scale_factor'] = int(1)\n attrs['missing_value'] = float(-1e-32)\n attrs['_FillValue'] = float(-1e-32)\n # Remove the vestigial ctm_units attribute\n try:\n del attrs['ctm_units']\n except:\n pass\n ds[var2use].attrs = attrs\n # Global attributes\n attrs = ds.attrs\n attrs['conventions'] = 'COARDS'\n attrs['contact'] = '[email protected]'\n attrs['title'] = MainVarTitle\n attrs['history'] = 'Created {}'.format(time.ctime(time.time()))\n# attrs['format'] = 'NetCDF-4'\n attrs['format'] = 'NETCDF3_CLASSIC'\n ds.attrs = attrs\n # Save as NetCDF\n# ds.to_netcdf( savename+'.nc', engine='scipy' )\n ds.to_netcdf(savename+'.nc', format='NETCDF3_CLASSIC')", "def brightness_mask(cmb_map, synch_template, synch_freq, dust_template, dust_freq, mask_file, regions_file, threshold=10.0):\n\n cmb_fluctuation = np.std(cmb_map)\n synch70 = synch_template*(70.0/synch_freq)**(-3.0)\n gamma = h/(kb*19.4)\n dust70 = dust_template*(70.0/dust_freq)**2.6*(np.exp(gamma*dust_freq*1.0e9) - 1.0)/(np.exp(gamma*70.0e9) - 1.0)\n\n regions = hp.read_map(regions_file)\n region_nums = [i for i in range(1, int(np.amax(regions)) + 1)]\n\n mask = 
np.ones(hp.nside2npix(hp.get_nside(cmb_map)))\n\n synch_idx = np.where(synch70>=threshold*cmb_fluctuation)[0]\n dust_idx = np.where(dust70>=threshold*cmb_fluctuation)[0]\n mask[synch_idx] = 0\n mask[dust_idx] = 0\n\n hp.write_map(mask_file, mask, overwrite=True)\n\n return mask", "def collapse(self):\n try:\n wavelengths = pylab.linspace(self.start, self.end,\n self.image.shape[not self.waveaxis])\n except TypeError:\n print 'The starting and ending wavelengths must be specified.'\n background = pylab.zeros(len(wavelengths))\n backgroundlines = 0\n data = pylab.zeros(len(wavelengths))\n datalines = 0\n for region in self.regions:\n if region['group'] is 0:\n backgroundlines += region['max'] - region['min']\n background += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n else:\n datalines += region['max'] - region['min']\n data += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n background = [sum/backgroundlines for sum in background]\n data = [sum/datalines for sum in data]\n corrected = pylab.array(data) - pylab.array(background)\n output = Spectrum(list(wavelengths), list(corrected))\n return output", "def monolayer_4band():\n a = 0.222\n ax = 0.438\n ay = 0.332\n theta = 96.79 * (pi / 180)\n phi = 103.69 * (pi / 180)\n\n lat = pb.Lattice(a1=[ax, 0], a2=[0, ay])\n\n h = a * sin(phi - pi / 2)\n s = 0.5 * ax - a * cos(theta / 2)\n lat.add_sublattices(\n ('A', [0, 0, h], 0),\n ('B', [s, 0, 0], 0),\n ('C', [ax/2, ay/2, 0], 0),\n ('D', [ax/2 + s, ay/2, h], 0)\n )\n\n lat.register_hopping_energies({\n 't1': -1.22,\n 't2': 3.665,\n 't3': -0.205,\n 't4': -0.105,\n 't5': -0.055\n })\n\n lat.add_hoppings(\n # t1\n ([-1, 0], 'A', 'D', 't1'),\n ([-1, -1], 'A', 'D', 't1'),\n ([ 0, 0], 'B', 'C', 't1'),\n ([ 0, -1], 'B', 'C', 't1'),\n # t2\n ([ 0, 0], 'A', 'B', 't2'),\n ([ 0, 0], 'C', 'D', 't2'),\n # t3\n ([ 0, 0], 'A', 'D', 't3'),\n ([ 0, -1], 'A', 'D', 't3'),\n ([ 1, 1], 'C', 'B', 't3'),\n ([ 1, 0], 'C', 'B', 't3'),\n # t4\n ([ 0, 0], 'A', 'C', 't4'),\n ([ 0, -1], 'A', 'C', 't4'),\n ([-1, 0], 'A', 'C', 't4'),\n ([-1, -1], 'A', 'C', 't4'),\n ([ 0, 0], 'B', 'D', 't4'),\n ([ 0, -1], 'B', 'D', 't4'),\n ([-1, 0], 'B', 'D', 't4'),\n ([-1, -1], 'B', 'D', 't4'),\n # t5\n ([-1, 0], 'A', 'B', 't5'),\n ([ 0, 1], 'A', 'B', 't5'),\n ([ 0, -1], 'A', 'B', 't5'),\n ([-1, 0], 'C', 'D', 't5'),\n ([ 0, 1], 'C', 'D', 't5'),\n ([ 0, -1], 'C', 'D', 't5'),\n )\n\n return lat", "def imshow_four(img1, img2, img3, img4):\n fig, axes = plt.subplots(\n nrows=2, ncols=2, sharex='all', sharey='all', figsize=(7, 8), dpi=300)\n ax = axes.ravel()\n\n ax[0].imshow(img1)\n pcm = ax[0].imshow(img1)\n ax[0].set_title('Cyan')\n ax[0].axis('off')\n\n ax[1].imshow(img2)\n pcm = ax[1].imshow(img2)\n ax[1].set_title('Magenta')\n ax[1].axis('off')\n\n ax[2].imshow(img3)\n pcm = ax[2].imshow(img3)\n ax[2].set_title('Yellow')\n ax[2].axis('off')\n\n ax[3].imshow(img4)\n pcm = ax[3].imshow(img4)\n ax[3].set_title('Black')\n ax[3].axis('off')\n\n fig.colorbar(pcm, orientation='horizontal', ax=ax.tolist())\n\n plt.show()", "def apply_mask(self, mask_band=None, mask_val=None):\n pass", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n 
seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {}\n ) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(brz=(1000, 2750))\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain, seed=69420)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def get_segmented_masks_from_rgbd(dict_of_scenes, training_weights_checkpoint_dir,\n dsn_config = None, rrn_config = None, uois3d_config = None):\n if dsn_config is None:\n dsn_config = {\n\n # Sizes\n 'feature_dim' : 64, # 32 would be normal\n\n # Mean Shift parameters (for 3D voting)\n 'max_GMS_iters' : 10, \n 'epsilon' : 0.05, # Connected Components parameter\n 'sigma' : 0.02, # Gaussian bandwidth parameter\n 'num_seeds' : 200, # Used for MeanShift, but not BlurringMeanShift\n 'subsample_factor' : 5,\n\n # Misc\n 'min_pixels_thresh' : 500,\n 'tau' : 15.,\n\n }\n \n if rrn_config is None:\n rrn_config = {\n\n # Sizes\n 'feature_dim' : 64, # 32 would be normal\n 'img_H' : 224,\n 'img_W' : 224,\n\n # architecture parameters\n 'use_coordconv' : False,\n\n }\n \n if uois3d_config is None:\n uois3d_config = {\n\n # Padding for RGB Refinement Network\n 'padding_percentage' : 0.25,\n\n # Open/Close Morphology for IMP (Initial Mask Processing) module\n 'use_open_close_morphology' : True,\n 'open_close_morphology_ksize' : 9,\n\n # Largest Connected Component for IMP module\n 'use_largest_connected_component' : True,\n\n }\n \n dsn_filename = training_weights_checkpoint_dir + 'DepthSeedingNetwork_3D_TOD_checkpoint.pth'\n rrn_filename = training_weights_checkpoint_dir + 'RRN_OID_checkpoint.pth'\n uois3d_config['final_close_morphology'] = 'TableTop_v5' in rrn_filename\n uois_net_3d = segmentation.UOISNet3D(uois3d_config, \n dsn_filename,\n dsn_config,\n rrn_filename,\n rrn_config\n )\n \n\n N = len(dict_of_scenes)\n if N == 0:\n print(\"Number of scenes in the dictonary are 0\")\n return None, None, None, None, None, None\n rgb_imgs = np.zeros((N, 480, 640, 3), dtype=np.float32)\n xyz_imgs = np.zeros((N, 480, 640, 3), dtype=np.float32)\n label_imgs = np.zeros((N, 480, 640), dtype=np.uint8)\n file_names = [None]*N\n \n i = 0\n for key, val in dict_of_scenes.items():\n # file names\n file_names[i] = key\n \n # RGB\n rgb_img = val['rgb']\n rgb_imgs[i] = data_augmentation.standardize_image(rgb_img)\n\n # XYZ\n xyz_imgs[i] = val['xyz']\n\n # Label\n label_imgs[i] = val['label']\n i += 1\n\n batch = {\n 'rgb' : data_augmentation.array_to_tensor(rgb_imgs),\n 'xyz' : data_augmentation.array_to_tensor(xyz_imgs),\n }\n \n print(\"Number of images: {0}\".format(N))\n ### Compute segmentation masks ###\n fg_masks, center_offsets, initial_masks, seg_masks = uois_net_3d.run_on_batch(batch)\n st_time = time()\n total_time = time() - st_time\n print('Total time taken for Segmentation: {0} 
seconds'.format(round(total_time, 3)))\n\n print(\"fg_masks.shape = \", fg_masks.shape)\n print(\"seg_masks.shape = \", seg_masks.shape)\n # Get results in numpy\n seg_masks = seg_masks.cpu().numpy()\n if np.array_equal(seg_masks, np.zeros(seg_masks.shape)):\n print(\"Seg_mask is zero\")\n sys.exit()\n fg_masks = fg_masks.cpu().numpy()\n center_offsets = center_offsets.cpu().numpy().transpose(0,2,3,1)\n initial_masks = initial_masks.cpu().numpy()\n rgb_imgs = util_.torch_to_numpy(batch['rgb'].cpu(), is_standardized_image=True)\n\n return rgb_imgs, xyz_imgs, seg_masks, label_imgs, fg_masks, file_names", "def minmax():\n minmaxlist = []\n timelist = []\n #create a list of the filenames of all sentinel-images\n s2files = [f for f in listdir(s2path) if endswith(join(s2path, f),\".tif\")==True]\n print(\"STEP 1/2\")\n print(\"EXPORTING MIN AND MAX VALUES PER BAND\")\n for i in s2files:\n start = time.time()\n nlfile = nlpath + \"/\" + i\n s2file = s2path+\"/\"+i\n #open the file\n s2raster = gdal.Open(s2file) \n #iterate over the bands of each image\n for n in range(s2raster.RasterCount):\n f = n + 1\n s2band = s2raster.GetRasterBand(f)\n #read the pixels of the band as an numpy-array\n s2band = s2band.ReadAsArray()\n #resize the bands to have all images in the same size\n s2band = np.resize(s2band,(1050,1050))\n #get the min and max values of each band to be able to 0-1 normalize after\n min = s2band.min()\n max = s2band.max()\n #check if there are already values for the band\n if len(minmaxlist) < s2raster.RasterCount + 1:\n s2minmax = [min,max]\n minmaxlist.append(s2minmax)\n # if the min value of the open band is smaller than the saved minimal value, overwrite it\n if min < minmaxlist[n][0]:\n minmaxlist[n][0] = min\n #if the max value of the open band is higher than the saves maximum value, overwrite it\n if max > minmaxlist[n][1]:\n minmaxlist[n][1] = max\n #open the nightlight img\n nlraster = gdal.Open(nlfile)\n nlband = nlraster.GetRasterBand(1)\n #read the only band of the image as a numpy-array\n nlband = nlband.ReadAsArray()\n #resize it the same way as the sentinel images\n nlband = np.resize(nlband,(1050,1050))\n #get the min and max values of the band\n nlmin = nlband.min()\n nlmax = nlband.max()\n #check if there are already information about min and max values for the nightlight images\n if len(minmaxlist) < s2raster.RasterCount + 1:\n nlminmax = [nlmin,nlmax]\n minmaxlist.append(nlminmax)\n #if the min value of the open nightlight image is smaller than the saved minimal value, overwrite it\n if nlmin < minmaxlist[16][0]:\n minmaxlist[16][0] = nlmin\n #if the max value of the open nightlight image is higher than the saves maximum value, overwrite it\n if nlmax > minmaxlist[16][1]:\n minmaxlist[16][1] = nlmax\n end = time.time()\n timelist.append(end-start)\n print(\"Step 1/2\",str(s2files.index(i)+1) + \"/\" + str(len(s2files)),\"Est. 
time left:\",time.strftime('%H:%M:%S',time.gmtime(int(sum(timelist)/len(timelist)*(len(s2files)-s2files.index(i))))))\n #throw out the Quality Bands (QA10,QA20,QA60)\n minmaxlist = [i for j,i in enumerate(minmaxlist) if j not in [13,14,15]]\n return minmaxlist", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def segment_nuclei_4dstack(stack, seg_func, **kwargs):\n # Segment first frame, initialize 4D labelmask.\n frame0 = seg_func(stack[0], **kwargs)\n labelmask = np.array([frame0])\n # Segment subsequent frames, stack results together.\n for n in range(1, stack.shape[0]):\n print(n, end=' ')\n frame = seg_func(stack[n], **kwargs)\n labelmask = np.vstack((labelmask, [frame]))\n print('')\n return labelmask", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = 
rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def get_analysis_masks(masks='basic', hPa=None, M_all=False, res='4x5',\n saizlopez=False, r_pstr=True, wd=None, trop_limit=True,\n mask4D=False,\n use_multiply_method=True, debug=False):\n # --- Check hPa has been provided as arg.\n if isinstance(hPa, type(None)):\n print('ERROR: Please provide array of hPa to get_analysis_masks')\n\n if masks == 'full':\n # ---- List of masks\n mtitles = [\n 'All', 'Ocean', 'Land', 'Ice', 'All Sur.', 'Ocean Sur.',\n 'Land Sur.', 'Ice Sur.', 'NH', 'SH', 'Tropics', 'Ex. Tropics',\n 'Mid Lats', 'Ocn. 
50S-50N', '50S-50N'\n ]\n tsects3D = ['MBL', 'BL', 'FT', 'UT']\n # get MBL, FT and UT maskes\n sects3D = [\n mask_3D(hPa, i, MBL=False, M_all=M_all, res=res,\n use_multiply_method=use_multiply_method) for i in tsects3D\n ]\n # ---- Use non-pythonic mulitply method?\n if use_multiply_method:\n maskes = [mask_all_but(i, trop_limit=trop_limit, mask3D=True,\n use_multiply_method=True, res=res)\n for i in mtitles]\n # if comparison with saiz-lopez 2014,\n if M_all:\n ind = [n for n, i in enumerate(mtitles) if not ('MBL' in i)]\n for n in ind:\n maskes[n] = maskes[n]*land_unmasked(res=res)\n # --- Use pythonic approach\n else:\n maskes = [mask_all_but(i, trop_limit=trop_limit, mask3D=True,\n use_multiply_method=False, res=res)\n for i in mtitles]\n # If not 'use_multiply_method', then invert hPa masks\n sects3D = [np.logical_not(i) for i in sects3D]\n # Add to mask and mask title lists\n maskes = maskes + sects3D\n mtitles = mtitles + tsects3D\n # Also create print strings...\n npstr = '{:<12}'*len(maskes)\n pstr = '{:<12,.2f}'*len(maskes)\n if masks == 'basic':\n tsects3D = ['All', 'MBL', 'BL', 'FT', 'UT']\n mtitles = [i+' (All)' for i in tsects3D] + \\\n [i+' (Tropics)' for i in tsects3D] + \\\n [i+' (Mid Lats)' for i in tsects3D]\n # Standard maskes none, tropics, mid-lats (3)\n maskes = [\n np.logical_not(i) for i in (all_unmasked(res=res),\n tropics_unmasked(\n res, saizlopez=saizlopez),\n mid_lats_unmasked(res))]\n # Additional masks - tsects3D (4+1) * standard maskes (3)\n dmaskes = [\n [mask_3D(hPa, i, MBL=False, extra_mask=mask, M_all=M_all, res=res)\n for i in tsects3D] for mask in maskes]\n # Unpack and set as maskes (12) to list (3)\n dmaskes = [j for i in dmaskes for j in i]\n print([len(i) for i in (maskes, dmaskes, mtitles, tsects3D)])\n maskes = dmaskes\n print([len(i) for i in (maskes, dmaskes, mtitles, tsects3D)])\n # If comparison with saiz-lopez 2014 appli marine mask to all...\n if M_all:\n ind = [n for n, i in enumerate(mtitles) if not 'MBL' in i]\n for n in ind:\n maskes[n] = maskes[n]*land_unmasked(res=res)\n if debug:\n print([len(i) for i in (maskes, dmaskes, mtitles, tsects3D)])\n # Also create print strings...\n npstr = '{:<15}'*len(maskes)\n pstr = '{:<15,.2f}'*len(maskes)\n if masks == 'trop_regions':\n mtitles = ['BL', 'FT', 'UT']\n maskes = [mask_3D(hPa, i, M_all=M_all, MBL=False, res=res)[:, :, :38]\n for i in mtitles]\n # Also create print strings...\n npstr = '{:<15}'*len(maskes)\n pstr = '{:<15,.2f}'*len(maskes)\n # Only consider the \"chemical troposphere\" - according v9-2\n if trop_limit:\n maskes = [i[:, :, :38] for i in maskes]\n # Create 4D array by concatenating through time dimension\n # ( assuming year long array of 1 months )\n if mask4D:\n for n, mask in enumerate(maskes):\n if any([(mask.shape[-1] == i) for i in [12]]):\n pass\n else: # concatenate dimensions\n maskes[n] = np.concatenate([mask[..., None]]*12, axis=3)\n if r_pstr:\n return maskes, mtitles, npstr, pstr\n else:\n return maskes, mtitles", "def crop_acc_mask(images_dir, images_output_dir, masks_dir, mask_suffix=None, masks_output_dir=None): \n image_suffix_list = [\"C0\", \"DE\", \"T2\"]\n if not os.path.exists(images_output_dir):\n os.makedirs(images_output_dir)\n if masks_output_dir is not None and (not os.path.exists(masks_output_dir)):\n os.makedirs(masks_output_dir)\n margin = [0, 30, 30]\n masks_list = os.listdir(masks_dir)\n masks_list.sort()\n json_dict = OrderedDict()\n for mask in masks_list:\n mask_path = os.path.join(masks_dir, mask)\n if mask.endswith(\".nii.gz\"):\n 
print(\"#\" * 11 *11)\n print(mask_path)\n mask_sitk = sitk.ReadImage(mask_path)\n mask_npy = sitk.GetArrayFromImage(mask_sitk)\n mask_shape = mask_npy.shape\n crop_bbox_min, crop_bbox_max = get_ND_bounding_box(mask_npy, margin=margin)\n # do not crop along depth dimension\n crop_bbox_min[0] = 0\n crop_bbox_max[0] = mask_shape[0]\n print(crop_bbox_min, crop_bbox_max)\n json_dict[mask_path] = {\"crop_bbox_min\": crop_bbox_min, \"crop_bbox_max\": crop_bbox_max}\n mask_output_npy = crop_ND_volume_with_bounding_box(mask_npy, crop_bbox_min, crop_bbox_max)\n if mask_suffix is not None:\n mask = mask.replace(\"_\" + mask_suffix + \".nii.gz\", \".nii.gz\")\n if masks_output_dir is not None:\n save_cropped_array_as_nifty_volume(mask_output_npy, os.path.join(masks_output_dir, mask), mask_sitk)\n save_cropped_array_as_nifty_volume(convert_label(mask_output_npy, [1, 2, 3, 4, 5], [1, 2, 3, 1, 1]), \\\n os.path.join(images_output_dir, mask.replace(\".nii.gz\", \"_{0:04d}.nii.gz\".format(len( \\\n image_suffix_list)))), mask_sitk)\n for i, image_suffix in enumerate(image_suffix_list):\n image = mask.replace(\".nii.gz\", \"_{}.nii.gz\".format(image_suffix))\n image_path = os.path.join(images_dir, image)\n print(image_path)\n image_sitk = sitk.ReadImage(image_path)\n image_npy = sitk.GetArrayFromImage(image_sitk)\n image_output_npy = crop_ND_volume_with_bounding_box(image_npy, crop_bbox_min, crop_bbox_max)\n save_cropped_array_as_nifty_volume(image_output_npy, os.path.join(images_output_dir, mask.replace( \\\n \".nii.gz\", \"_{0:04d}.nii.gz\".format(i))), image_sitk)\n save_json(json_dict, os.path.join(images_output_dir, \"crop_information.json\"))\n if masks_output_dir is not None:\n save_json(json_dict, os.path.join(masks_output_dir, \"crop_information.json\"))", "def GetMapId(landsat, date, date_range):\n \n def maskClouds(img):\n scored = ee.Algorithms.Landsat.simpleCloudScore(img);\n return img.updateMask(scored.select(['cloud']).lt(20));\n\n def CreateTimeBand(img):\n return maskClouds(img).byte().addBands(img.metadata('system:time_start'))\n\n if landsat == 'l7':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L7)\n l7 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l7Composite = l7.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l7Composite.getMapId({\n 'min': '0,0,0',\n 'max': '255,255,255',\n 'bands': 'B4,B3,B2',\n })\n if landsat == 'l8':\n collection = ee.ImageCollection(IMAGE_COLLECTION_ID_L8)\n l8 = collection.filter(ee.Filter.lte('CLOUD_COVER', 25)).filterDate(date_range, date).map(CreateTimeBand);\n l8Composite = l8.qualityMosaic('system:time_start');\n\n #vizParams = {bands: ['B4', 'B3', 'B2'], min: 0, max: 0.4};\n\n return l8Composite.getMapId({\n 'min': '0',\n 'max': '0.4',\n 'bands': 'B4,B3,B2',\n })", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {},\n show_mask: bool = False) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(detail_brz=1500, lines_brz=1000)\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", 
"def make_niimage_4d(maps, mask, affine, zscore=False):\n\tK, V = maps.shape\n\tnx, ny, nz = mask.shape\n\tdata = np.zeros((nx, ny, nz, K))\n\tassert(V == mask.sum())\n\tmask = mask == True\n\tfor k in range(K):\n\t\tmap = maps[k,:]\n\t\tif zscore:\n\t\t\tmap = (map - map.mean()) / map.var()\n\t\tdata[mask,k] = map \n\n\tnifti_image = nib.Nifti1Image(data, affine)\n\treturn nifti_image", "def octave_noise_4d(octaves, persistence, scale, x, y, z, w):\n total = 0.0\n frequency = scale\n amplitude = 1.0\n\n # We have to keep track of the largest possible amplitude,\n # because each octave adds more, and we need a value in [-1, 1].\n maxAmplitude = 0.0\n\n for i in range(octaves):\n total += raw_noise_4d(x*frequency, y*frequency, z*frequency, w*frequency)*amplitude\n frequency *= 2\n maxAmplitude += amplitude\n amplitude *= persistence\n\n return total/maxAmplitude", "def mask_gpis(irrig_fpath, treshhold=0.05):\n irrig_data = pd.DataFrame.from_csv(irrig_fpath)\n print irrig_data\n masked_gpis = load_lcmask()\n\n # merge data frames: colname = gpi_quarter\n merged_data = pd.merge(irrig_data, masked_gpis, how='left', on='gpi_quarter')\n # drop masked out gpis\n masked_data = merged_data.loc[(merged_data['crop_mask'] >= treshhold) & (merged_data['crop_mask'] <= 1.0)]\n return masked_data", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def load_4d(path, horizontalCameras, verticalCameras, config, rgb=True, roi=None, switchOrder=False):\n assert isinstance(path, str)\n assert isinstance(rgb, bool)\n if roi is not None:\n assert isinstance(roi, dict)\n\n fnames = []\n for f in glob(path + \"*.png\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.jpg\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.JPG\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.tif\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.TIF\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.exr\"):\n fnames.append(f)\n if len(fnames) == 0:\n for f in glob(path + \"*.ppm\"):\n fnames.append(f)\n\n fnames.sort()\n\n for i in fnames:\n print(i)\n\n if switchOrder:\n fnames.reverse()\n\n im = vigra.readImage(fnames[0], order='C')\n\n if len(im.shape) == 2:\n rgb = False\n\n lf = np.zeros((verticalCameras, horizontalCameras, im.shape[0], im.shape[1], 3), dtype=np.float32)\n\n for n in range(0, len(fnames)):\n im = vigra.readImage(fnames[n], order='C')\n a = (horizontalCameras-1) - n % horizontalCameras\n b = int(n / horizontalCameras)\n lf[b,a, :, :, :] = im[:,:,0:3]\n\n amax = np.amax(lf)\n if amax >= 1:\n lf[:] /= 255\n\n # for v in range(lf.shape[0]):\n # for h in range(lf.shape[1]):\n # plt.imsave(config.result_path+config.result_label+\"image_{0}.png\".format(h+9*v), lf[v,h, :, :, :])\n\n\n return lf", "def replace_missingvalues_bandmean(X):\n if X.ndim != 4:\n raise ValueError('Input not valid, no [pic, row, column, band] data format')\n\n zeros = np.where(X[:,:,:] == 0)\n\n bandmean = {}\n\n for i in sorted(np.unique(zeros[3])):\n bandmean.update({i:np.mean(X[:,:,:,i])})\n\n for i in range(0,len(zeros[0])):\n pic, row, column, band = zeros[0][i],zeros[1][i],zeros[2][i],zeros[3][i]\n mean = bandmean.get(band)\n X[pic,row,column,band] = int(mean)\n\n return X", "def get_cruise_track_mask(max_lon=None, min_lon=None, max_lat=None,\n 
min_lat=None, unmask_water=True, res='4x5',\n trop_limit=True):\n # only look at surface\n m = surface_unmasked(res=res, trop_limit=trop_limit)\n # apply ocean mask\n if unmask_water:\n m = m + ocean_unmasked(res=res)\n # Mask over given longitude range, if provided\n if not isinstance(max_lon, type(None)):\n m = m + lon2lon_2D_unmasked(lowerlon=min_lon, higherlon=max_lon,\n res=res)[:, :, None]\n # Mask over given latitude range, if provided\n if not isinstance(max_lat, type(None)):\n m = m + lat2lat_2D_unmasked(lowerlat=min_lat, higherlat=max_lat,\n res=res)[:, :, None]\n # Invert\n m = np.logical_not(m)\n return m", "def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):\n\t\t\tiso = ee.Image(f_iso)\n\t\t\tgeo = ee.Image(f_geo)\n\t\t\tvol = ee.Image(f_vol)\n\t\t\tpred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])\n\t\t\tpred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])\n\t\t\tcfac = pred0.divide(pred).rename(['cfac'])\n\t\t\tcorr = image.select(band_name).multiply(cfac).rename([band_name])\n\t\t\treturn corr", "def Five2Four(data, shape4d, dst_type, format_, target=utils.CCE):\n utils.ops_dtype_check([data.dtype, dst_type], utils.DtypeForDavinci.ALL_FLOAT)\n shape5d = get_shape(data)\n if not shape_is_dynamic(data):\n if len(shape5d) != 5 or shape5d[-1] != 16:\n raise ValueError(\"five2four_cce only support 5-dim data and last dim should be 16\")\n\n bs, c1, h, w, c0 = shape5d\n if not shape_is_dynamic(data):\n utils.davinci_format_check(shape5d, \"NC1HWC0\", dim=5)\n # Check format\n if format_ not in ['NCHW', 'NHWC']:\n raise ValueError(\"{} format is not support, five2four only support NCHW and NHWC format input\"\n .format(format_))\n if format_ == \"NCHW\":\n if shape_is_dynamic(data):\n shape4d = [bs, c1 * c0, h, w]\n _, c, h_4d, w_4d = shape4d\n else:\n if shape_is_dynamic(data):\n shape4d = [bs, h, w, c1 * c0]\n _, h_4d, w_4d, c = shape4d\n utils.davinci_format_check(shape4d, format_, dim=4)\n\n # Check is shape4d and shape5d match\n if False not in [isinstance(s, (int, akg.tvm.expr.IntImm)) for s in shape5d]:\n if h_4d != h or w_4d != w:\n raise ValueError(\"five2four_cce's shape4d h and w should equal to data shape's h and w\")\n if c > c1 * c0 or c <= (c1 - 1) * c0:\n raise ValueError(\"five2four_cce's shape4d c should in set ((c1 - 1) * c0, c1 * c0]\")\n\n # Check size c when casting happens\n if not shape_is_dynamic(data):\n if data.dtype != dst_type and c >= C_LIMIT_FOR_CAST:\n raise ValueError(\"When input and output data type is not matched, shape of 'c' axis should not exceed {}, \"\n \"while currently set is {}\".format(C_LIMIT_FOR_CAST, c))\n\n @script(capture=locals())\n def nc1hwc0_to_nhwc(inputs, bs, h, w, c, c1, c0):\n output = allocate((bs, h, w, c), inputs.dtype, \"local\")\n for n_i in range(bs):\n for h_i in range(h):\n for w_i in range(w):\n for c_i in range(c1):\n for c_i0 in range(c0):\n output[n_i, h_i, w_i, c_i * c0 + c_i0] = inputs[n_i, c_i, h_i, w_i, c_i0]\n return output\n\n @script(capture=locals())\n def nc1hwc0_to_nchw(inputs, bs, h, w, c, c1, c0):\n output = allocate((bs, c, h, w), inputs.dtype, \"local\")\n for n_i in range(bs):\n for c_i in range(c1):\n for h_i in range(h):\n for w_i in range(w):\n for c_i0 in range(c0):\n output[n_i, c_i * c0 + c_i0, h_i, w_i] = inputs[n_i, c_i, h_i, w_i, c_i0]\n return output\n\n # if c % 16 == 0, h and w == 1, five2four is a reshape operation\n if shape_is_dynamic(data):\n call_reshape = isinstance(h, int) and isinstance(w, int) and h == 1 
and w == 1\n else:\n call_reshape = h == 1 and w == 1 and c % 16 == 0\n c_value = None\n expansion = None\n if format_ == \"NHWC\":\n if call_reshape:\n output = akg.topi.reshape(data, (bs, h, w, c))\n if shape_is_dynamic(data):\n output = akg.tvm.compute((bs, h, w, c), lambda *indice: output(*indice), name=\"reshape\")\n elif c < c0:\n reshape_output = akg.topi.reshape(data, (bs, h, w, c0))\n output = akg.tvm.compute((bs, h, w, c), lambda *i: reshape_output(*i), name='slice_output')\n else:\n output = nc1hwc0_to_nhwc(\n data,\n to_tvm_const(bs),\n to_tvm_const(h),\n to_tvm_const(w),\n to_tvm_const(c),\n to_tvm_const(c1),\n to_tvm_const(c0))\n\n else:\n if call_reshape:\n output = akg.topi.reshape(data, (bs, c, h, w))\n if shape_is_dynamic(data):\n output = akg.tvm.compute((bs, c, h, w), lambda *indice: output(*indice), name=\"reshape\")\n else:\n output = nc1hwc0_to_nchw(\n data,\n to_tvm_const(bs),\n to_tvm_const(h),\n to_tvm_const(w),\n to_tvm_const(c),\n to_tvm_const(c1),\n to_tvm_const(c0))\n\n # two special cases for tiling strategy\n if not shape_is_dynamic(data):\n if c < c0 or output.dtype != dst_type:\n c_value = c\n if c % c0 != 0 and output.dtype != dst_type:\n expansion = int(ct_util.BLOCK_SIZE / get_bytes(data.dtype))\n attrs = get_attrs()\n if not call_reshape:\n attrs[\"custom_tiling\"] = five2four_tiling_strategy(data, c_value, expansion)\n\n if output.dtype != dst_type:\n output = akg.topi.cast(output, dst_type)\n return output, attrs", "def testMask4D(self):\n mask = np.ones((3, 3, 3, 5), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5))\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in 
science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = 
DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = 
[(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def createExtractedBandList(self):\n\n L8_bands = None\n\n # Working directory for extracted L8 bands\n working_dir = self.config['working_d']\n outpath_bands = self.scene.extractBands(working_dir)\n\n if outpath_bands is not None:\n # Create a list all filenames extracted from the downloaded tar file\n f_Bands = os.listdir(outpath_bands)\n\n # Search only for filename ending in '_Bx.TIF' and add them to the list of L8 band files\n L8_bands = [x for x in f_Bands if re.search(r'_B(\\d+)\\.TIF$', x, flags=RegexFlag.IGNORECASE)]\n L8_bands.sort(key=natural_keys)\n\n if len(L8_bands) != self.scene.getNumberOfBands():\n self.logger.critical('Skipping scene: Path/Row= [%s/%s] date= [%s]', self.scene.path, self.scene.row, self.scene.acqdate)\n raise workflowException('Missing band files detected: {0} found instead of {1}'.format(len(L8_bands), '11'))\n else:\n L8_bands = None\n self.logger.critical('Error decompressing %s', str(self.scene))\n raise workflowException('Error passing original band files')\n\n filename = L8_bands[0]\n title, info1, info2, info3 = self.scene.decodeProduct(filename)\n\n # log band file information\n self.logger.info(' ')\n self.logger.info('%s', title)\n self.logger.info('%s', info1)\n self.logger.info('%s', info2)\n self.logger.info('%s', info3)\n self.logger.info(' ')\n\n return L8_bands", "def write_jpeg(filename,band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t stepsz=1.,clobber=False,verbose=0,tscale=1000.,retries=20):\n\tscipy.misc.imsave(filename,countmap(band,skypos,tranges,skyrange,\n\t\t\t\t\t width=width,height=height,verbose=verbose,tscale=tscale,\n\t\t\t\t\t retries=retries))\n\treturn", "def bqa_fmask_func(qa):\n # Extracting cloud masks from BQA using np.right_shift() and np.bitwise_and()\n # Cloud (med & high confidence), then snow, then shadow, then fill\n # Low confidence clouds tend to be the FMask buffer\n fill_mask = np.bitwise_and(np.right_shift(qa, 0), 1) >= 1\n cloud_mask = np.bitwise_and(np.right_shift(qa, 4), 1) >= 1 # cloud bit\n cloud_mask &= np.bitwise_and(np.right_shift(qa, 5), 3) >= 2 # cloud conf.\n cloud_mask |= np.bitwise_and(np.right_shift(qa, 11), 3) >= 3 # cirrus\n shadow_mask = np.bitwise_and(np.right_shift(qa, 7), 3) >= 
3\n snow_mask = np.bitwise_and(np.right_shift(qa, 9), 3) >= 3\n\n fmask = (fill_mask != True).astype(np.uint8)\n fmask[shadow_mask] = 2\n fmask[snow_mask] = 3\n fmask[cloud_mask] = 4\n\n return fmask", "def make_skydark(files, ext=1, nproc=6, title='ext_1', overwrite=False):\n\n # See if outfile already exists\n outfile = 'skydark_{}.fits'.format(title)\n if (os.path.exists(outfile)) & (overwrite is False):\n print('{} already exists, stopping...'.format(outfile))\n\n else:\n print('Making a stack of the input files...')\n stack = np.zeros((len(files), 2051, 4096))\n for i,f in enumerate(files):\n h = fits.open(f)\n data = h[ext].data\n #dq = h[ext+2].data\n\n # Get the segmap for this file\n segmap_file = f.replace('.fits', '_seg_ext_{}.fits'.format(ext))\n if not os.path.isfile(segmap_file): # sometimes input files are medsub/equalized\n segmap_file = f.replace('_medsub', '').replace('_eq', '').replace('.fits', '_seg_ext_{}.fits'.format(ext))\n segmap = fits.getdata(segmap_file)\n\n # Mask bad pixels and sources\n #data[dq!=0] = np.nan\n data[segmap>0] = np.nan\n stack[i] = data\n h.close()\n\n # Make the skydark\n print('Calculating the median through the stack of input files...')\n if nproc==1:\n skydark = np.nanmedian(stack, axis=0)\n else:\n stacks = np.split(stack, 16, axis=2) # split stack into 16 2048x256 sections\n p = Pool(nproc)\n results = p.map(med_stack, stacks)\n skydark = np.concatenate(results, axis=1)\n\n # Write out the sky dark\n fits.writeto(outfile, skydark, overwrite=True)\n print('Sky dark generated.')\n\n # Make a filtered version of the skydark\n print('Filtering the sky dark...')\n amp1, amp2 = np.split(skydark, 2, axis=1) # treat amps separately\n sigma_clip = SigmaClip(sigma=3.)\n bkg_estimator = MedianBackground()\n bkg1 = Background2D(amp1, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n bkg2 = Background2D(amp2, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n filtered = np.concatenate((bkg1.background, bkg2.background), axis=1)\n fits.writeto('{}_filtered.fits'.format(outfile.replace('.fits','')), \n filtered, overwrite=True)\n print('Filtered sky dark generated.')", "def include_wcs_in_masks(input_images):\n img_list = [astroim.Astroim(im_name, memmap=True) for im_name in input_images]\n mask_names = [im.primary_header.get(\"MASK\") for im in img_list]\n output = []\n for im_object, mask_name in zip(img_list, mask_names):\n with fits.open(mask_name, 'readonly') as mask:\n mask_header = im_object.chips[0].header.hdr\n mask_data = mask[0].data.copy()\n mask_data[mask_data>0] = 1\n _, path = tempfile.mkstemp(suffix=\".fits\")\n fits.writeto(path, mask_data * 1., mask_header, clobber=True)\n output.append(path)\n return output", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def makeSkyList(skyFrameList, sciencelist, obsDir):\n logging.info(\"\\n#############################################################\")\n logging.info(\"# #\")\n logging.info(\"# Matching science frames with sky frames closest in time #\")\n logging.info(\"# #\")\n logging.info(\"#############################################################\\n\")\n # Do some tests first.\n # Check that data is either:\n # ABA ABA ABA- one sky frame per two science frames.\n # AB AB AB- one sky 
frame per one two science frames.\n #\n # If it is neither warn user to verify that sky frames were matched with science frames correctly.\n if len(skyFrameList) != len(sciencelist)/2 and len(skyFrameList) != len(sciencelist):\n logging.info(\"\\n#####################################################################\")\n logging.info(\"#####################################################################\")\n logging.info(\"\")\n logging.info(\" WARNING in reduce: it appears science frames and sky frames were not\")\n logging.info(\" taken in an ABA ABA or AB AB pattern.\")\n logging.info(\"\")\n logging.info(\"#####################################################################\")\n logging.info(\"#####################################################################\\n\")\n skytimes = []\n prepared_sky_list = []\n # Calculate time of each sky frame. Store the calculated time and the frame name in skytimes, a\n # 2D list of [skyframe_time, skyframe_name] pairs.\n # Eg: [[39049.3, 'N20130527S0241'], [39144.3, 'N20130527S0244'], [39328.8, 'N20130527S0247'], [39590.3, 'N20130527S0250']]\n for item in skyFrameList:\n # Strip off the trailing newline.\n item = str(item).strip()\n # Calculate the time of the sky frame.\n skytime = timeCalc(item+'.fits')\n # Store the sky frame time and corresponding sky frame name in skytimes.\n templist = [skytime, item]\n skytimes.append(templist)\n logging.info(\"scienceframelist: skyFrameList: time delta (between observation UT start times from .fits headers):\")\n for item in sciencelist:\n # Calculate time of the science frame in seconds.\n item = str(item).strip()\n sciencetime = timeCalc(item+'.fits')\n # Sort the 2D list of [skyframe_time, skyframe_name] pairs by absolute science_frame_time - skyframe_time.\n # Eg: [[39049.3, 'N20130527S0241'], [39144.3, 'N20130527S0244'], [39328.8, 'N20130527S0247'], [39590.3, 'N20130527S0250']]\n sorted_by_closest_time = sorted(skytimes, key=lambda x: (abs(sciencetime - x[0])))\n # Append the name corresponding to the minimum time difference to prepared_sky_list.\n prepared_sky_list.append(sorted_by_closest_time[0][1])\n # Print the scienceframe, matching skyframe and time difference side by side for later comparison.\n logging.info(\" \"+ str(item)+ \" \"+ str(sorted_by_closest_time[0][1])+ \" \"+ str(abs(sciencetime - sorted_by_closest_time[0][0])))\n logging.info(\"\\n\")\n\n os.rename('skyFrameList', 'original_skyFrameList')\n\n f = open('skyFrameList', 'w')\n for image in prepared_sky_list:\n f.write(image+'\\n')\n f.close()\n\n return prepared_sky_list", "def spectrum_regions(x_on,\n y_on,\n r_on,\n x_fov,\n y_fov,\n r_fov,\n exclusion,\n outfile,\n min_on_distance):\n from astropy.io import fits\n from gammapy.background import ReflectedRegionMaker\n\n if exclusion:\n log.info('Reading {0}'.format(exclusion))\n exclusion = fits.open(exclusion)[0]\n else:\n # log.info('No exclusion mask used.')\n # TODO: make this work without exclusion mask\n log.error(\"Currently an exclusion mask is required\")\n exit(-1)\n\n fov = dict(x=x_fov, y=y_fov, r=r_fov)\n rr_maker = ReflectedRegionMaker(exclusion=exclusion,\n fov=fov)\n source = dict(x_on=x_on, y_on=y_on, r_on=r_on)\n rr_maker.compute(**source)\n\n log.info('Writing {0}'.format(outfile))\n rr_maker.write_off_regions(outfile)", "def filter_on_adwin_parameters(a_lt3,a_lt4,**kw):\r\n\r\n filter_params = kw.pop('adwin_filter_params',{})\r\n if len(filter_params):\r\n old_params = analysis_params.SPSP_fltr_adwin_settings\r\n \r\n for setup_key,setup_dict in 
filter_params.iteritems():\r\n for key,params in setup_dict.iteritems():\r\n analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+setup_key][key] = params\r\n\r\n fltr = np.array([True]*len(a_lt3.agrp['ssro_results'].value)) ### initially everything true\r\n\r\n for a,suffix in zip([a_lt3,a_lt4],['lt3','lt4']): ### loop over both files\r\n for key,val in analysis_params.SPSP_fltr_adwin_settings['fltr_dict_'+suffix].iteritems(): ### loop over the list of filter parameters\r\n [filter_on,minimum,maximum] = val\r\n\r\n if filter_on:\r\n if key == 'repetition_number':\r\n values = np.array([i for i in range(len(fltr)/a.g.attrs['sweep_length']) for _ in range(a.g.attrs['sweep_length'])]) ### Make an array of values corresponding to the current rep\r\n else:\r\n values = a.agrp[key].value\r\n\r\n fltr = np.logical_and(fltr,(values >= minimum) & ( values <= maximum)) ### update filter\r\n\r\n if len(filter_params):\r\n analysis_params.SPSP_fltr_adwin_settings = old_params\r\n\r\n return fltr", "def testMask4D(self):\n\n # This mask, applied on an image filled with 1, should result in an image\n # filled with 17, as there are 18 weights but we zero out one of them.\n mask = np.ones([3, 3, 2, 1], dtype=np.float32)\n mask[0, 0, 0, :] = 0\n inputs = tf.constant(1.0, shape=(1, 5, 5, 2))\n conv1 = snt.Conv2D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = np.array([[17] * 3] * 3)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(np.reshape(out.eval(), [3, 3]), expected_out)", "def ocean_unmasked(res='4x5', debug=False):\n\n from .GEOSChem_bpch import get_LWI_map\n if debug:\n print(('ocean_mask called for: ', res))\n\n # Create a mask from land/water/ice indices\n m = np.ma.masked_not_equal(get_LWI_map(res=res), 0)\n if debug:\n print((mask, mask.shape))\n return m.mask", "def numberOfWideBands(config=None):\n # Get correlator configuration\n c = config\n if c == None: \n c = utils.getConfigAstroband()\n\n # Determine if we have both wideband and spectral line astrobands. 
\n # If we do, we return nwide & maxbandwidth for sl only since \n # this is the correlator which will be attached to all ants.\n astrobands = [ abc[0] for abc in c ]\n if len( astrobands ) == 0:\n raise Exception, \"No existing astroband configuration.\"\n if max( astrobands ) > 8 and min( astrobands ) < 9: \n astrobands = [ ab for ab in astrobands if ab < 9 ]\n\n # Check bandwidth\n nwide = 0\n maxbandwidth = 0\n for t in c:\n astroband = t[0]\n # Skip band if it is not being used or is not in astroband list above.\n mp = commands.queryString('SignalPath.Mapping.Astroband%d.confTag' % (astroband) )\n if mp == 'NONE' or astroband not in astrobands: continue\n\n # Get bandwidth\n if t[2] == commands.BW500:\n bw = 500\n elif t[2] == commands.BW250:\n bw = 250\n elif t[2] == commands.BW125:\n bw = 125\n elif t[2] == commands.BW62:\n bw = 62\n elif t[2] == commands.BW31:\n bw = 31\n elif t[2] == commands.BW8:\n bw = 8\n elif t[2] == commands.BW2:\n bw = 2\n else:\n raise Exception, 'Could not find bandwith for '+str(t[2])\n\n # Maximum?\n if bw > maxbandwidth: \n maxbandwidth = bw\n if utils.isDualPol( astroband ):\n nwide = 2 \n else:\n nwide = 1\n elif bw == maxbandwidth:\n if utils.isDualPol( astroband ): \n nwide += 2 \n else:\n nwide += 1\n\n return nwide, maxbandwidth", "def get_masked_scene(orig, mask, local_context_size = 80, dilation=False):\n orig_scene = orig.copy()\n mask_scene = mask.copy()\n orig_scene_no_mask = orig.copy()\n \n mask_info = np.where(mask_scene == 0) \n min_x = max(min(mask_info[0]) - local_context_size, 0)\n max_x = max(mask_info[0]) + local_context_size\n min_y = max(min(mask_info[1]) - local_context_size, 0)\n max_y = max(mask_info[1]) + local_context_size\n \n orig_scene = orig_scene[min_x:max_x,min_y:max_y]\n orig_scene_no_mask = orig_scene_no_mask[min_x:max_x,min_y:max_y]\n mask_scene = mask_scene[min_x:max_x,min_y:max_y]\n \n dialation_mask = np.zeros(mask_scene.shape) + 255\n \n if dilation:\n dialation_mask = cv2.dilate(255-mask_scene, np.ones((local_context_size,local_context_size)))\n \n #implot(dialation_mask)\n #plt.imshow(dialation_mask, 'gray')\n \n for x in range(mask_scene.shape[0]):\n for y in range(mask_scene.shape[1]):\n if mask_scene[x, y] == 0:\n orig_scene[x, y, :] = 0\n orig_scene_no_mask[x,y,:] = 0\n if dilation:\n if dialation_mask[x,y] == 0:\n orig_scene[x, y, :] = 0\n \n return orig_scene, mask_scene, orig_scene_no_mask, dialation_mask", "def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = 
shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def main(years=(2000, 2019)):\n year_list = range(years[0], 
years[1] + 1)\n dfs = []\n for year in year_list:\n dfs.append(get_df(year))\n print(f\"Done: {len(dfs)} dataframes written\")", "def precover(\n self,\n orbit: Orbit,\n tolerance: float = 30 * ARCSEC,\n start_mjd: Optional[float] = None,\n end_mjd: Optional[float] = None,\n window_size: int = 7,\n datasets: Optional[set[str]] = None,\n ) -> Tuple[List[PrecoveryCandidate], List[FrameCandidate]]:\n # basically:\n \"\"\"\n find all windows between start and end of given size\n for each window:\n propagate to window center\n for each unique epoch,obscode in window:\n propagate to epoch\n find frames which match healpix of propagation\n for each matching frame\n find matching observations\n for each matching observation\n yield match\n \"\"\"\n if datasets is not None:\n self._warn_for_missing_datasets(datasets)\n\n if start_mjd is None or end_mjd is None:\n first, last = self.frames.idx.mjd_bounds()\n if start_mjd is None:\n start_mjd = first\n if end_mjd is None:\n end_mjd = last\n\n logger.info(\n \"precovering orbit %s from %.5f to %.5f, window=%d, datasets=%s\",\n orbit.orbit_id,\n start_mjd,\n end_mjd,\n window_size,\n datasets or \"all\",\n )\n\n windows = self.frames.idx.window_centers(\n start_mjd, end_mjd, window_size, datasets=datasets\n )\n\n # group windows by obscodes so that many windows can be searched at once\n matches = []\n for obscode, obs_windows in itertools.groupby(\n windows, key=lambda pair: pair[1]\n ):\n mjds = [window[0] for window in obs_windows]\n matches_window = self._check_windows(\n mjds,\n obscode,\n orbit,\n tolerance,\n start_mjd=start_mjd,\n end_mjd=end_mjd,\n window_size=window_size,\n datasets=datasets,\n )\n matches += list(matches_window)\n\n precovery_candidates, frame_candidates = sift_candidates(matches)\n\n return precovery_candidates, frame_candidates" ]
[ "0.6658748", "0.64081764", "0.6048875", "0.5828636", "0.53740543", "0.5318547", "0.5314894", "0.5258221", "0.5224046", "0.49241713", "0.48995483", "0.48989847", "0.48840585", "0.47406876", "0.47206843", "0.4713083", "0.46992207", "0.46542522", "0.4640126", "0.4630526", "0.46242535", "0.4550905", "0.45305178", "0.44995046", "0.44573098", "0.4433689", "0.44312298", "0.43917808", "0.43894905", "0.43639097", "0.43318665", "0.43205565", "0.4309011", "0.43019125", "0.4291098", "0.42726216", "0.42720616", "0.42537484", "0.42468128", "0.42295322", "0.42280176", "0.42260963", "0.42204076", "0.41973126", "0.4196152", "0.41940245", "0.418673", "0.41846398", "0.41845337", "0.4174935", "0.4168252", "0.41572848", "0.41517147", "0.41504192", "0.41490814", "0.41442484", "0.41423163", "0.41362107", "0.4108239", "0.40980932", "0.40925404", "0.40838462", "0.40825883", "0.4081386", "0.40788984", "0.40707836", "0.40680632", "0.40614784", "0.40590936", "0.40547174", "0.40541393", "0.40487733", "0.40440798", "0.40418544", "0.4034604", "0.40284756", "0.40257576", "0.40229768", "0.4022317", "0.40219206", "0.40156052", "0.4013673", "0.40136027", "0.40033844", "0.4002221", "0.39925238", "0.3989", "0.39884207", "0.39869708", "0.39856833", "0.39838007", "0.39802644", "0.3980034", "0.3979952", "0.39782917", "0.39775884", "0.39773372", "0.39758134", "0.39698473", "0.39637673" ]
0.76945615
0
Function to perform a 3-year moving window filter for a single land cover value (such as Forest as 1) across all years in an image. Calls the function mask3. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The temporal filter inspects the central position of three consecutive years; if the two outer years are identical but the centre year differs, the central pixels are reclassified to match their temporal neighbours' class. The function can be applied to whichever land cover values the user chooses, whether all of them or only a select few.
def applyWindow3years(imagem, value, bandNames): img_out = imagem.select(bandNames[0]) for i in np.arange(1, len(bandNames)-1): img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)])) img_out = img_out.addBands(imagem.select(bandNames[-1])) return img_out
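A minimal usage sketch (editorial illustration, not part of the dataset record): it assumes the Earth Engine Python API is initialized, that mask3 and applyWindow3years are defined as in the record above, and that the asset ID and band names below are hypothetical placeholders.

import ee
import numpy as np  # required by applyWindow3years (np.arange)

ee.Initialize()

# Multi-band classification image: one band per year, named in chronological order.
classified = ee.Image('users/example/landcover_stack')  # hypothetical asset
band_names = ['classification_2017', 'classification_2018',
              'classification_2019', 'classification_2020']

# Apply the 3-year moving-window filter to the Forest class (value 1).
forest_filtered = applyWindow3years(classified, 1, band_names)

# The same call can be repeated for any other land cover value the user chooses.
print(forest_filtered.bandNames().getInfo())

Because the first and last years have no complete 3-year window, the function passes them through unchanged, so the output keeps one band per input year.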
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n 
#Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), 
'{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def _mask3d(self, n, i, window):\n\n n = np.array(n)\n i = np.array(i)\n\n w2 = (window - 1) // 2\n\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n mask = np.zeros(n, dtype=np.bool)\n mask[x1:x2, y1:y2, z1:z2] = True\n\n return mask", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {}\n ) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(brz=(1000, 2750))\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain, seed=69420)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def test_3dtproject_temporal_filter_wf(self):\n \n self.wf = build_3dtproject_temporal_filter(\n bpHigh= .9, bpLow= 0.005, tr=2,\n import_file=self.sample_raw_image,\n export_file=self.export_path,\n base_dir=self.test_path, crashdump_dir=self.test_path,\n mask_file=self.sample_raw_image_mask\n )", "def load_copernicus_ammonia(layers, time_slice, lat_slice, lon_slice, verbose=False):\n xr_layers = []\n\n if 'agl' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_agl.nc').agl.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n if 'ags' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_ags.nc').ags.sel(\n time=time_slice, lat=lat_slice, 
lon=lon_slice))\n\n nh3 = sum(xr_layers)\n nh3.name = 'nh3'\n\n if verbose:\n\n shape = gpd.read_file('./shp/lombardia/lombardia.shp').to_crs(epsg=4326)\n\n ncols = len(xr_layers) + 1\n fig, axs = plt.subplots(ncols=ncols, figsize=(8 * ncols, 5))\n\n for i in range(len(xr_layers)):\n shape.plot(ax=axs[i], color='black', alpha=0.5)\n xr_layers[i].mean(dim='time').plot(ax=axs[i], alpha=0.5)\n\n shape.plot(ax=axs[len(xr_layers)], color='black', alpha=0.5)\n nh3.mean(dim='time').plot(ax=axs[len(xr_layers)], alpha=0.5)\n\n plt.show()\n\n return nh3", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {},\n show_mask: bool = False) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(detail_brz=1500, lines_brz=1000)\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t 
verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def maskClouds(self,img):\n\t\t\n\t\tscore = ee.Image(1.0);\n\t\t# Clouds are reasonably bright in the blue band.\n\t\tblue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))\n\t\tscore = score.min(blue_rescale);\n\n\t\t# Clouds are reasonably bright in all visible bands.\n\t\tvisible = img.select('red').add(img.select('green')).add(img.select('blue'))\n\t\tvisible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))\n\t\tscore = score.min(visible_rescale);\n\n\t\t# Clouds are reasonably bright in all infrared bands.\n\t\tinfrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))\n\t\tinfrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))\n\t\tscore = score.min(infrared_rescale);\n\n\t\t# Clouds are reasonably cool in temperature.\n\t\ttemp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))\n\t\tscore = score.min(temp_rescale);\n\n\t\t# However, clouds are not snow.\n\t\tndsi = img.normalizedDifference(['green', 'swir1']);\n\t\tndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))\n\t\tscore = score.min(ndsi_rescale).multiply(100).byte();\n\t\tmask = score.lt(self.env.cloudThreshold).rename(['cloudMask']);\n\t\timg = img.updateMask(mask);\n \n\t\treturn img;", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = 
imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] 
* yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. 
in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def apply_land_ocean_mask(data_cube, mask_cube, include_only):\n\n target_shape = data_cube.shape\n target_ndim = len(target_shape)\n\n if include_only == 'land':\n mask_array = numpy.where(mask_cube.data > 0.1, False, True)\n elif include_only == 'ocean':\n mask_array = numpy.where(mask_cube.data < 0.1, False, True)\n\n mask = broadcast_array(mask_array, [target_ndim - 2, target_ndim - 1], target_shape)\n assert mask.shape == target_shape \n\n data_cube.data = numpy.ma.asarray(data_cube.data)\n data_cube.data.mask = mask\n\n return data_cube", "def test_05_01_mask_of3D(self):\n x=cpi.Image()\n x.image = np.ones((10,10,3))\n self.assertTrue(x.mask.ndim==2)", "def cygx3IndFlux(self):\n # --------------------------------------------------------------------------------------------- #\n # Read data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n detect = lcTab['ts'] >= self.tsmin\n lcTab = lcTab[detect] \n\n ind08 = (lcTab['mjd'] > 54700) & (lcTab['mjd'] < 54900) \n flux08 = lcTab['flux'][ind08]\n fluxerr08 = lcTab['fluxerr'][ind08]\n index08 = lcTab['index'][ind08]\n indexerr08 = lcTab['indexerr'][ind08]\n\n ind09 = (lcTab['mjd'] > 54900) & (lcTab['mjd'] < 55100) \n flux09 = lcTab['flux'][ind09]\n fluxerr09 = lcTab['fluxerr'][ind09]\n index09 = lcTab['index'][ind09]\n indexerr09 = lcTab['indexerr'][ind09]\n\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux08, flux09), axis=0) ) ))) \n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n indplt = FermiPlot(savepath='', xsize=8.5, ysize=6)\n indplt.figname = os.path.join(self.workpath, 'IndvsFlux.pdf')\n indplt.xlabel = r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale)))\n indplt.ylabel = r'Index'\n indplt.mksize = 2\n indplt.color = self.lblue\n indplt.label = r'2008'\n indplt.plot(x=flux08/scale, xerr=fluxerr08/scale, y=index08, yerr=indexerr08)\n indplt.color = self.loran\n indplt.label = r'2009'\n indplt.plot(x=flux09/scale, xerr=fluxerr09/scale, y=index09, yerr=indexerr09)\n indplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(indplt.figname)) \n return", "def read_dr3_spectrum(path, common_dispersion=None, bounds_error=False):\n\n header_keys = (\"helio_rv\", \"z\", \"z_err\")\n\n with fits.open(path) as 
image:\n # data array indices:\n # flux, inverse variance, wavelength, andmask, ormask.\n flux, ivar, dispersion, and_mask, or_mask = image[0].data\n\n # Create a meta dictionary that contains things we will probably care \n # about later on, and the path so that we can trace provenance of other\n # things as needed.\n meta = dict(path=path)\n for header_key in header_keys:\n meta[header_key] = image[0].header[header_key.upper()]\n\n # Use the OR mask to set the inverse variances to zero for any pixels with\n # indications of being bad. For example, the bit mask meanings are:\n # 1 : BADCCD : bad pixel on CCD\n # 2 : BADPROFILE : bad profile in extraction\n # 3 : NOSKY : no sky information at this wavelength\n # 4 : BRIGHTSKY : sky level too high\n # 5 : BADCENTER : fibre trace out of the CCD\n # 6 : NODATA : no good data.\n\n # From http://dr3.lamost.org/doc/data-production-description\n\n # These are all bad things. And the LAMOST pipeline people are more familiar\n # with the data than we are. So let's believe them.\n\n rest_dispersion = dispersion * (1 - meta[\"z\"])\n ivar[or_mask > 0] = 0.0\n\n if common_dispersion is not None:\n flux = (interpolate.interp1d(rest_dispersion, flux,\n bounds_error=bounds_error, fill_value=1))(common_dispersion)\n ivar = (interpolate.interp1d(rest_dispersion, ivar,\n bounds_error=bounds_error, fill_value=0))(common_dispersion)\n\n rest_dispersion = common_dispersion\n ivar[ivar < 0] = 0\n\n assert np.all(ivar >= 0), \"negative inverse variances\"\n assert np.all(np.isfinite(flux)), \"non-finite fluxes\"\n\n return (rest_dispersion, flux, ivar, meta)", "def test_3d_time():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/data/test%03d.fid\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/data/test001.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n\n # and the first slice\n assert sdata.shape == (88, 1250)\n assert sdata.dtype == 'complex64'\n assert round(sdata[1,2].real,2) == -7.98\n assert round(sdata[1,2].imag,2) == 33.82\n assert round(sdata[22,5].real,2) == 22.65\n assert round(sdata[22,5].imag,2) == 13.65\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def colorfilter(FilteredMask):\n FilteredMask3channels=np.stack((FilteredMask,)*3, axis=-1)\n for i in range(len(FilteredMask)):\n for j in range(len(FilteredMask[0])):\n if FilteredMask[i,j]==0:\n FilteredMask3channels[i,j]=[0,0,0]\n if FilteredMask[i,j]==1:\n FilteredMask3channels[i,j]=[255,255,255]\n if FilteredMask[i,j]==2:\n FilteredMask3channels[i,j]=[255,0,0]\n if FilteredMask[i,j]==3:\n FilteredMask3channels[i,j]=[0,255,0]\n if FilteredMask[i,j]==4:\n FilteredMask3channels[i,j]=[0,0,255]\n return FilteredMask3channels", "def stack_tir(scene_urls,cloud_mask_bits,aoi,aoi_crs,\n subtract_median_lst=True,subtract_air_temp=False):\n if subtract_air_temp:\n ceda_password = get_ceda_password()\n at = met_climate.access_ukcp09(cf.ceda_username,ceda_password)\n\n \n # with rasterio.open(scene_bqa) as bqa:\n # with rasterio.open(scene_tir) as tir:\n\n # bqa_data,bqa_trans = ru.read_in_aoi(bqa,**aoi_kwargs)\n # tir_data,tir_trans = ru.read_in_aoi(tir,**aoi_kwargs)\n \n # bqa_data = bqa_data[0,:,:]\n # tir_data = tir_data[0,:,:]\n # tir_data = ma.array(tir_data,dtype=float,\n # mask=ru.mask_qa(bqa_data,bitmask=0b1))\n\n # (ymin,ymax) = (0, 
tir_data.shape[0])\n # (xmin,xmax) = (0, tir_data.shape[1])\n \n counter=-1\n for scene_url in scene_urls:\n counter+=1\n scene_tir = scene_url\n scene_bqa = scene_url.replace('B'+tirband,'B'+qaband)\n scene_red = scene_url.replace('B'+tirband,'B'+rband)\n scene_nir = scene_url.replace('B'+tirband,'B'+nband)\n scene_metadata = scene_url.replace('B'+tirband+'.TIF','MTL.txt')\n\n print('Reading scene {}'.format(counter+1))\n try:\n with rasterio.open(scene_bqa) as bqa:\n #print(scene_bqa)\n bqa_data,bqa_trans = ru.read_in_aoi(bqa,aoi=aoi,aoi_crs=aoi_crs)\n\n with rasterio.open(scene_tir) as tir:\n #print(scene_tir)\n tir_data,tir_trans = ru.read_in_aoi(tir,aoi=aoi,aoi_crs=aoi_crs)\n tir_crs = tir.crs\n tir_profile = tir.profile\n\n with rasterio.open(scene_red) as red:\n #print(scene_red)\n red_data,red_trans = ru.read_in_aoi(red,aoi=aoi,aoi_crs=aoi_crs)\n red_crs = red.crs\n\n with rasterio.open(scene_nir) as nir:\n #print(scene_nir)\n nir_data,nir_trans = ru.read_in_aoi(nir,aoi=aoi,aoi_crs=aoi_crs)\n \n except OSError as e:\n print('ERROR',e)\n print('skipping scene')\n counter = counter-1\n continue\n \n # Determine size of stack allowing for AoI to extend outside of scene\n if counter == 0:\n aoi_box = rasterio.warp.transform_bounds(aoi_crs,tir_crs,*aoi.values())\n aoi_left, aoi_bottom, aoi_right, aoi_top = aoi_box\n aoi_box = dict(zip(('minx','miny','maxx','maxy'),aoi_box))\n # rowmin,colmin = (bqa.index(aoi_left,aoi_top)) #,op=round))\n # rowmax,colmax = (bqa.index(aoi_right,aoi_bottom)) #,op=round))\n # The above two lines are fine but the following does not \n # require the rasterio dataset to be kept open\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,aoi_left,aoi_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,aoi_right,aoi_bottom)\n stack_height,stack_width = (rowmax-rowmin,colmax-colmin)\n lst_stack = (ma.zeros((len(scene_urls),stack_height,stack_width),\n dtype=np.float,fill_value=np.nan\n )+np.nan) \n \n # Determine size of intersect in THIS scene\n intersect = ru.aoi_scene_intersection(aoi_box,bqa)\n ins_left, ins_bottom, ins_right, ins_top = intersect.bounds\n #rowmin,colmin = (bqa.index(ins_left,ins_top,op=round))\n #rowmax,colmax = (bqa.index(ins_right,ins_bottom,op=round))\n # The above two lines are incorrect now that we read a window:\n # We need to transform the coordinates into the row,col of \n # the window, not the original file.\n rowmin,colmin = rasterio.transform.rowcol(tir_trans,ins_left,ins_top)\n rowmax,colmax = rasterio.transform.rowcol(tir_trans,ins_right,ins_bottom)\n\n try:\n # Subset data \n bqa_data = ma.array(bqa_data[0,rowmin:rowmax,colmin:colmax])\n tir_data = ma.array(tir_data[0,rowmin:rowmax,colmin:colmax])\n red_data = ma.array(red_data[0,rowmin:rowmax,colmin:colmax])\n nir_data = ma.array(nir_data[0,rowmin:rowmax,colmin:colmax])\n assert tir_data.shape == lst_stack.shape[1:]\n except (IndexError,AssertionError) as e:\n print('ERROR:',e)\n print('loop count',counter)\n print(tir_data.shape, lst_stack.shape)\n print(rowmin,rowmax,colmin,colmax)\n import pdb; pdb.set_trace()\n\n lst_data = lst.calculate_land_surface_temperature_NB(\n red_data, nir_data, tir_data,\n red_trans, tir_trans, \n red_crs, tir_crs, scene_metadata\n )\n \n # Masks\n smw = 11\n mask_all = filters.maximum_filter(\n ru.mask_qa(bqa_data,bits=cloud_mask_bits),size=smw\n )\n\n lst_data_mask_all = ma.array(lst_data,\n mask=mask_all,\n dtype=np.float,\n fill_value=np.nan) #.filled()\n\n # After masking, reproject\n # not necessary if they share a CRS\n if counter > 
0:\n assert tir_crs == prev_crs\n prev_crs = tir_crs\n\n # Now do some normalisation\n if subtract_air_temp:\n filename = scene_tir.split('/')[-1]\n datestring = filename.split('_')[3]\n\n atscene = met_climate.dummy_scene( \n tir_crs, tir_trans, aoi_box,(stack_height,stack_width))\n\n # import pdb; pdb.set_trace()\n # If the following fails, it may mean there was a problem setting up the session\n atdata = at.grid_temp_over_scene(\n atscene, datestring, interpolation='linear')\n atdata = atdata[rowmin:rowmax,colmin:colmax]\n assert lst_data_mask_all.shape == atdata.shape\n lst_data_mask_all = ma.array(\n lst_data_mask_all - atdata,\n mask=mask_all,\n fill_value=np.nan)\n \n if subtract_median_lst:\n # ALSO subtract median xLST\n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n elif subtract_median_lst:\n # Subtract median LST from scene (within QA mask) \n \n medval = ma.median(lst_data_mask_all)\n lst_data_mask_all = ma.array(\n lst_data_mask_all - medval,\n mask=mask_all,\n fill_value=np.nan)\n \n # Then add to stack\n lst_stack[counter,:,:] = lst_data_mask_all\n\n # Make profile for file output\n N_layers = counter+1\n tir_profile.update(\n dtype=rasterio.float64,\n width=stack_width,\n height=stack_height,\n transform=tir_trans,\n count=N_layers,\n compress='lzw'\n )\n\n\n return lst_stack, tir_profile", "def _gaufit3d(self, coa_map, lx=None, ly=None, lz=None, thresh=0., win=7):\n\n # Get shape of 3-D coalescence map and max coalesence grid location\n nx, ny, nz = coa_map.shape\n mx, my, mz = np.unravel_index(np.nanargmax(coa_map), coa_map.shape)\n\n # Only use grid cells above threshold value, and within the specified\n # window around the coalescence peak\n flg = np.logical_and(coa_map > thresh,\n self._mask3d([nx, ny, nz], [mx, my, mz], win))\n ix, iy, iz = np.where(flg)\n\n # Subtract mean of entire 3-D coalescence map from the local grid\n # window so it is better approximated by a gaussian (which goes to zero\n # at infinity)\n coa_map = coa_map - np.nanmean(coa_map)\n\n # Fit 3-D gaussian function\n ncell = len(ix)\n\n if not lx:\n lx = np.arange(nx)\n ly = np.arange(ny)\n lz = np.arange(nz)\n\n if lx.ndim == 3:\n iloc = [lx[mx, my, mz], ly[mx, my, mz], lz[mx, my, mz]]\n x = lx[ix, iy, iz] - iloc[0]\n y = ly[ix, iy, iz] - iloc[1]\n z = lz[ix, iy, iz] - iloc[2]\n else:\n iloc = [lx[mx], ly[my], lz[mz]]\n x = lx[ix] - iloc[0]\n y = ly[iy] - iloc[1]\n z = lz[iz] - iloc[2]\n\n X = np.c_[x * x, y * y, z * z,\n x * y, x * z, y * z,\n x, y, z, np.ones(ncell)].T\n Y = -np.log(np.clip(coa_map.astype(np.float64)[ix, iy, iz],\n 1e-300, np.inf))\n\n X_inv = np.linalg.pinv(X)\n P = np.matmul(Y, X_inv)\n G = -np.array([2 * P[0], P[3], P[4],\n P[3], 2 * P[1], P[5],\n P[4], P[5], 2 * P[2]]).reshape((3, 3))\n H = np.array([P[6], P[7], P[8]])\n loc = np.matmul(np.linalg.inv(G), H)\n cx, cy, cz = loc\n\n K = P[9] \\\n - P[0] * cx ** 2 \\\n - P[1] * cy ** 2 \\\n - P[2] * cz ** 2 \\\n - P[3] * cx * cy \\\n - P[4] * cx * cz \\\n - P[5] * cy * cz \\\n\n M = np.array([P[0], P[3] / 2, P[4] / 2,\n P[3] / 2, P[1], P[5] / 2,\n P[4] / 2, P[5] / 2, P[2]]).reshape(3, 3)\n egv, vec = np.linalg.eig(M)\n sgm = np.sqrt(0.5 / np.clip(np.abs(egv), 1e-10, np.inf))/2\n val = np.exp(-K)\n csgm = np.sqrt(0.5 / np.clip(np.abs(M.diagonal()), 1e-10, np.inf))\n\n # Convert back to whole-grid coordinates\n gau_3d = [loc + iloc, vec, sgm, csgm, val]\n\n # Convert grid location to XYZ / coordinates\n xyz = 
self.lut.xyz2loc(np.array([[gau_3d[0][0],\n gau_3d[0][1],\n gau_3d[0][2]]]),\n inverse=True)\n loc_gau = self.lut.xyz2coord(xyz)[0]\n\n loc_gau_err = np.array([gau_3d[2][0] * self.lut.cell_size[0],\n gau_3d[2][1] * self.lut.cell_size[1],\n gau_3d[2][2] * self.lut.cell_size[2]])\n\n return loc_gau, loc_gau_err", "def test_3dtproject_temporal_filter_wf_scrubs(self):\n\n self.wf = build_3dtproject_temporal_filter(\n bpHigh= .9, bpLow= 0.005, tr=2,\n scrub_targets=True,\n import_file=self.sample_raw_image,\n export_file=self.export_path,\n base_dir=self.test_path, crashdump_dir=self.test_path,\n mask_file=self.sample_raw_image_mask\n )\n scrub_targets = [1] * 100\n scrub_targets[46:52] = [0] * 6\n self.highlight_ranges = [(45.5, 52.5)]\n self.wf.inputs.inputnode.scrub_targets = scrub_targets", "def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, 
r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def mask_to_objects_3dt(mask, background=0, offset=None):\n if mask.ndim != 4:\n raise ValueError(\"Cannot handle image with ndim different from 4 ({} dim. given).\".format(mask.ndim))\n duration = mask.shape[0]\n offset_xyz = offset[1:]\n offset_t = offset[0]\n objects = dict()\n for t in range(duration):\n time_objects = mask_to_objects_3d(\n mask,\n background=background,\n offset=offset_xyz,\n assume_unique_labels=True,\n time=False\n )\n for time_slices in time_objects:\n label = time_slices[0].label\n slices_3dt = [ # transform type of objects to\n AnnotationSlice(\n polygon=s.polygon,\n label=s.label,\n depth=s.depth,\n time=t + offset_t\n ) for s in time_slices\n ]\n objects[label] = objects.get(label, []) + [slices_3dt]\n return objects.values()", "def apply_tracking3(td, time_us=1000, alpha=0.7, threshold=-1):\n assert (alpha >= 0)\n assert (alpha <= 1)\n mix = 1 - alpha\n track_x = center_x = float(td.width / 2)\n track_y = center_y = float(td.height / 2)\n threshold_sq = math.floor(center_y ** 2)\n\n if threshold > 0:\n threshold_sq = math.floor(threshold ** 2)\n\n copy = np.copy(td.data).view(np.recarray)\n offset_x = offset_y = 0\n offset_x_arr = np.zeros(copy.size, np.float32)\n offset_y_arr = np.zeros(copy.size, np.float32)\n offset_index = 0 # used to keep track of the offsets we are writing to\n\n for start_ts in range(copy[0].ts, copy[-1].ts, time_us):\n end_ts = start_ts + time_us\n frame_data = copy[(copy.ts >= start_ts) & (copy.ts < end_ts)]\n distances = ((frame_data.x - track_x) ** 2) + (\n (frame_data.y - track_y) ** 2)\n valid_data = frame_data[distances < threshold_sq]\n\n if valid_data.size > 0:\n x_avg = float(np.sum(valid_data.x)) / valid_data.size\n y_avg = float(np.sum(valid_data.y)) / valid_data.size\n track_x = (track_x * alpha) + (x_avg * mix)\n track_y = (track_y * alpha) + (y_avg * mix)\n\n offset_x = int(round(center_x - track_x))\n offset_y = int(round(center_y - track_y))\n offset_x_arr[offset_index:offset_index + frame_data.size] = \\\n offset_x\n offset_y_arr[offset_index:offset_index + frame_data.size] = \\\n offset_y\n offset_index += frame_data.size\n\n offset_x_arr[offset_index:] = offset_x\n offset_y_arr[offset_index:] = offset_y\n copy.x = (copy.x + offset_x_arr).astype(np.uint8)\n copy.y = (copy.y + offset_y_arr).astype(np.uint8)\n # remove the events that are out of bounds\n return copy[(copy.x >= 0) & (copy.y >= 0) & (copy.x < td.width) & (\n copy.y < td.height)]", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, 
normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate 
the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def itkBoundedReciprocalImageFilterIF3IF3_cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIF3IF3_cast(*args)", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = 
fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def load_3D_netCDF(filename, var_name = \"prcp\", lat_name = \"lats\", lon_name = \"lons\", time_name = \"times\"):\n data = Dataset(filename, 'r')\n var = data[var_name][:]\n lats = data[lat_name][:]\n lons = data[lon_name][:]\n times = data[time_name][:]\n data.close()\n return var, lats, lons, times", "def plot_three(spectrum, thresh=1):\n plt.figure(figsize=(10, 4))\n plt.subplot(1,3,1)\n spectrum.plot()\n plt.subplot(1,3,2)\n plot_angle(spectrum, thresh=thresh)\n plt.subplot(1,3,3)\n wave = spectrum.make_wave()\n wave.unbias()\n wave.normalize()\n wave.segment(duration=0.01).plot()\n display(wave.make_audio())", "def testMask3D(self):\n mask = np.ones((3, 3, 3), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5))\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)", "def s3_masking(\n S8_BT_in,\n S9_BT_in,\n S1_reflectance_an,\n S5_reflectance_an,\n S7_BT_in,\n):\n return s3_masking_old(\n S8_BT_in,\n S9_BT_in,\n S1_reflectance_an,\n S5_reflectance_an,\n S7_BT_in,\n solar_angle=None,\n max_solar_angle=None,\n)\n bt11 = S8_BT_in\n bt12 = S9_BT_in\n r550 = S1_reflectance_an\n r1600 = S5_reflectance_an\n bt37 = S7_BT_in\n\n cloud_mask = _scda2(r550, r1600, bt37, bt11, bt12)\n\n mask = np.zeros_like(cloud_mask)\n mask[cloud_mask] = 1\n mask[np.bitwise_not(cloud_mask)] = 2\n\n mask[(r550 + r1600) <= 0] = 0 # No data\n\n return mask", "def avg_3_op(array_1, array_2, array_3, nodata):\r\n result = numpy.empty_like(array_1)\r\n result[:] = nodata\r\n valid_mask = (\r\n ~numpy.isclose(array_1, nodata) &\r\n ~numpy.isclose(array_2, nodata) &\r\n ~numpy.isclose(array_3, nodata))\r\n result[valid_mask] = (\r\n array_1[valid_mask] +\r\n array_2[valid_mask] +\r\n array_3[valid_mask]) / 3.\r\n return result", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = 
[nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def construct3by3(listOfDBZs, showCoast=True, \n plotCentroidTrajectory=True, # this parameter and below added 2013-11-18\n DBZstream=\"\",\n verbose=False,\n ): #\n from armor.geometry.frames import setSideBySide, setUpDown\n L = listOfDBZs #alias\n #print [type(v) for v in L] #debug\n #time.sleep(3) #debug\n #L = [v for v in L if isinstance(v, dbz)]\n for im in L:\n if not isinstance(im.matrix, np.ma.MaskedArray):\n im.matrix = np.ma.array(im.matrix)\n im.load()\n im.setThreshold(0)\n if plotCentroidTrajectory:\n im.matrix= im.shortTermTrajectory(hours=6, timeInterval=3, radius=40, verbose=verbose, drawCoast=showCoast).matrix\n im.drawFrame(intensity=9999)\n #im.show()\n #im.showWithCoast(intensity=68)\n #if showCoast:\n # im.drawCoast(intensity=9999)\n #im.show()\n\n #debug\n #print L\n #print L.name\n #print '\\n'.join([v.name for v in L])\n #time.sleep(1)\n #print \"shapes for L[5], L[0], L[6]:\", L[5].matrix.shape, L[0].matrix.shape, L[6].matrix.shape\n #debug end\n if len(L) < 9:\n for i in range(9-len(L)):\n L.append(dbz(name='', matrix=L[0].matrix*0))\n #print [type(v) for v in L] #debug\n #time.sleep(3) #debug\n #L = [v for v in L if isinstance(v, dbz)]\n\n a = setSideBySide(L[1:4])\n b = setSideBySide([L[4],L[0],L[5]]) #bug fixed 2013-11-22\n c = setSideBySide(L[6:9])\n #output = setUpDown([a,b,c])\n output = setUpDown([c, b, a]) # 2013-11-22 \n output.name = L[1].name + ', ' + L[2].name + ', ' + L[3].name + '\\n' +\\\n L[4].name + ', ' + L[0].name + ', ' + L[5].name + '\\n' +\\\n L[6].name + ', ' + L[7].name + ', ' + L[8].name\n return output", "def preprocess_land_cover(\n src_files, dst_raster, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n if os.path.isfile(dst_raster) and not overwrite:\n log.info(\"Land cover data already preprocessed. 
Skipping.\")\n return\n log.info(\"Starting preprocessing of land cover data.\")\n LC_CLASSES = [\n \"bare\",\n \"crops\",\n \"grass\",\n \"moss\",\n \"shrub\",\n \"tree\",\n \"urban\",\n \"water-permanent\",\n \"water-seasonal\",\n ]\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n reprojected_files = []\n tile_names = unique_tiles(tmpdir)\n\n if not tile_names:\n raise MissingDataError(\"Land cover data not found.\")\n\n for lc_class in LC_CLASSES:\n tiles = [\n p.as_posix()\n for p in tmpdir.glob(f\"*{lc_class}-coverfraction-layer*.tif\")\n ]\n if len(tiles) > 1:\n src_file = merge_tiles(\n tiles, os.path.join(tmpdir, f\"{lc_class}_mosaic.tif\"), nodata=255,\n )\n else:\n src_file = tiles[0]\n reprojected_files.append(\n reproject(\n src_raster=src_file,\n dst_raster=os.path.join(tmpdir, f\"{lc_class}.tif\"),\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=255,\n dst_nodata=255,\n dst_dtype=\"Byte\",\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n )\n\n if len(reprojected_files) > 1:\n raster = concatenate_bands(\n src_files=reprojected_files,\n dst_file=dst_raster,\n band_descriptions=LC_CLASSES,\n )\n else:\n raster = reprojected_files[0]\n\n if geom:\n mask_raster(raster, geom)", "def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = ncin.variables['sst_dtime'][:]\n sst_depth_dtime = ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n sstfill = 
ncin.variables['sea_surface_temperature']._FillValue\n sstao = ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n verbose = kw.pop('verbose',False)\r\n ### prepare RO results and sort them according to sweep point\r\n for a in [a_lt3,a_lt4]:\r\n a.pts = a.g.attrs['sweep_length']\r\n a.ssros = a.agrp['ssro_results'].value\r\n a.readouts = a.g.attrs['nr_of_ROsequences']\r\n # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n ### correlate the ROs with each other by making a boolean filter:\r\n ### variables here are described in terms of spin states!\r\n m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n \r\n ### now define unique identifiers for each Ro correlation and recast the correlations into a single array.\r\n ### As identifieres I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... and so forth\r\n RO_correlators = np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n + np.array(len(a_lt3.ssros)*[4])*m00 \r\n ### PH - added to make sure that has a full set of repetitions\r\n RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n \r\n ### now sort the correlators and the adwin fltr according to the sweep pts\r\n sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n ### from now on: no numpy magic anymore. from here it is brutforce 'for-looping'\r\n ### (all conceived arrays will have different lengths due to temporal filtering. 
this break most np methods)\r\n ### although vstack and hstack would probably work...\r\n \r\n return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurence\r\n for i in range(a_lt3.pts): \r\n correlators_at_sweep_pt = [0,0,0,0]\r\n for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def s3_masking_old(\n S8_BT_in,\n S9_BT_in,\n S1_reflectance_an,\n S5_reflectance_an,\n S7_BT_in,\n solar_angle=None,\n max_solar_angle=None,\n):\n ## 'BT 10850 nm'\n ## 'BT 12000 nm'\n ## 'reflectance 555 nm (green)'\n ## 'reflectance 1610 nm (SWIR 2)'\n ## 'BT 3740 nm'\n bt11 = S8_BT_in\n bt12 = S9_BT_in\n r550 = S1_reflectance_an\n r1600 = S5_reflectance_an\n bt37 = S7_BT_in\n\n nl, ns = bt11.shape[:2]\n dt = bt11.dtype\n\n ndsi = (r550 - r1600) / (r550 + r1600) ## snowindex\n\n # Determine where in image there is data\n inside_map = np.zeros((nl, ns), dt)\n inside_map[:] = 0\n inside_map[(r550 >= -1.0) & (r1600 >= -1.0)] = 1\n\n # Cloud algorithm\n cloud_map_1 = ((r550 > 0.30) & (ndsi / r550 < 0.8) & (bt12 <= 290)) * 1\n cloud_map_2 = (\n (bt11 - bt37 < -13)\n & (r550 > 0.15)\n & (ndsi >= -0.3)\n & (r1600 > 0.1)\n & (bt12 <= 293)\n ) * 1\n\n thrmax = ((r550 < 0.75) & (bt12 > 265)) * -5.5\n thrmax[((r550 >= 0.75) | (bt12 <= 265))] = -8.0\n\n s = (r550 > 0.75) * 1.1\n s[(r550 <= 0.75)] = 1.5\n thr1 = 0.5 * bt12 - 133\n thr = (thr1 < thrmax) * thr1 + (thr1 >= thrmax) * thrmax\n cloud_map_3 = (\n (bt11 - bt37 < thr)\n & (ndsi / r550 < s)\n & (-0.02 <= ndsi)\n & (ndsi <= 0.75)\n & (bt12 <= 270)\n & (r550 > 0.18)\n ) * 1\n cloud_map_4 = (bt11 - bt37 < -30) * 1\n\n # Make cloud map\n cloud_map = np.zeros_like(bt11, dtype=\"int8\") # Default to no data\n cloud_map[\n ((cloud_map_1 + cloud_map_2 + cloud_map_3 + cloud_map_4) >= 1)\n ] = 1 # 1 is used to indicate cloud\n cloud_map[\n ((cloud_map_1 + cloud_map_2 + cloud_map_3 + cloud_map_4) < 1)\n ] = 2 # 2 is used to indicate non-cloud\n\n # Mask out pixels with no data\n cloud_map[np.isnan(bt11)] = 0\n cloud_map[np.isnan(bt12)] = 0\n cloud_map[np.isnan(r550)] = 0\n cloud_map[np.isnan(r1600)] = 0\n cloud_map[np.isnan(bt37)] = 0\n cloud_map[inside_map == 0] = 0\n\n cloud_map[(r550 + r1600) <= 0] = 0 # Outside data\n\n if solar_angle is not None:\n cloud_map[solar_angle.mask] = 0\n cloud_map[(solar_angle > max_solar_angle)] = 0\n\n return cloud_map", "def shift_photo_north(gflux=None, rflux=None, zflux=None):\n # ADM if floats were sent, treat them like arrays.\n flt = False\n if _is_row(gflux):\n flt = True\n gflux = np.atleast_1d(gflux)\n rflux = np.atleast_1d(rflux)\n zflux = np.atleast_1d(zflux)\n\n # ADM only use the g-band color shift when r and g are non-zero\n gshift = gflux * 10**(-0.4*0.004)\n w = np.where((gflux != 0) & (rflux != 0))\n gshift[w] = (gflux[w] * 10**(-0.4*0.004) * (gflux[w]/rflux[w])**complex(-0.059)).real\n\n # ADM only use the r-band color shift when r and z are non-zero\n # ADM and only use the z-band color shift when r and z are non-zero\n w = np.where((rflux != 0) & (zflux != 0))\n rshift = rflux * 10**(0.4*0.003)\n zshift = zflux * 10**(0.4*0.013)\n\n rshift[w] = (rflux[w] * 10**(0.4*0.003) * (rflux[w]/zflux[w])**complex(-0.024)).real\n zshift[w] = (zflux[w] * 10**(0.4*0.013) * 
(rflux[w]/zflux[w])**complex(+0.015)).real\n\n if flt:\n return gshift[0], rshift[0], zshift[0]\n\n return gshift, rshift, zshift", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 
180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? 
If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! 
All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = 
ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del data['{}_var_'.format(filt.lower())]\n\n return data", "def run(path, f3_param=[[1, 0.01]], minArea=20, saveNumber=0):\n\tprint('=== path:', path)\n\t\n\t# load x/y/z voxel size (assumes .tif was saved with Fiji\n\txVoxel, yVoxel, zVoxel = readVoxelSize(path)\n\tprint(' xVoxel:', xVoxel, 'yVoxel:', yVoxel, 'zVoxel:', zVoxel)\n\t\n\t# load the data\n\treader = AICSImage(path) \n\tIMG = reader.data.astype(np.float32)\n\tprint(' IMG.shape:', IMG.shape)\n\n\tstructure_channel = 0\n\tstruct_img0 = IMG[0,structure_channel,:,:,:].copy()\n\n\t# give us a guess for our intensity_scaling_param parameters\n\t#from aicssegmentation.core.pre_processing_utils import suggest_normalization_param\n\t#suggest_normalization_param(struct_img0)\n\tlow_ratio, high_ratio = my_suggest_normalization_param(struct_img0)\n\n\t#intensity_scaling_param = [0.0, 22.5]\n\tintensity_scaling_param = [low_ratio, high_ratio]\n\tprint('*** intensity_normalization() intensity_scaling_param:', intensity_scaling_param)\n\t\n\t# intensity normalization\n\tprint('=== calling intensity_normalization()')\n\tstruct_img = intensity_normalization(struct_img0, scaling_param=intensity_scaling_param)\n\n\t# smoothing with edge preserving smoothing \n\tprint('=== calling edge_preserving_smoothing_3d()')\n\tstructure_img_smooth = edge_preserving_smoothing_3d(struct_img)\n\n\t#\n\t\"\"\"\n\tsee: notebooks/playground_filament3d.ipynb\n\n\tscale_x is set based on the estimated thickness of your target filaments.\n\t\tFor example, if visually the thickness of the filaments is usually 3~4 pixels,\n\t\tthen you may want to set scale_x as 1 or something near 1 (like 1.25).\n\t\tMultiple scales can be used, if you have filaments of very different thickness.\n\tcutoff_x is a threshold applied on the actual filter reponse to get the binary result.\n\t\tSmaller cutoff_x may yielf more filaments, especially detecting more dim ones and thicker segmentation,\n\t\twhile larger cutoff_x could be less permisive and yield less filaments and slimmer segmentation.\n\t\"\"\"\n\t#f3_param = [[1, 
0.01]] # [scale_1, cutoff_1]\n\tprint('=== calling filament_3d_wrapper() f3_param:', f3_param)\n\tbw = filament_3d_wrapper(structure_img_smooth, f3_param)\n\t\t\n\t#\n\t#minArea = 20 # from recipe\n\tprint('=== calling remove_small_objects() minArea:', minArea)\n\tseg = remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False)\n\n\t#\n\t# save original file again (with saveNumber\n\tsaveNumberStr = ''\n\tif saveNumber>1:\n\t\tsaveNumberStr = '_' + str(saveNumber)\n\t\t\n\t#\n\t# save mask\n\tseg = seg >0\n\tout=seg.astype(np.uint8)\n\tout[out>0]=255\n\t\n\t# save _dvMask\n\tmaskPath = os.path.splitext(path)[0] + '_dvMask' + saveNumberStr + '.tif'\n\tprint('=== saving 3D mask [WILL FAIL IF FILE EXISTS] as maskPath:', maskPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(maskPath)\n\t\twriter.save(out)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, maskPath:', maskPath)\n\t\t\n\t#\n\t# analyze skeleton, take a 3d mask and analyze as a 1-pixel skeleton\n\tretDict0, mySkeleton = myAnalyzeSkeleton(out=out, imagePath=path)\n\tretDict = OrderedDict()\n\tretDict['tifPath'] = path\n\tretDict['maskPath'] = maskPath\n\tretDict['tifFile'] = os.path.basename(path)\n\tretDict['xVoxel'] = xVoxel\n\tretDict['yVoxel'] = yVoxel\n\tretDict['zVoxel'] = zVoxel\n\t#\n\tretDict['params'] = OrderedDict()\n\tretDict['params']['saveNumber'] = saveNumber\n\tretDict['params']['intensity_scaling_param'] = intensity_scaling_param # calculated in my_suggest_normalization_param\n\tretDict['params']['f3_param'] = f3_param[0] # cludge, not sure where to put this. f3_param is a list of list but screws up my .csv output !!!\n\tretDict['params']['minArea'] = minArea\n\n\tretDict.update( retDict0 )\n\n\t# save 1-pixel skeleton: mySkeleton\n\t# save _dvSkel\n\tskelPath = os.path.splitext(path)[0] + '_dvSkel' + saveNumberStr + '.tif'\n\tprint('=== saving 3D skel [WILL FAIL IF FILE EXISTS] as maskPath:', skelPath)\n\ttry:\n\t\twriter = omeTifWriter.OmeTifWriter(skelPath)\n\t\twriter.save(mySkeleton)\n\texcept(OSError) as e:\n\t\tprint(' error: file already exists, di dnot resave, skelPath:', skelPath)\n\t\t\t\n\treturn retDict", "def include_wcs_in_masks(input_images):\n img_list = [astroim.Astroim(im_name, memmap=True) for im_name in input_images]\n mask_names = [im.primary_header.get(\"MASK\") for im in img_list]\n output = []\n for im_object, mask_name in zip(img_list, mask_names):\n with fits.open(mask_name, 'readonly') as mask:\n mask_header = im_object.chips[0].header.hdr\n mask_data = mask[0].data.copy()\n mask_data[mask_data>0] = 1\n _, path = tempfile.mkstemp(suffix=\".fits\")\n fits.writeto(path, mask_data * 1., mask_header, clobber=True)\n output.append(path)\n return output", "def masked(months=range(1, 13), years=[2009], folder=\"data/\", layer=\"BHR_VIS\"):\n data = []\n file_template = 'NETCDF:\"{:s}\":{:s}' # Template for the Netcdf path\n # the actual filename\n fname_template = '{:s}/GlobAlbedo.merge.albedo.05.{:d}{:02d}.nc'\n for year in years:\n for month in months:\n fname = fname_template.format(folder, year, month)\n netcdf_fname = file_template.format(fname, layer)\n g = gdal.Open(netcdf_fname)\n if g is None:\n raise IOError(\"Problem with reading file {}\".format(fname))\n the_data = g.ReadAsArray()\n masked_data = np.ma.array(the_data,mask=np.isnan(the_data))\n data.append(masked_data)\n output_data = np.ma.array(data)\n return output_data", "def _covfit3d(self, coa_map, thresh=0.88, win=None):\n\n # Normalise\n coa_map = coa_map / 
(np.nanmax(coa_map))\n\n # Get shape of 3-D coalescence map and max coalesence grid location\n nx, ny, nz = coa_map.shape\n mx, my, mz = np.unravel_index(np.nanargmax(coa_map), coa_map.shape)\n\n # If window is specified, clip the grid to only look here.\n if win:\n flg = np.logical_and(coa_map > thresh,\n self._mask3d([nx, ny, nz], [mx, my, mz], win))\n ix, iy, iz = np.where(flg)\n msg = \"Variables\", min(ix), max(ix), min(iy), max(iy), min(iz), max(iz)\n self.output.log(msg, self.log)\n else:\n flg = np.where(coa_map > thresh, True, False)\n ix, iy, iz = nx, ny, nz\n\n smp_weights = coa_map.flatten()\n smp_weights[~flg.flatten()] = np.nan\n\n lc = self.lut.cell_count\n # Ordering below due to handedness of the grid\n ly, lx, lz = np.meshgrid(np.arange(lc[1]),\n np.arange(lc[0]),\n np.arange(lc[2]))\n x_samples = lx.flatten() * self.lut.cell_size[0]\n y_samples = ly.flatten() * self.lut.cell_size[1]\n z_samples = lz.flatten() * self.lut.cell_size[2]\n\n ssw = np.nansum(smp_weights)\n\n # Expectation values:\n x_expect = np.nansum(smp_weights * x_samples) / ssw\n y_expect = np.nansum(smp_weights * y_samples) / ssw\n z_expect = np.nansum(smp_weights * z_samples) / ssw\n\n # Covariance matrix:\n cov_matrix = np.zeros((3, 3))\n cov_matrix[0, 0] = np.nansum(smp_weights\n * (x_samples - x_expect) ** 2) / ssw\n cov_matrix[1, 1] = np.nansum(smp_weights\n * (y_samples - y_expect) ** 2) / ssw\n cov_matrix[2, 2] = np.nansum(smp_weights\n * (z_samples - z_expect) ** 2) / ssw\n cov_matrix[0, 1] = np.nansum(smp_weights\n * (x_samples - x_expect)\n * (y_samples - y_expect)) / ssw\n cov_matrix[1, 0] = cov_matrix[0, 1]\n cov_matrix[0, 2] = np.nansum(smp_weights\n * (x_samples - x_expect)\n * (z_samples - z_expect)) / ssw\n cov_matrix[2, 0] = cov_matrix[0, 2]\n cov_matrix[1, 2] = np.nansum(smp_weights\n * (y_samples - y_expect)\n * (z_samples - z_expect)) / ssw\n cov_matrix[2, 1] = cov_matrix[1, 2]\n\n expect_vector_cov = np.array([x_expect,\n y_expect,\n z_expect],\n dtype=float)\n loc_cov_gc = np.array([[expect_vector_cov[0] / self.lut.cell_size[0],\n expect_vector_cov[1] / self.lut.cell_size[1],\n expect_vector_cov[2] / self.lut.cell_size[2]]])\n loc_cov_err = np.array([np.sqrt(cov_matrix[0, 0]),\n np.sqrt(cov_matrix[1, 1]),\n np.sqrt(cov_matrix[2, 2])])\n\n # Convert grid location to XYZ / coordinates\n xyz = self.lut.xyz2loc(loc_cov_gc, inverse=True)\n loc_cov = self.lut.xyz2coord(xyz)[0]\n\n return loc_cov, loc_cov_err", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = [(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n 
pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def build_mask(sst, qual, qual_thresh=2, temp_bounds=(-2,33)):\n sst[np.isnan(sst)] = np.nan\n qual[np.isnan(qual)] = np.nan\n # Deal with NaN\n masks = np.logical_or(np.isnan(sst), np.isnan(qual))\n # Temperature bounds and quality\n qual_masks = np.zeros_like(masks)\n qual_masks[~masks] = (qual[~masks] > qual_thresh) | (sst[~masks] <= temp_bounds[0]) | (sst[~masks] > temp_bounds[1])\n masks = np.logical_or(masks, qual_masks)\n # Return\n return masks", "def _read_cdf_wind3dp(fname, ignore_vars=[]):\n import astropy.units as u\n from cdflib.epochs import CDFepoch\n from packaging.version import Version\n from sunpy import log\n from sunpy.timeseries import GenericTimeSeries\n from sunpy.util.exceptions import warn_user\n cdf = cdflib.CDF(str(fname))\n # Extract the time varying variables\n cdf_info = cdf.cdf_info()\n meta = cdf.globalattsget()\n if hasattr(cdflib, \"__version__\") and Version(cdflib.__version__) >= Version(\"1.0.0\"):\n all_var_keys = cdf_info.rVariables + cdf_info.zVariables\n else:\n all_var_keys = cdf_info['rVariables'] + cdf_info['zVariables']\n var_attrs = {key: cdf.varattsget(key) for key in all_var_keys}\n\n # Get keys that depend on time\n # var_keys = [var for var in var_attrs if 'DEPEND_0' in var_attrs[var] and var_attrs[var]['DEPEND_0'] is not None]\n # Manually define keys that depend on time for Wind/3DP cdf files, as they don't follow the standard\n var_keys = all_var_keys\n\n # Get unique time index keys\n # time_index_keys = sorted(set([var_attrs[var]['DEPEND_0'] for var in var_keys]))\n # Manually define time index key for Wind/3DP cdf files, as they don't follow the standard\n time_index_keys = [var_keys.pop(var_keys.index('Epoch'))]\n\n all_ts = []\n # For each time index, construct a GenericTimeSeries\n for index_key in time_index_keys:\n try:\n index = cdf.varget(index_key)\n except ValueError:\n # Empty index for cdflib >= 0.3.20\n continue\n # TODO: use to_astropy_time() instead here when we drop pandas in timeseries\n index = CDFepoch.to_datetime(index)\n df = pd.DataFrame(index=pd.DatetimeIndex(name=index_key, data=index))\n # units = {}\n\n # for var_key in sorted(var_keys):\n for var_key in var_keys:\n if var_key in ignore_vars:\n continue # leave for-loop, skipping var_key\n\n attrs = var_attrs[var_key]\n # Skip the following check for Wind/3DP cdf files, as they don't follow the standard\n # # If this variable doesn't depend on this index, continue\n # if attrs['DEPEND_0'] != index_key:\n # continue\n\n # Get data\n if 
hasattr(cdflib, \"__version__\") and Version(cdflib.__version__) >= Version(\"1.0.0\"):\n var_last_rec = cdf.varinq(var_key).Last_Rec\n else:\n var_last_rec = cdf.varinq(var_key)['Last_Rec']\n if var_last_rec == -1:\n log.debug(f'Skipping {var_key} in {fname} as it has zero elements')\n continue\n\n data = cdf.varget(var_key)\n\n # Skip the following code block for Wind/3DP cdf files, as they don't follow the standard\n # # Set fillval values to NaN\n # # It would be nice to properley mask these values to work with\n # # non-floating point (ie. int) dtypes, but this is not possible with pandas\n # if np.issubdtype(data.dtype, np.floating):\n # data[data == attrs['FILLVAL']] = np.nan\n\n # Skip the following code block for Wind/3DP cdf files, as they don't follow the standard\n # # Get units\n # if 'UNITS' in attrs:\n # unit_str = attrs['UNITS']\n # try:\n # unit = u.Unit(unit_str)\n # except ValueError:\n # if unit_str in _known_units:\n # unit = _known_units[unit_str]\n # else:\n # warn_user(f'astropy did not recognize units of \"{unit_str}\". '\n # 'Assigning dimensionless units. '\n # 'If you think this unit should not be dimensionless, '\n # 'please raise an issue at https://github.com/sunpy/sunpy/issues')\n # unit = u.dimensionless_unscaled\n # else:\n # warn_user(f'No units provided for variable \"{var_key}\". '\n # 'Assigning dimensionless units.')\n # unit = u.dimensionless_unscaled\n\n if data.ndim > 3:\n # Skip data with dimensions >= 3 and give user warning\n warn_user(f'The variable \"{var_key}\" has been skipped because it has more than 3 dimensions, which is unsupported.')\n elif data.ndim == 3:\n # Multiple columns, give each column a unique label.\n # Numbering hard-corded to Wind/3DP data!\n for j in range(data.T.shape[0]):\n for i, col in enumerate(data.T[j, :, :]):\n var_key_mod = var_key + f'_E{j}'\n df[var_key_mod + f'_P{i}'] = col\n # units[var_key_mod + f'_{i}'] = unit\n elif data.ndim == 2:\n # Multiple columns, give each column a unique label\n for i, col in enumerate(data.T):\n df[var_key + f'_{i}'] = col\n # units[var_key + f'_{i}'] = unit\n else:\n # Single column\n df[var_key] = data\n # units[var_key] = unit\n\n # all_ts.append(GenericTimeSeries(data=df, units=units, meta=meta))\n\n # if not len(all_ts):\n # log.debug(f'No data found in file {fname}')\n return df # all_ts[0].to_dataframe()", "def retrieve_cloudmask(\n self, output_binary=True, include_thermal_test=True, include_channel_r5=True\n ):\n\n # Read visual near infrared (VNIR) channels at 15m resolution.\n r1 = self.get_reflectance(channel=\"1\")\n r2 = self.get_reflectance(channel=\"2\")\n r3N = self.get_reflectance(channel=\"3N\")\n\n # Read short-wave infrared (SWIR) channels at 30m resolution and match\n # VNIR resolution.\n r5 = self.get_reflectance(channel=\"5\")\n if self.datetime > datetime.datetime(2007, 5, 1) or not include_channel_r5:\n # The SWIR sensor suffered from temperature problems after May\n # 2007. Images later on are set to a dummy value \"1\", which won't\n # influence the following thresholding tests. 
Swath edge NaN pixels\n # stay NaN.\n r5[~np.isnan(r5)] = 1\n r5 = np.repeat(np.repeat(r5, 2, axis=0), 2, axis=1)\n\n # Read thermal (TIR) channel at 90m resolution and match VNIR\n # resolution.\n bt14 = self.get_brightnesstemperature(channel=\"14\")\n bt14 = np.repeat(np.repeat(bt14, 6, axis=0), 6, axis=1)\n\n # Ratios for clear-cloudy-tests.\n r3N2 = r3N / r2\n r12 = r1 / r2\n\n ### TEST 1-4 ###\n # Set cloud mask to default \"confidently clear\".\n clmask = np.ones(r1.shape, dtype=np.float) * 2\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"invalid value encountered\")\n\n # Set \"probably clear\" pixels.\n clmask[\n multiple_logical(\n r3N > 0.03,\n r5 > 0.01,\n 0.7 < r3N2,\n r3N2 < 1.75,\n r12 < 1.45,\n func=np.logical_and,\n )\n ] = PROBABLY_CLEAR\n\n # Set \"probably cloudy\" pixels.\n clmask[\n multiple_logical(\n r3N > 0.03,\n r5 > 0.015,\n 0.75 < r3N2,\n r3N2 < 1.75,\n r12 < 1.35,\n func=np.logical_and,\n )\n ] = PROBABLY_CLOUDY\n\n # Set \"confidently cloudy\" pixels\n clmask[\n multiple_logical(\n r3N > 0.065,\n r5 > 0.02,\n 0.8 < r3N2,\n r3N2 < 1.75,\n r12 < 1.2,\n func=np.logical_and,\n )\n ] = CONFIDENTLY_CLOUDY\n\n # Combine swath edge pixels.\n clmask[\n multiple_logical(\n np.isnan(r1),\n np.isnan(r2),\n np.isnan(r3N),\n np.isnan(r5),\n func=np.logical_or,\n )\n ] = np.nan\n\n if include_thermal_test:\n ### TEST 5 ###\n # Uncertain warm ocean pixels, higher than the 5th percentile of\n # brightness temperature values from all \"confidently clear\"\n # labeled pixels, are overwritten with \"confidently clear\".\n\n # Check for available \"confidently clear\" pixels.\n nc = np.sum(clmask == 2) / np.sum(~np.isnan(clmask))\n if nc > 0.03:\n bt14_p05 = np.nanpercentile(bt14[clmask == 2], 5)\n else:\n # If less than 3% of pixels are \"confidently clear\", test 5\n # cannot be applied according to Werner et al., 2016. 
However,\n # a sensitivity study showed that combining \"probably clear\"\n # and \"confidently clear\" pixels in such cases leads to\n # plausible results and we derive a threshold correspondingly.\n bt14_p05 = np.nanpercentile(\n bt14[np.logical_or(clmask == 2, clmask == 3)], 5\n )\n\n with np.warnings.catch_warnings():\n np.warnings.filterwarnings(\"ignore\", r\"invalid value encountered\")\n # Pixels with brightness temperature values above the 5th\n # percentile of clear ocean pixels are overwritten with\n # \"confidently clear\".\n clmask[np.logical_and(bt14 > bt14_p05, ~np.isnan(clmask))] = 2\n\n # Combine swath edge pixels.\n clmask[np.logical_or(np.isnan(clmask), np.isnan(bt14))] = np.nan\n\n if output_binary:\n clmask[np.logical_or(clmask == 2, clmask == 3)] = 0 # clear\n clmask[np.logical_or(clmask == 4, clmask == 5)] = 1 # cloudy\n\n return clmask", "def itkSpeckleNoiseImageFilterIF3IF3_cast(*args):\n return _itkSpeckleNoiseImageFilterPython.itkSpeckleNoiseImageFilterIF3IF3_cast(*args)", "def bqa_fmask_func(qa):\n # Extracting cloud masks from BQA using np.right_shift() and np.bitwise_and()\n # Cloud (med & high confidence), then snow, then shadow, then fill\n # Low confidence clouds tend to be the FMask buffer\n fill_mask = np.bitwise_and(np.right_shift(qa, 0), 1) >= 1\n cloud_mask = np.bitwise_and(np.right_shift(qa, 4), 1) >= 1 # cloud bit\n cloud_mask &= np.bitwise_and(np.right_shift(qa, 5), 3) >= 2 # cloud conf.\n cloud_mask |= np.bitwise_and(np.right_shift(qa, 11), 3) >= 3 # cirrus\n shadow_mask = np.bitwise_and(np.right_shift(qa, 7), 3) >= 3\n snow_mask = np.bitwise_and(np.right_shift(qa, 9), 3) >= 3\n\n fmask = (fill_mask != True).astype(np.uint8)\n fmask[shadow_mask] = 2\n fmask[snow_mask] = 3\n fmask[cloud_mask] = 4\n\n return fmask", "def __init__(self):\n self.datasets = [\"ISCCP\",\"ISCCP_raw\",\"PATMOSX\",\"PATMOSX_raw\"]\n f = cdms.open(\"OBS/clt_ISCCP_corrected_198301-200912.nc\")\n fp = cdms.open(\"OBS/clt_PATMOSX_corrected_198301-200912.nc\")\n \n f_old = cdms.open(\"OBS/clt_ISCCP_198307-200806.nc\")\n fp_old = cdms.open(\"OBS/clt_PATMOSX_198200-200912.nc\")\n\n fgpcp = cdms.open(\"OBS/GPCP.precip.mon.mean.nc\")\n fcmap = cdms.open(\"OBS/CMAP.std.precip.mon.mean.nc\")\n \n \n self.ISCCP = f(\"clt\",time=('1984-1-1','2009-12-31'))\n self.ISCCP = MV.masked_where(np.isnan(self.ISCCP),self.ISCCP)\n cdutil.setTimeBoundsMonthly(self.ISCCP)\n\n self.PATMOSX = fp(\"clt\",time=('1984-1-1','2009-12-31'))\n self.PATMOSX = MV.masked_where(np.isnan(self.PATMOSX),self.PATMOSX)\n cdutil.setTimeBoundsMonthly(self.PATMOSX)\n\n self.ISCCP_raw = f_old(\"clt\",time=('1984-1-1','2008-6-31'))\n self.ISCCP_raw = MV.masked_where(np.isnan(self.ISCCP_raw),self.ISCCP_raw)\n cdutil.setTimeBoundsMonthly(self.ISCCP_raw)\n\n self.PATMOSX_raw = fp_old(\"clt\",time=('1982-1-1','2009-12-31'))\n self.PATMOSX_raw = MV.masked_where(np.isnan(self.PATMOSX_raw),self.PATMOSX_raw)\n cdutil.setTimeBoundsMonthly(self.PATMOSX_raw)\n\n self.GPCP = cdutil.averager(fgpcp(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n cdutil.setTimeBoundsMonthly(self.GPCP)\n self.CMAP = cdutil.averager(fcmap(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n self.CMAP.setAxis(0,self.GPCP.getTime())\n cdutil.setTimeBoundsMonthly(self.CMAP)", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def preprocess_3d(im_stack):\n im_stack /= 127.5\n im_stack -= 1.0\n return im_stack", "def 
seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def itkBoundedReciprocalImageFilterIUC3IUC3_cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIUC3IUC3_cast(*args)", "def apply3filter(array, filter_):\n s = int(len(filter_)/2)\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height,width)))\n for row in range(s, (height-s)):\n for col in range(s, (width-s)):\n new_array[row,col] = np.sum(filter_ * array[(row-s):(row+s+1),(col-s):(col+s+1)])\n return new_array", "def getsky(image,silent=False,circlerad=False,meanback=False,highbad=None,\n readnoise=None,integer=False,histback=False,nan=False): \n\n if image.ndim not in [1,2]:\n raise ValueError('ERROR - Input array (first parameter) must be 1 or 2 dimensional')\n \n checkbad = ( (highbad is not None) or circlerad or nan)\n sh = image.shape\n if image.ndim==1:\n ncol = 1\n nrow = image.size\n else:\n ncol,nrow = image.shape\n\n if circlerad:\n if ncol != nrow: \n raise ValueError('ERROR - The CIRCLERAD keyword only applies to a 2-d square array')\n \n if checkbad: \n mask = np.ones(image.shape,bool)\n if highbad is not None:\n mask = mask & (image < highbad) \n if nan: \n mask = mask & np.isfinite(image) \n if circlerad: \n if circlerad == 1: \n rad = nrow/2 \n else: \n rad = int(circlerad) \n # Make image where each value is its distance to a given center\n xv,yv = np.meshgrid(np.arange(nrow),np.arange(nrow))\n cen = (nrow-1)/2.\n drad = np.sqrt((xv-cen)**2+(yv-cen)**2) \n #dist_circle,drad, nrow \n mask = mask and (drad < rad) \n npts = np.sum(mask)\n else:\n npts = image.size\n \n # Use ~10000 data points or at least 2 points per row \n maxsky = np.maximum(2*npts//(nrow-1), 10000) # Maximum # of pixels to be used in sky calculation \n # Maintain the same data type as the input image Nov 2005 \n istep = npts//maxsky +1\n skyvec = np.zeros(maxsky+500,dtype=image.dtype)\n #skyvec = make_array(maxsky+200,type=size(image,/type)) \n nstep = (nrow//istep) \n \n jj = 0 \n index0 = istep*np.arange(nstep) \n if nstep > 1: \n i0 = np.maximum((nrow-1 - max(index0) - istep)//2, 0) # Adjust margin for symmetry \n index0 = index0 + i0 \n \n # The beginning index in each row is staggered to avoid emphasizing possible \n # bad columns \n \n for i in range(ncol): \n index = index0 + (i % istep) \n row = image[i,:] \n if checkbad: \n g, = np.where(mask[i,:]) \n ng = len(g)\n if ng==0:\n break\n row = row[g] \n else:\n ng = nrow \n imax = np.maximum(np.searchsorted(index, ng-1), 0)\n #imax = value_locate( index, ng-1) > 0 \n ix = np.minimum( index[0:imax], ng-1)\n skyvec[jj:jj+len(ix)] = row[ix] \n jj += imax\n 
if jj > maxsky: \n break \n\n skyvec = skyvec[0:jj] \n\n if meanback: \n skymode, skysig, subs = utils.meanclip(skyvec)\n nsky = len(subs) \n else:\n skymode,skysig,skynew,nsky = utils.mmm(skyvec,readnoise=readnoise,integer=integer,highbad=highbad)\n \n # Use histogram around median to get mode \n if histback:\n gd = (np.abs(image-skymode) < 4*skysig) \n xhist = np.arange(np.min(image[gd]),np.max(image[gd]),skysig/40)\n hist,bin_edges = np.histogram(image[gd],bins=xhist)\n xhist2 = np.linspace(np.min(xhist),np.max(xhist),1000)\n hist2 = np.interp(xhist2,xhist[:-1],hist)\n bestind = np.argmax(hist2)\n skymode1 = np.copy(skymode) # save original one \n skymode = xhist2[bestind] \n \n \n skymode = float(skymode)\n skysig = float(skysig) \n if silent==False:\n print('Number of points used to find sky = ',nsky)\n print('Approximate sky value for this frame = ',skymode)\n print('Standard deviation of sky brightness = ',skysig)\n \n return skymode,skysig", "def adjust_images_task3(t1, t2, flair, mask, max_values=[150, 150, 180]):\n \n assert t1.shape == t2.shape == flair.shape == mask.shape\n t1 = cut_outliers_task3(t1, mask, cut_value=max_values[0])\n t1 = rescale_data(t1, mask, min_value=0, max_value=max_values[0])\n \n t2 = cut_outliers_task3(t2, mask, cut_value=max_values[1])\n t2 = rescale_data(t2, mask, min_value=0, max_value=max_values[1])\n \n flair = cut_outliers_task3(flair, mask, cut_value=max_values[2])\n flair = rescale_data(flair, mask, min_value=0, max_value=max_values[2])\n \n return t1, t2, flair", "def computeCloudMasking(image_name, numberOfTrees=NUMBER_TREES, threshold=CUTTOF):\n\n # Import training data as GEE object\n # Build randomForest model at each run\n fc_training = ee.FeatureCollection(\n 'ft:1XzZPz8HZMARKQ9OPTWvfuRkPaGIASzkRYMfhKT8H')\n\n # Use these methods for prediction.\n methods_name = ee.List(['percentile1', 'percentile5', 'tree2', 'tree3'])\n\n # Random Forest model\n randomForest = ee.Classifier.randomForest(numberOfTrees=numberOfTrees)\n randomForest = randomForest.train(fc_training, 'cloud', methods_name)\n\n # Image + region of interest\n image = ee.Image(image_name)\n roi = getGeometryImage(image)\n\n # UK BORDER <=> mask sea\n land_geometry = ee.FeatureCollection(parameters.land_geometry)\n # image = image.clip(land_geometry)\n\n # Apply the different methods\n # tree1 = getMaskTree1(image, roi)\n tree2 = getMaskTree2(image, roi)\n tree3 = getMaskTree3(image, roi)\n percentile1, percentile5 = CloudClusterScore(image, roi)\n\n # Add each result as a band of the final image\n final_image = tree3.addBands([tree2, percentile1, percentile5]) \\\n .clip(land_geometry)\n\n # Apply the random Forest classification\n masked_image = final_image.classify(randomForest) \\\n .gt(threshold)\n\n # Add meta data: geometry + date\n masked_image = masked_image.set(\"system:footprint\", image.get('system:footprint'))\n masked_image = masked_image.set(\"system:time_start\", image.get('system:time_start'))\n masked_image = masked_image.set(\"system:time_end\", image.get('system:time_end'))\n\n return masked_image", "def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n 
filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return", "def roi_to_wm(img,brain_wm,nth):\n \n data = img.get_data()\n wmdata = brain_wm.get_data()\n shape = data.shape\n\n roi_ids = np.unique(data)\n roi = roi_ids[1:]\n roi = [int(i) for i in roi]\n print roi\n \n wmdata = wmdata!=0\n result_mask = np.zeros(data.shape)\n #print wmdata \n \n #First, get the nonzero voxel index in image data.\n #Here image data is a label volume.\n #ROIs is in it\n for roi_id in roi:\n #print roi_id\n tmp_mask = data==roi_id\n #print tmp_mask\n indexs = np.transpose(tmp_mask.nonzero())\n #print indexs\n \n #Second, find the nearest wm voxel for each indexs.\n print indexs.shape\n for coor in indexs:\n #print coor\n x = coor[0]\n y = coor[1]\n z = coor[2]\n \n if wmdata[x,y,z]==1:\n result_mask[x,y,z] = roi_id\n else:\n #find the nearest neighbor.\n flag = False\n radius = 1\n mindist_voxel = []\n mindist = 1000 \n while radius<100: \n neigh_list = get_neighbors(coor,radius,shape)\n radius += 1\n #find the nearest white matter voxel.\n for n in neigh_list:\n #print n\n if wmdata[n[0],n[1],n[2]]==1:\n flag = True\n dist = np.sqrt((n[0]-x)**2+(n[1]-y)**2+(n[2]-z)**2)\n # if the distance is smaller than tag, choose it to be nearest.\n \n if dist < mindist:\n mindist = dist\n mindist_voxel = n\n \n if flag:\n break\n #print mindist_voxel\n if mindist_voxel!=[]:\n result_mask[mindist_voxel[0],mindist_voxel[1],mindist_voxel[2]] = roi_id \n for roi_id in roi:\n tmp_mask = result_mask==roi_id\n roi_size = tmp_mask.sum() \n print roi_id, roi_size\n result = img\n result._data = result_mask\n #roi_name = os.path.join(mkdir,'roi_%s.nii.gz'%i)\n nib.save(result,\"test_regroi.nii.gz\")\n \n return True", "def itkShotNoiseImageFilterIF3IF3_cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIF3IF3_cast(*args)", "def read_exposure(fname, patchralims, patchdeclims, mask=True):\n from astropy.io import fits\n \n hdr = fits.getheader(fname)\n data = fits.getdata(fname)\n unc = fits.getdata(fname.replace(\"sci\", \"unc\"))\n \n s = PostageStamp()\n s.filtername = hdr[\"FILTER\"]\n s.nx, s.ny = hdr[\"NAXIS1\"], hdr[\"NAXIS2\"]\n pixscale = hdr[\"PIXSCALE\"]\n PA = hdr[\"ROT\"]\n npsf = hdr[\"NPSF\"]\n\n # --- WCS ---\n s.scale = 1.0/pixscale * np.eye(2)\n s.dpix_dsky = np.matmul(s.scale, rotation_matrix(np.deg2rad(PA)))\n s.crpix = np.array([hdr[\"CRPIX0\"], hdr[\"CRPIX1\"]])\n s.crval = np.array([hdr[\"CRVAL0\"], hdr[\"CRVAL1\"]])\n\n # --- PSF ---\n s.psf = get_psf(npsf)\n\n # -- PIXEL DATA ---\n # x,y\n # note inversion here\n s.ypix, s.xpix = np.meshgrid(np.arange(s.ny), np.arange(s.nx))\n \n # restrict to pixels in patch, and reshape all images to 1D\n sky = pixelcoords_to_skycoords(s)\n inpatch = ((sky[0] > patchralims[0]) & (sky[0] < patchralims[1]) &\n (sky[1] > patchdeclims[0]) & (sky[1] < patchdeclims[1]))\n assert inpatch.sum() > 0\n\n if not mask:\n s.good_pixel = np.copy(inpatch)\n inpatch = slice(None)\n else:\n s.nx = inpatch.sum()\n s.ny = 1\n\n s.xpix = s.xpix.reshape(-1)[inpatch]\n s.ypix = s.ypix.reshape(-1)[inpatch]\n \n # fluxes and uncertainties within patch\n s.pixel_values = data.reshape(-1)[inpatch]\n s.ierr = 1. 
/ unc.reshape(-1)[inpatch]\n \n return s", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def SetMaskImage(self, arg0: 'itkImageUS3') -> \"void\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUS3_SetMaskImage(self, arg0)", "def test_3d_time_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/data/test%03d.fid\")\n assert data.shape == (128, 88, 1250)\n assert data.dtype == 'complex64'\n assert round(data[0,1,2].real,2) == -7.98\n assert round(data[0,1,2].imag,2) == 33.82\n assert round(data[10,22,5].real,2) == 15.71\n assert round(data[10,22,5].imag,2) == 15.1\n lowmem_write_readback_3D(dic,data)", "def itkShotNoiseImageFilterIUC3IUC3_cast(*args):\n return _itkShotNoiseImageFilterPython.itkShotNoiseImageFilterIUC3IUC3_cast(*args)", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def crop_acc_mask(images_dir, images_output_dir, masks_dir, mask_suffix=None, masks_output_dir=None): \n image_suffix_list = [\"C0\", \"DE\", \"T2\"]\n if not os.path.exists(images_output_dir):\n os.makedirs(images_output_dir)\n if masks_output_dir is not None and (not os.path.exists(masks_output_dir)):\n os.makedirs(masks_output_dir)\n margin = [0, 30, 30]\n masks_list = os.listdir(masks_dir)\n masks_list.sort()\n json_dict = OrderedDict()\n for mask in masks_list:\n mask_path = os.path.join(masks_dir, mask)\n if mask.endswith(\".nii.gz\"):\n print(\"#\" * 11 *11)\n print(mask_path)\n mask_sitk = sitk.ReadImage(mask_path)\n mask_npy = sitk.GetArrayFromImage(mask_sitk)\n mask_shape = mask_npy.shape\n crop_bbox_min, crop_bbox_max = get_ND_bounding_box(mask_npy, margin=margin)\n # do not crop along depth dimension\n crop_bbox_min[0] = 0\n crop_bbox_max[0] = mask_shape[0]\n print(crop_bbox_min, crop_bbox_max)\n json_dict[mask_path] = {\"crop_bbox_min\": crop_bbox_min, \"crop_bbox_max\": crop_bbox_max}\n mask_output_npy = crop_ND_volume_with_bounding_box(mask_npy, crop_bbox_min, crop_bbox_max)\n if mask_suffix is not None:\n mask = mask.replace(\"_\" + mask_suffix + \".nii.gz\", \".nii.gz\")\n if masks_output_dir 
is not None:\n save_cropped_array_as_nifty_volume(mask_output_npy, os.path.join(masks_output_dir, mask), mask_sitk)\n save_cropped_array_as_nifty_volume(convert_label(mask_output_npy, [1, 2, 3, 4, 5], [1, 2, 3, 1, 1]), \\\n os.path.join(images_output_dir, mask.replace(\".nii.gz\", \"_{0:04d}.nii.gz\".format(len( \\\n image_suffix_list)))), mask_sitk)\n for i, image_suffix in enumerate(image_suffix_list):\n image = mask.replace(\".nii.gz\", \"_{}.nii.gz\".format(image_suffix))\n image_path = os.path.join(images_dir, image)\n print(image_path)\n image_sitk = sitk.ReadImage(image_path)\n image_npy = sitk.GetArrayFromImage(image_sitk)\n image_output_npy = crop_ND_volume_with_bounding_box(image_npy, crop_bbox_min, crop_bbox_max)\n save_cropped_array_as_nifty_volume(image_output_npy, os.path.join(images_output_dir, mask.replace( \\\n \".nii.gz\", \"_{0:04d}.nii.gz\".format(i))), image_sitk)\n save_json(json_dict, os.path.join(images_output_dir, \"crop_information.json\"))\n if masks_output_dir is not None:\n save_json(json_dict, os.path.join(masks_output_dir, \"crop_information.json\"))", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def make_skydark(files, ext=1, nproc=6, title='ext_1', overwrite=False):\n\n # See if outfile already exists\n outfile = 'skydark_{}.fits'.format(title)\n if (os.path.exists(outfile)) & (overwrite is False):\n print('{} already exists, stopping...'.format(outfile))\n\n else:\n print('Making a stack of the input files...')\n stack = np.zeros((len(files), 2051, 4096))\n for i,f in enumerate(files):\n h = fits.open(f)\n data = h[ext].data\n #dq = h[ext+2].data\n\n # Get the segmap for this file\n segmap_file = f.replace('.fits', '_seg_ext_{}.fits'.format(ext))\n if not os.path.isfile(segmap_file): # sometimes input files are medsub/equalized\n segmap_file = f.replace('_medsub', '').replace('_eq', '').replace('.fits', '_seg_ext_{}.fits'.format(ext))\n segmap = fits.getdata(segmap_file)\n\n # Mask bad pixels and sources\n #data[dq!=0] = np.nan\n data[segmap>0] = np.nan\n stack[i] = data\n h.close()\n\n # Make the skydark\n print('Calculating the median through the stack of input files...')\n if nproc==1:\n skydark = np.nanmedian(stack, axis=0)\n else:\n stacks = np.split(stack, 16, axis=2) # split stack into 16 2048x256 sections\n p = Pool(nproc)\n results = p.map(med_stack, stacks)\n skydark = np.concatenate(results, axis=1)\n\n # Write out the sky dark\n fits.writeto(outfile, skydark, overwrite=True)\n print('Sky dark generated.')\n\n # Make a filtered version of the skydark\n print('Filtering the sky dark...')\n amp1, amp2 = np.split(skydark, 2, axis=1) # treat amps separately\n sigma_clip = SigmaClip(sigma=3.)\n bkg_estimator = MedianBackground()\n bkg1 = Background2D(amp1, (100, 100), filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n bkg2 = Background2D(amp2, (100, 100), 
filter_size=(10, 10), \n sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)\n filtered = np.concatenate((bkg1.background, bkg2.background), axis=1)\n fits.writeto('{}_filtered.fits'.format(outfile.replace('.fits','')), \n filtered, overwrite=True)\n print('Filtered sky dark generated.')", "def wind3dp_load(dataset, startdate, enddate, resample=\"1min\", multi_index=True,\n path=None, threshold=None, **kwargs):\n files = wind3dp_download(dataset, startdate, enddate, path)\n if len(files) > 0:\n df = _wind3dp_load(files, resample, threshold)\n\n # download master file from CDAWeb\n path_to_metafile = _download_metafile(dataset, path=path)\n\n # open master file from CDAWeb as cdf\n metacdf = cdflib.CDF(path_to_metafile)\n\n e_mean = df.filter(like='ENERGY_').mean()\n # ∼30% ΔE/E => ΔE = 0.3*E\n # from Table 3 of Wilson et al. 2021, https://doi.org/10.1029/2020RG000714\n delta_e = 0.3 * e_mean\n e_low = e_mean - delta_e\n e_high = e_mean + delta_e\n energies = pd.concat([e_mean, delta_e, e_low, e_high], axis=1, keys=['mean_E', 'DE', 'lower_E', 'upper_E'])\n energies['Bins_Text']= np.around(e_low/1e3, 2).astype('string') +' - '+ np.around(e_high/1e3, 2).astype('string') + ' keV'\n\n meta = {'channels_dict_df': energies,\n 'APPROX_ENERGY_LABELS': metacdf.varget('APPROX_ENERGY_LABELS'),\n 'ENERGY_UNITS': metacdf.varattsget('ENERGY')['UNITS'],\n 'FLUX_UNITS': metacdf.varattsget('FLUX')['UNITS'],\n 'FLUX_FILLVAL': metacdf.varattsget('FLUX')['FILLVAL'],\n 'FLUX_LABELS': metacdf.varget('FLUX_ENERGY_LABL'),\n }\n\n # create multi-index data frame of flux\n if multi_index:\n if dataset == 'WI_SFPD_3DP' or dataset == 'WI_SOPD_3DP':\n no_channels = len(df[df.columns[df.columns.str.startswith(\"ENERGY\")]].columns)\n t_df = [''] * no_channels\n multi_keys = np.append([f\"FLUX_E{i}\" for i in range(no_channels)],\n df.drop(df.columns[df.columns.str.startswith(f\"FLUX_\")], axis=1).columns,\n )\n for i in range(no_channels):\n t_df[i] = df[df.columns[df.columns.str.startswith(f\"FLUX_E{i}\")]]\n t_df.extend([df[col] for col in df.drop(df.columns[df.columns.str.startswith(f\"FLUX_\")], axis=1).columns.values])\n df = pd.concat(t_df, axis=1, keys=multi_keys)\n else:\n print('')\n print('Multi-index function only available (and necessary) for pitch-angle resolved fluxes. 
Skipping.')\n else:\n df = []\n meta = ''\n return df, meta", "def prepare_ERA5_moisture_flux(era5_path=era5_path):\n import xarray as xr\n from aux_gps import save_ncfile\n from aux_gps import anomalize_xr\n import numpy as np\n from aux_gps import convert_wind_direction\n from dask.diagnostics import ProgressBar\n ds = xr.open_dataset(\n era5_path / 'ERA5_UVQ_4xdaily_israel_1996-2019.nc', chunks={'level': 5})\n # ds = ds.resample(time='D', keep_attrs=True).mean(keep_attrs=True)\n # ds.attrs['action'] = 'resampled to 1D from 12:00UTC data points'\n mf = (ds['q'] * ds['u']).to_dataset(name='qu')\n mf.attrs = ds.attrs\n mf['qu'].attrs['units'] = ds['u'].attrs['units']\n mf['qu'].attrs['long_name'] = 'U component of moisture flux'\n mf['qu'].attrs['standard_name'] = 'eastward moisture flux'\n mf['qv'] = ds['q'] * ds['v']\n mf['qv'].attrs['units'] = ds['v'].attrs['units']\n mf['qv'].attrs['long_name'] = 'V component moisture flux'\n mf['qv'].attrs['standard_name'] = 'northward moisture flux'\n mf['qf'], mf['qfdir'] = convert_wind_direction(u=mf['qu'], v=mf['qv'])\n mf['qf'].attrs['units'] = ds['v'].attrs['units']\n mf['qf'].attrs['long_name'] = 'moisture flux magnitude'\n # mf['qfdir'] = 270 - np.rad2deg(np.arctan2(mf['qv'], mf['qu']))\n mf['qfdir'].attrs['units'] = 'deg'\n mf['qfdir'].attrs['long_name'] = 'moisture flux direction (meteorological)'\n mf = mf.sortby('latitude')\n mf = mf.sortby('level', ascending=False)\n comp = dict(zlib=True, complevel=9)\n encoding_mf = {var: comp for var in mf}\n mf_delayed = mf.to_netcdf(era5_path / 'ERA5_MF_4xdaily_israel_1996-2019.nc',\n 'w', encoding=encoding_mf, compute=False)\n mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n encoding_mf_anoms = {var: comp for var in mf_anoms}\n mf_anoms_delayed = mf_anoms_mean.to_netcdf(era5_path / 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc',\n 'w', encoding=encoding_mf_anoms, compute=False)\n with ProgressBar():\n results = mf_delayed.compute()\n with ProgressBar():\n results1 = mf_anoms_delayed.compute()\n # save_ncfile(mf, era5_path, 'ERA5_MF_4xdaily_israel_1996-2019.nc')\n # mf_anoms = anomalize_xr(mf, freq='MS', time_dim='time')\n # mf_anoms_mean = mf_anoms.mean('latitude').mean('longitude')\n # save_ncfile(mf_anoms_mean, era5_path,\n # 'ERA5_MF_anomalies_4xdaily_israel_mean_1996-2019.nc')\n return", "def itkBoundedReciprocalImageFilterIUL3IUL3_cast(*args):\n return _itkBoundedReciprocalImageFilterPython.itkBoundedReciprocalImageFilterIUL3IUL3_cast(*args)", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def SetMaskImage(self, arg0: 'itkImageUC3') -> \"void\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUC3_SetMaskImage(self, arg0)", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = 
lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def cygx3MWLC(self):\n # --------------------------------------------------------------------------------------------- #\n # Fermi data\n fitsNnam = os.path.join(self.workpath, 'LCresults.fits')\n lcTab = Table.read(fitsNnam)\n if (self.tstart is not None) and (self.tstop is not None):\n lcTab = lcTab[ (self.tstart <= lcTab['mjd']) & (lcTab['mjd'] <= self.tstop)]\n lcTab = lcTab[lcTab['flux'] != -1.] # avoid undone analyses\n\n timeMJD = lcTab['mjd']\n tref = int(np.floor( timeMJD[0] / 100.0)) * 100 # round to lowest hundred\n timeMJD -= tref\n ts = lcTab['ts']\n detect = lcTab['ts'] >= self.tsmin\n undet = lcTab['ts'] < self.tsmin\n flux = lcTab['flux'][detect]\n fluxerr = lcTab['fluxerr'][detect]\n upperl = lcTab['upperlim'][undet]\n upperl[upperl == -1.] = 0. # for when it failed\n scale = 10**int(np.floor(np.log10( np.mean( np.concatenate( (flux, upperl), axis=0) ) )))\n\n # --------------------------------------------------------------------------------------------- #\n # X-ray data\n batFile = os.path.join(self.workpath, 'CygX3_BAT.fits')\n #maxiFile = os.path.join(self.workpath, 'CygX3_MAXI.csv')\n maxiFile = os.path.join(self.workpath, 'CygX3_MAXI.dat')\n asmFile = os.path.join(self.workpath, 'CygX3_ASM.fits')\n if not os.path.isfile(batFile) or self.clobber:\n os.system('wget http://swift.gsfc.nasa.gov/results/transients/CygX-3.lc.fits -O {}'.format(batFile))\n batTab = Table.read(batFile)\n if not os.path.isfile(maxiFile) or self.clobber:\n #os.system('wget http://www.maxi.jaxa.jp/obs/agn_etc/data/J2032+409/J2032+409.txt -O {}'.format(maxiFile))\n os.system('wget http://134.160.243.77/star_data/J2032+409/J2032+409_g_lc_1day_all.dat -O {}'.format(maxiFile))\n maxiTab = Table.read(maxiFile, format='ascii') #t, f2-20, e2-20, f2-4, e2-4, f4-10, e4-10, f10-20, e10-20\n if not os.path.isfile(asmFile) or self.clobber:\n os.system('wget https://www.dropbox.com/s/65qrhi1oyifvjfn/CygX3_ASM.fits?dl=0 -O {}'.format(asmFile))\n asmTab = Table.read(asmFile) #mjd, 1.3-12.2 keV, 1.3-3.0 keV, 3.0-5.0 keV, and 5.0-12.2 keV\n asmTab = asmTab[asmTab['col1'] > 54500]\n\n # --------------------------------------------------------------------------------------------- #\n # Radio data\n amiFile = os.path.join(self.workpath, 'CygX3_AMI.fits')\n ovroFile = os.path.join(self.workpath, 'CygX3_OVRO.fits')\n if not os.path.isfile(amiFile) or self.clobber:\n os.system('wget https://www.dropbox.com/s/bz9xbdbq6hbrant/AMI_2008_14.fits?dl=0 -O {}'.format(amiFile))\n amiTab = Table.read(amiFile)\n if not os.path.isfile(ovroFile) or self.clobber:\n os.system('wget https://www.dropbox.com/s/rs7xlztd66j6fej/CygX3_OVRO.fits?dl=0 -O {}'.format(ovroFile))\n ovroTab = Table.read(ovroFile)\n ovroOff = - 0.124\n\n # --------------------------------------------------------------------------------------------- #\n # Plot\n lcplt = FermiPlot(savepath='', xsize=8.5, ysize=17)\n lcplt.figname = os.path.join(self.workpath, 'CygX3_MWLC.pdf')\n lcplt.xlabel = r'Time (MJD $-$ {})'.format(tref)\n lcplt.ylabel = [r'Flux density (Jy)', r'Count rate', r'Rate (cm$^{-2}$\\,s$^{-1}$)', r'Flux ($10^{%d}$ ph\\,cm$^{-2}$\\,s$^{-1}$)'%(int(np.log10(scale))), r'TS']\n lcplt.label = [r'AMI', r'OVRO', r'ISS/MAXI ($\\times 30$ ct\\,cm$^{-2}$\\,s$^{-1}$)', r'RXTE/ASM (ct\\,s$^{-1}$)', r'\\textit{Swift}/BAT', None, r'\\textit{Fermi}/LAT', None]\n lcplt.hline = [None, None, None, None, self.tsmin]\n\n deltaY = max(np.concatenate((flux+fluxerr, upperl), axis=0)) - 
min(np.concatenate((flux-fluxerr, upperl), axis=0))\n lcplt.ymin = [5.e-2,\n None,\n -0.01,\n (min(np.concatenate((flux-fluxerr, upperl-upperl*0.1), axis=0)) - 0.05*deltaY) / scale,\n min(ts) - 0.05*(max(ts)-min(ts))]\n lcplt.ymax = [3.e1,\n None,\n 0.08,\n (max(np.concatenate((flux+fluxerr, upperl), axis=0)) + 0.05*deltaY) / scale, \n max(ts) + 0.05*(max(ts)-min(ts))]\n deltaX = (timeMJD[-1] + lcTab['mjderr'][-1]) - (timeMJD[0] - lcTab['mjderr'][0]) \n lcplt.xmin = timeMJD[0] - lcTab['mjderr'][0] - 0.05*deltaX\n lcplt.xmax = timeMJD[-1] + lcTab['mjderr'][-1] + 0.05*deltaX\n\n lcplt.fill = [item for sublist in zip( timeMJD[detect]-lcTab['mjderr'][detect], timeMJD[detect]+lcTab['mjderr'][detect] ) for item in sublist]\n lcplt.shadecol= self.loran \n\n lcplt.mksize = [1, 1, 1, 1, 1, 2, 2, 2]\n lcplt.ymode = ['log', 'log', 'linear', 'linear', 'linear', 'linear', 'linear', 'linear']\n lcplt.color = ['black', self.lblue, 'black', self.lblue, 'black', 'gray', 'black', 'black']\n lcplt.prop = [3, 3, 3, 3, 1]\n lcplt.limit = [[False, False], [False, False], False, [True, False], False]\n lcplt.multiplot(x = [ [amiTab['MJD']-tref, ovroTab['mjd']-tref],\n [maxiTab['col1']-tref, asmTab['col1']-tref],\n batTab['TIME']+np.ones(len(batTab['TIME']))*0.5-tref,\n [timeMJD[undet], timeMJD[detect]],\n timeMJD ],\n y = [ [amiTab['Jy'], ovroTab['flux']+ovroOff],\n [maxiTab['col4']*30, asmTab['col6']], \n batTab['RATE'],\n [upperl/scale, flux/scale],\n ts ],\n xerr = [ [None, None],\n [None, None],\n np.ones(len(batTab['TIME']))*0.5,\n [lcTab['mjderr'][undet], lcTab['mjderr'][detect]],\n lcTab['mjderr']],\n yerr = [ [None, None],\n [maxiTab['col5']*30, asmTab['col7']],\n batTab['ERROR'],\n [upperl/scale*0.1, fluxerr/scale],\n None])\n lcplt.save()\n\n print(\"\\t=== Figure '{}' created ===\".format(lcplt.figname)) \n return", "def weighted_avg_3_op(\r\n array_1, array_2, array_3,\r\n scalar_1, scalar_2, scalar_3,\r\n nodata):\r\n result = numpy.empty_like(array_1)\r\n result[:] = nodata\r\n valid_mask = (\r\n ~numpy.isclose(array_1, nodata) &\r\n ~numpy.isclose(array_2, nodata) &\r\n ~numpy.isclose(array_3, nodata))\r\n result[valid_mask] = (\r\n array_1[valid_mask]/scalar_1 +\r\n array_2[valid_mask]/scalar_2 +\r\n array_3[valid_mask]/scalar_3) / 3.\r\n return result", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else 
trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def bin_obs_data(ds, s_lat=-30, n_lat=30, bin_var_nm='omega500',\n grp_time_var='year', bins=np.arange(0,1.1,0.1), land_sea='global', land_mask_dir='./data/'):\n ds_m = ds.where(np.logical_and(ds.lat>=s_lat, ds.lat<=n_lat), drop=True)\n\n ds_mask = xr.open_dataset(os.path.join(land_mask_dir, 'era_land_t42.nc'), decode_times=False)\n ds_mask = ds_mask.where(np.logical_and(ds_mask.lat>=s_lat,ds_mask.lat<=n_lat), drop=True)\n #ds_m.coords['mask'] = (('lat', 'lon'), ds_mask.land_mask.values)\n\n bin_data_dict = {'omega500': ds_m.omega500} \n\n vars_dict = {}\n\n ## 3d variables\n bin_data_dict2 = copy.deepcopy(bin_data_dict)\n pdf_m, ds_bin_mean_m, dims, coords2 = select_3d_obs_data(ds_m, bin_data_dict2, ds_mask,\n bins, bin_var_nm=bin_var_nm, land_sea=land_sea, grp_time_var=grp_time_var)\n for key, val in ds_bin_mean_m.items():\n vars_dict[key] = (dims, val)\n \n vars_dict['pdf'] = (dims, pdf_m)\n ds_bin_mean_m_array = xr.Dataset(vars_dict, coords=coords2)\n\n return ds_bin_mean_m_array" ]
[ "0.6262926", "0.61455876", "0.6122352", "0.59712267", "0.5810346", "0.56390077", "0.5292091", "0.5260056", "0.52369714", "0.5159779", "0.5131087", "0.50615054", "0.49572933", "0.48659185", "0.4854936", "0.4852849", "0.48406282", "0.48388356", "0.47845525", "0.47620434", "0.47155675", "0.47076923", "0.4703111", "0.46666485", "0.46103007", "0.4597425", "0.45727402", "0.4557659", "0.4546504", "0.45439723", "0.45329225", "0.44977283", "0.4490317", "0.44702306", "0.44685078", "0.4428085", "0.44245106", "0.44227433", "0.44181687", "0.44076887", "0.44032732", "0.4380248", "0.4372288", "0.43720502", "0.43698093", "0.43588927", "0.434532", "0.43452352", "0.43419787", "0.4337868", "0.4337508", "0.432546", "0.4313177", "0.43122023", "0.4302151", "0.43013224", "0.42969278", "0.42840385", "0.42825887", "0.4281267", "0.42772967", "0.42701238", "0.42635828", "0.4260882", "0.4243931", "0.42436457", "0.4243032", "0.4232459", "0.4227679", "0.42223996", "0.41899836", "0.4186404", "0.41833043", "0.41830707", "0.4180161", "0.4179163", "0.4176288", "0.4173459", "0.41706917", "0.4170426", "0.4170252", "0.41668978", "0.4162793", "0.4158864", "0.4158061", "0.41568795", "0.41543055", "0.41508165", "0.4133593", "0.41310546", "0.41305658", "0.41285738", "0.4126506", "0.4125918", "0.41177422", "0.411119", "0.40995276", "0.4098472", "0.40972763", "0.4090885" ]
0.7711574
0
Function to perform a 3-year window filter for a single land cover value (such as Forest as 1) for the first year in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. For the first year of land cover classifications, a window of three consecutive years is used, and if the classifications of the first and last years differ from their neighbours, these values are replaced by the classification of their matching neighbours. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyMask3first(imagem, value, bandNames):
    mask = imagem.select(bandNames[0]).neq(value) \
        .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \
        .bitwiseAnd(imagem.select(bandNames[2]).eq(value))
    change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)
    img_out = imagem.select(bandNames[0]).blend(change_img)
    img_out = img_out.addBands(imagem.select(bandNames[1:]))
    return img_out
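For context, a minimal usage sketch of the document entry above, assuming the Earth Engine Python API (ee) is installed and authenticated; the asset id, band names, and land cover value shown here are hypothetical placeholders and are not part of the dataset record.

import ee

ee.Initialize()

# Hypothetical stack of yearly classifications; band names must be chronological
image = ee.Image('users/example/land_cover_stack')
bandNames = ['classification_2000', 'classification_2001', 'classification_2002']

# Apply the first-year filter for a single class (value 1 assumed to mean Forest)
filtered = applyMask3first(image, 1, bandNames)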
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find 
if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = 
ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology 
across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, 
normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def lower_middle_income_countries():\r\n lower_middle_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in lower_middle_countries:\r\n lower_middle_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in lower_middle_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = 
cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # 
Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of 
images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def preprocess_land_cover(\n src_files, dst_raster, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n if os.path.isfile(dst_raster) and not overwrite:\n log.info(\"Land cover data already preprocessed. Skipping.\")\n return\n log.info(\"Starting preprocessing of land cover data.\")\n LC_CLASSES = [\n \"bare\",\n \"crops\",\n \"grass\",\n \"moss\",\n \"shrub\",\n \"tree\",\n \"urban\",\n \"water-permanent\",\n \"water-seasonal\",\n ]\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n reprojected_files = []\n tile_names = unique_tiles(tmpdir)\n\n if not tile_names:\n raise MissingDataError(\"Land cover data not found.\")\n\n for lc_class in LC_CLASSES:\n tiles = [\n p.as_posix()\n for p in tmpdir.glob(f\"*{lc_class}-coverfraction-layer*.tif\")\n ]\n if len(tiles) > 1:\n src_file = merge_tiles(\n tiles, os.path.join(tmpdir, f\"{lc_class}_mosaic.tif\"), nodata=255,\n )\n else:\n src_file = tiles[0]\n reprojected_files.append(\n reproject(\n src_raster=src_file,\n dst_raster=os.path.join(tmpdir, f\"{lc_class}.tif\"),\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=255,\n dst_nodata=255,\n dst_dtype=\"Byte\",\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n )\n\n if len(reprojected_files) > 1:\n raster = concatenate_bands(\n src_files=reprojected_files,\n dst_file=dst_raster,\n band_descriptions=LC_CLASSES,\n )\n else:\n raster = reprojected_files[0]\n\n if geom:\n mask_raster(raster, geom)", "def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n 
the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):\n\t\t\tiso = ee.Image(f_iso)\n\t\t\tgeo = ee.Image(f_geo)\n\t\t\tvol = ee.Image(f_vol)\n\t\t\tpred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])\n\t\t\tpred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])\n\t\t\tcfac = pred0.divide(pred).rename(['cfac'])\n\t\t\tcorr = image.select(band_name).multiply(cfac).rename([band_name])\n\t\t\treturn corr", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = 
get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames = get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def background(self, header):\n band = get_filt_band(header)\n if self.spl_dict[band] is None:\n return self.jd_b_dict[band][0][1]\n T = Time(header['DATE-OBS'], format='fits')\n return np.asscalar(self.spl_dict[band](T.jd))", "def get_rgb_bands(image, bands):\n if bands is not MONOCHROME:\n red = image[:, :, bands['red']]\n green = image[:, :, bands['green']]\n blue = image[:, :, bands['blue']]\n\n img = np.rollaxis(np.array([red, green, blue]), 0, 3)\n else:\n img = color.grey2rgb(image)\n\n return img", "def year_slice(df, first_year=None, date_col='start_date'):\r\n if first_year is None:\r\n return df\r\n if first_year <= df[date_col].min().year:\r\n # No need to slice\r\n return df\r\n return df[df[date_col] >= f\"{first_year}-01-01\"]", "def north_america_countries():\r\n north_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in north_america:\r\n north_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in north_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def band_selector(image, colors):\n # convert band to list for downstream compatibilty, if necessary\n if len(colors) == 3: #then it's an RGB image\n\n #housekeeping\n try:\n nbands = len(colors['band'])\n except: \n colors['band'] = [colors['band']]\n nbands = len(colors['band'])\n\n try:\n len(colors['dark_on_light'])\n except:\n colors['dark_on_light'] = [colors['dark_on_light']]\n\n if colors['colorspace'] is 'gray' or colors['colorspace'] is 'grey':\n colors['band'] = [0]\n nbands = 1\n if len(colors['dark_on_light']) > 1:\n raise ValueError(\n \"\"\"Can't interpret multiple arguments for 'dark_on_light' when \n 'colorspace' is {}.\n \"\"\".format(colors['colorspace'])\n )\n \n if nbands != len(colors['dark_on_light']):\n raise ValueError(\n \"\"\"Number of items in `colors['dark_on_light']` doesn't\n equal the number of bands in `colors['band']`!\"\"\"\n )\n\n # convert colorspace if necessary\n try:\n working_image = getattr(color, \"rgb2\" + colors['colorspace'].lower())(image)\n except:\n working_image = image.copy()\n if colors['colorspace'].lower() != 'rgb':\n raise ValueError(\n \"\"\"Didn't recognize specified colorspace. 
\n See skimage.color.rgb2* for options.\"\"\"\n )\n \n # pull bands\n if len(working_image.shape) == 3: # excludes rgb2gray\n working_image = [img_split(working_image)[i] for i in colors['band']]\n else:\n working_image = [working_image]\n nbands = 1\n \n else: # it's a black and white image\n nbands = 1\n working_image = [image.copy()]\n if len(image.shape) != 2:\n raise ValueError(\n \"\"\"Your `color` argument suggested a grayscale image, but it has \\\n multiple bands!\"\"\"\n )\n \n return(working_image)", "def apply_photo_style(path, decade):\n flt_path = os.path.dirname(path) + \"/\" + str(uuid.uuid4()) + \".jpg\"\n shutil.copyfile(path, flt_path) # make a copy of image because part of the filters change image in place\n f = None\n if decade <= 1930 or decade == 1950 or decade == 1970:\n success = execute_js(js_path, arguments='{} {} {}'.format(path, decade, flt_path)) # execute js rendering with Naked\n if decade == 1930:\n f = Thirties(flt_path)\n if decade == 1940:\n f = Gotham(flt_path)\n \n if decade == 1950 or decade == 1960: # for non-standard photo frames \n padding_x = 80\n if decade == 1950: # kodachrome frame\n padding_top = 80\n padding_bottom = 240\n else: # polaroid frame\n padding_bottom = 80\n padding_x = padding_top = 0\n expand_rect_padding(flt_path, padding_x, padding_top, padding_bottom, flt_path)\n \n if decade == 1950:\n f = Fifties(flt_path)\n if decade == 1960:\n f = Toaster(flt_path)\n if decade == 1970:\n f = Seventies(flt_path)\n if decade == 1980:\n f = Nashville(flt_path)\n if decade == 1990:\n f = Lomo(flt_path)\n if decade == 2000:\n f = Davehill(flt_path)\n \n if f is not None:\n f.apply() # apply photo filter using imagemagick\n\n if decade == 1940:\n # resize fix - gotham filter output image slightly differs in size so resize it to sizes of original image\n origin_img = Image.open(path)\n width, height = origin_img.size \n img = Image.open(flt_path) \n img = img.resize([width,height], Image.ANTIALIAS)\n img.save(flt_path, \"JPEG\")\n\n return flt_path", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = 
'{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def filter_ic(ic,year,month):\n \n ic_filtered = (ic.filter(ee.Filter.eq(\"month\",month))\n .filter(ee.Filter.eq(\"year\",year)))\n image = ee.Image(ic_filtered.first())\n return(image)", "def year_slice(df, first_year = None, date_col = 'start_date'):\n if first_year is None:\n return df\n years = df[date_col].dt.year\n if first_year <= years.min():\n # No need to slice\n return df\n return df[years >= first_year]", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):\r\n\t\r\n\twork_arr = np.ravel(input_arr)\r\n\told_sky = np.median(work_arr)\r\n\toldStaDesviation = work_arr.std()\r\n\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\tif low_cut and high_cut:\r\n\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\telse:\r\n\t\tif low_cut:\r\n\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tindices = np.where((work_arr < upper_limit))\r\n\twork_arr = work_arr[indices]\r\n\tnew_sky = np.median(work_arr)\r\n\titeration = 0\r\n\twhile ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :\r\n\t\titeration += 1\r\n\t\told_sky = new_sky\r\n\t\toldStaDesviation = work_arr.std()\r\n\t\tupper_limit = old_sky + sig_fract * 
oldStaDesviation\r\n\t\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\t\tif low_cut and high_cut:\r\n\t\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tif low_cut:\r\n\t\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\t\telse:\r\n\t\t\t\tindices = np.where((work_arr < upper_limit))\r\n\t\twork_arr = work_arr[indices]\r\n\t\tnew_sky = np.median(work_arr)\r\n\treturn (new_sky, iteration)", "def find_by_year(our_data,year):\n return [album for album in our_data if album['number'] == str(year)]", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def shift_photo_north(gflux=None, rflux=None, zflux=None):\n # ADM if floats were sent, treat them like arrays.\n flt = False\n if _is_row(gflux):\n flt = True\n gflux = np.atleast_1d(gflux)\n rflux = np.atleast_1d(rflux)\n zflux = np.atleast_1d(zflux)\n\n # ADM only use the g-band color shift when r and g are non-zero\n gshift = gflux * 10**(-0.4*0.004)\n w = np.where((gflux != 0) & (rflux != 0))\n gshift[w] = (gflux[w] * 10**(-0.4*0.004) * (gflux[w]/rflux[w])**complex(-0.059)).real\n\n # ADM only use the r-band color shift when r and z are non-zero\n # ADM and only use the z-band color shift when r and z are non-zero\n w = np.where((rflux != 0) & (zflux != 0))\n rshift = rflux * 10**(0.4*0.003)\n zshift = zflux * 10**(0.4*0.013)\n\n rshift[w] = (rflux[w] * 10**(0.4*0.003) * (rflux[w]/zflux[w])**complex(-0.024)).real\n zshift[w] = (zflux[w] * 10**(0.4*0.013) * (rflux[w]/zflux[w])**complex(+0.015)).real\n\n if flt:\n return gshift[0], rshift[0], zshift[0]\n\n return gshift, rshift, zshift", "def imdb_crawl_by_years(years, verbose):\n for year in years:\n imdb_crawl_by_year(year, verbose)", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = 
years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def cover_crop_added(self):\n\n ## getting input parameter\n crop_input = self.soil_inputs.crop_cover.values[0]\n if pd.isnull(crop_input):\n crop_input = \"nan\"\n #climate_input = self.soil_inputs.climate.values[0]\n years_cropcover_tech = self.soil_inputs.time_using_crop_cover.values[0]\n\n if np.isnan(years_cropcover_tech):\n years_cropcover_tech = 10\n\n if self.language == \"spanish\":\n #climate_options = [i.lower() for i in tl.climate_options[0]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[0]]\n else:\n #climate_options = [i.lower() for i in tl.climate_options[1]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[1]]\n\n if crop_input.lower() in cover_crop_options:\n\n cc_eng_input = 
tl.cover_crop_options[1][cover_crop_options.index(crop_input.lower())]\n self._cc_eng_input = cc_eng_input\n #cl_eng_input = tl.climate_options[1][climate_options.index(self._cl_eng_input.lower())]\n\n covercropfilter = ef.cover_cropping_factors.Change.str.lower() == cc_eng_input.lower()\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == self._cl_eng_input.lower()\n\n if climatefilter.sum() == 0:\n cl_eng_input = tl.world_climate_bouwman[1][tl.world_climate_bouwman[0].index(self._cl_eng_input)]\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == cl_eng_input.lower()\n\n filter_conditions = climatefilter & covercropfilter\n if np.array(filter_conditions).sum() != 0:\n factor_change_20years = ef.cover_cropping_factors.Factor.loc[filter_conditions].values[0]\n else:\n factor_change_20years = 1\n\n self.cover_crop_soc_change = cumulative_socemissions_for_20years(years_cropcover_tech,\n factor_change_20years,\n self.soil_c_stock)\n else:\n self.cover_crop_soc_change = [0]", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def FoldChangeFilterToControl(X, data_headers, FCto, cutoff=0.4):\n XX = LinearFoldChange(X.copy(), data_headers, FCto)\n Xidx = np.any(XX[data_headers].values <= 1 - cutoff, axis=1) | np.any(XX[data_headers].values >= 1 + cutoff, axis=1)\n return X.iloc[Xidx, :]", "def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) 
and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def numberOfWideBands(config=None):\n # Get correlator configuration\n c = config\n if c == None: \n c = utils.getConfigAstroband()\n\n # Determine if we have both wideband and spectral line astrobands. 
\n # If we do, we return nwide & maxbandwidth for sl only since \n # this is the correlator which will be attached to all ants.\n astrobands = [ abc[0] for abc in c ]\n if len( astrobands ) == 0:\n raise Exception, \"No existing astroband configuration.\"\n if max( astrobands ) > 8 and min( astrobands ) < 9: \n astrobands = [ ab for ab in astrobands if ab < 9 ]\n\n # Check bandwidth\n nwide = 0\n maxbandwidth = 0\n for t in c:\n astroband = t[0]\n # Skip band if it is not being used or is not in astroband list above.\n mp = commands.queryString('SignalPath.Mapping.Astroband%d.confTag' % (astroband) )\n if mp == 'NONE' or astroband not in astrobands: continue\n\n # Get bandwidth\n if t[2] == commands.BW500:\n bw = 500\n elif t[2] == commands.BW250:\n bw = 250\n elif t[2] == commands.BW125:\n bw = 125\n elif t[2] == commands.BW62:\n bw = 62\n elif t[2] == commands.BW31:\n bw = 31\n elif t[2] == commands.BW8:\n bw = 8\n elif t[2] == commands.BW2:\n bw = 2\n else:\n raise Exception, 'Could not find bandwith for '+str(t[2])\n\n # Maximum?\n if bw > maxbandwidth: \n maxbandwidth = bw\n if utils.isDualPol( astroband ):\n nwide = 2 \n else:\n nwide = 1\n elif bw == maxbandwidth:\n if utils.isDualPol( astroband ): \n nwide += 2 \n else:\n nwide += 1\n\n return nwide, maxbandwidth", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def __init__(self, predict_lowerbound: float, first_season: int, aug_num_cuts: int, aug_min_cuts_on: int,\n cdf_cutoff: float):\n super().__init__(CutLayer(MultiplyAggregateLayer(InnerAppearanceLayer(first_season, aug_num_cuts,\n aug_min_cuts_on, cdf_cutoff)), mean, 1.0, predict_lowerbound))", "def precover(\n self,\n orbit: Orbit,\n tolerance: float = 30 * ARCSEC,\n start_mjd: Optional[float] = None,\n end_mjd: Optional[float] = None,\n window_size: int = 7,\n datasets: Optional[set[str]] = None,\n ) -> Tuple[List[PrecoveryCandidate], List[FrameCandidate]]:\n # basically:\n \"\"\"\n find all windows between start and end of given size\n for each window:\n propagate to window center\n for each unique epoch,obscode in window:\n propagate to epoch\n find frames which match healpix of propagation\n for each matching frame\n find matching observations\n for each matching observation\n yield match\n \"\"\"\n if datasets is not None:\n self._warn_for_missing_datasets(datasets)\n\n if start_mjd is None or end_mjd is None:\n first, last = self.frames.idx.mjd_bounds()\n if start_mjd is None:\n start_mjd = first\n if end_mjd is None:\n end_mjd = last\n\n logger.info(\n \"precovering orbit %s from %.5f to %.5f, window=%d, datasets=%s\",\n orbit.orbit_id,\n start_mjd,\n end_mjd,\n window_size,\n datasets or \"all\",\n )\n\n windows = self.frames.idx.window_centers(\n start_mjd, end_mjd, window_size, datasets=datasets\n )\n\n # group windows by obscodes so that many windows can be searched at once\n matches = []\n for obscode, obs_windows in 
itertools.groupby(\n windows, key=lambda pair: pair[1]\n ):\n mjds = [window[0] for window in obs_windows]\n matches_window = self._check_windows(\n mjds,\n obscode,\n orbit,\n tolerance,\n start_mjd=start_mjd,\n end_mjd=end_mjd,\n window_size=window_size,\n datasets=datasets,\n )\n matches += list(matches_window)\n\n precovery_candidates, frame_candidates = sift_candidates(matches)\n\n return precovery_candidates, frame_candidates", "def upper_middle_income_countries():\r\n upper_middle_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in upper_middle_countries:\r\n upper_middle_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in upper_middle_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', 
time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def low_income_countries():\r\n low_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in low_countries:\r\n low_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in low_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def filter_bands(self, imagery, bands=None, names=None, wavelengths=None) -> 'ImageCollection':\n\n graph = {\n 'process_id': 'filter_bands',\n 'imagery': imagery.graph,\n }\n\n if bands:\n graph['bands'] = bands\n if names:\n graph['names'] = names\n if wavelengths:\n graph['wavelengths'] = wavelengths\n\n imagery.graph = graph\n return imagery", "def apply_trigger_first(cut_fn):\n def wrapped(arrays, cut):\n arrays = svjflatanalysis.arrayutils.apply_trigger_and_jetpt550(arrays, 2018)\n return cut_fn(arrays, cut)\n return wrapped", "def analysis(self, x, band=None):\n\n if band is None:\n bands = 
range(self.filters.shape[1])\n else:\n bands = [band]\n\n output = np.zeros((x.shape[0], len(bands)), dtype=x.dtype)\n\n for i, b in enumerate(bands):\n output[:, i] = fftconvolve(x, self.filters[:, b], mode=\"same\")\n\n if output.shape[1] == 1:\n return output[:, 0]\n else:\n return output", "def pil_image_mask_by_band_value(img, band, val, cval=0):\n # type: (PImage.Image, int, int) -> PImage.Image\n\n num_bands = len(img.getbands())\n\n if band >= num_bands:\n raise ValueError('Cannot get band with index {} from image with {} bands'.format(band, num_bands))\n\n # Create a look up table where only one value maps to itself and everything else to cval\n other_band_lut = [cval] * 256\n target_band_lut = [cval] * 256\n target_band_lut[val] = val\n lut = []\n\n for i in range(num_bands):\n if i == band:\n lut += target_band_lut\n else:\n lut += other_band_lut\n\n img = img.point(lut)\n return img", "def createDefaultFilterbank(window):\n # Gaussians:: G1 = N(0, 1), G2 = N(0, 2), G3 = N(0, 4)\n # Laplacian of Gaussians:: LoG1 = Lap(N(0, 1)), LoG2=Lap(N(0, 2)), LoG3=Lap(N(0, 4)), LoG4=Lap(N(0, 8))\n # Derivative of Gaussian (x):: Div1xG1 = d/dx N(0,2), Div1xG2=d/dx N(0,4)\n # Derivative of Gaussian (y): Div1yG1 = d/dy N(0,2), Div1yG2=d/dy N(0,4)\n \n G1 = gaussian_kernel(window, window, 1)\n G2 = gaussian_kernel(window, window, 2)\n G3 = gaussian_kernel(window, window, 4)\n \n # see http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm\n LoG1 = laplacianOfGaussian_kernel(window, window, 1)\n LoG2 = laplacianOfGaussian_kernel(window, window, 2)\n LoG3 = laplacianOfGaussian_kernel(window, window, 4)\n LoG4 = laplacianOfGaussian_kernel(window, window, 8)\n \n dx_G1 = gaussian_1xDerivative_kernel(window, window, 2)\n dx_G2 = gaussian_1xDerivative_kernel(window, window, 4)\n \n dy_G1 = gaussian_1yDerivative_kernel(window, window, 2)\n dy_G2 = gaussian_1yDerivative_kernel(window, window, 4)\n \n return np.array([G1, G2, G3, LoG1, LoG2, LoG3, LoG4, dx_G1, dx_G2, dy_G1, dy_G2])", "def scrape_world_cup_scoreboard(year):\n # Replace this with the results logic somehow...\n\n d = world_cup_mapping[year]\n prefix = 'http://www.fifa.com'\n if type(d) == int:\n root_url = '/worldcup/archive/edition=%s/' % d\n else:\n root_url = '/worldcup/archive/%s/' % d\n data = scrape_url(prefix + root_url + \"results/index.html\")\n\n # Find urls in the page.\n match_re = re.compile(root_url + \"results/matches/match=\\d+/report.html\")\n urls = match_re.findall(data)\n return [prefix + e for e in urls]", "def imdb_crawl_by_year(year, verbose):\n _crawl_by_year_helper(year, verbose, True, False)", "def load_copernicus_ammonia(layers, time_slice, lat_slice, lon_slice, verbose=False):\n xr_layers = []\n\n if 'agl' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_agl.nc').agl.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n if 'ags' in layers:\n xr_layers.append(xr.load_dataset(\n './data/copernicus/ammonia/CAMS-GLOB-ANT_Glb_0.1x0.1_anthro_nh3_v4.2_monthly_ags.nc').ags.sel(\n time=time_slice, lat=lat_slice, lon=lon_slice))\n\n nh3 = sum(xr_layers)\n nh3.name = 'nh3'\n\n if verbose:\n\n shape = gpd.read_file('./shp/lombardia/lombardia.shp').to_crs(epsg=4326)\n\n ncols = len(xr_layers) + 1\n fig, axs = plt.subplots(ncols=ncols, figsize=(8 * ncols, 5))\n\n for i in range(len(xr_layers)):\n shape.plot(ax=axs[i], color='black', alpha=0.5)\n xr_layers[i].mean(dim='time').plot(ax=axs[i], alpha=0.5)\n\n shape.plot(ax=axs[len(xr_layers)], 
color='black', alpha=0.5)\n nh3.mean(dim='time').plot(ax=axs[len(xr_layers)], alpha=0.5)\n\n plt.show()\n\n return nh3", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def stack_singlebands_vrt(srcs: List, band: int = 1):\n vrt_bands = []\n for srcnum, src in enumerate(srcs, start=1):\n with check_rasterio_im_load(src) as ras, MemoryFile() as mem:\n riocopy(ras, mem.name, driver='VRT')\n vrt_xml = mem.read().decode('utf-8')\n vrt_dataset = ET.fromstring(vrt_xml)\n for bandnum, vrt_band in enumerate(vrt_dataset.iter('VRTRasterBand'), start=1):\n if bandnum == band:\n vrt_band.set('band', 
str(srcnum))\n vrt_bands.append(vrt_band)\n vrt_dataset.remove(vrt_band)\n for vrt_band in vrt_bands:\n vrt_dataset.append(vrt_band)\n\n return ET.tostring(vrt_dataset).decode('UTF-8')", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if not self._is_numeric(name):\n msg = \"Can only band numeric typed data! {} is {}.\"\n msg = msg.format(name, self._get_type(name))\n raise TypeError(msg)\n if not text_key: text_key = self.text_key\n if not new_name: new_name = '{}_banded'.format(name)\n if not label: label = self.text(name, False, text_key)\n franges = []\n for idx, band in enumerate(bands, start=1):\n lab = None\n if isinstance(band, dict):\n lab = list(band.keys())[0]\n band = list(band.values())[0]\n if isinstance(band, tuple):\n if band[0] < 0:\n raise ValueError('Cannot band with lower bound < 0.')\n elif band[1] < 0:\n raise ValueError('Cannot band with upper bound < 0.')\n r = '{}-{}'.format(band[0], band[1])\n franges.append([idx, lab or r, {name: frange(r)}])\n else:\n r = str(band)\n franges.append([idx, lab or r, {name: [band]}])\n\n self.derive(new_name, 'single', label, franges,\n text_key=text_key)\n\n return None", "def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc", "def filter_tracks(df, start_year=1980, end_year=2010, zeta=0, age=36):\n tracks = df.groupby('num')\n filterdf = tracks.filter(lambda x: (x['datetime'].dt.year.min() >= start_year) &\n (x['datetime'].dt.year.max() <= end_year) &\n (x['age'].max() >= age) &\n (np.abs(x['vorticity'].min()) > zeta))\n return filterdf", "def fir_filter(sig, sampling_freq, critical_freq, kernel_window = 'hamming', taps = 101, kind = 'band', **kwargs):\n\n kernel = make_fir_filter(sampling_freq, critical_freq, kernel_window, taps, kind, **kwargs) \n\n return np.roll(scipy.signal.lfilter(kernel, [1], sig), -taps/2+1)", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def get_zps_dr1(Field, bands):\n zpfile = os.path.join(context.tables_dir, \"ZPfiles_2020_kadu\",\n \"zps_tiles-FLUX_AUTO.fits\")\n zpdata = fits.open(zpfile)\n zpdic = {a: {\"R\": b,\n \"F660\": c,\n \"I\": 
d}\n for a, b, c, d in zip(zpdata[1].data[\"TILE\"],\n zpdata[1].data[\"ZP_R\"],\n zpdata[1].data[\"ZP_F660\"],\n zpdata[1].data[\"ZP_I\"])} \n zps = np.array([zpdic[Field][band] for band in bands])\n return zps", "def get_brightest(self, object_type='star', num_srcs=1, band='r', return_idx=False):\n fluxes = np.array([s.params.flux_dict[band] for s in self.srcs])\n type_idx = np.where(self.source_types == object_type)[0]\n type_fluxes = fluxes[type_idx]\n type_idx = type_idx[np.argsort(type_fluxes)[::-1]][:num_srcs]\n blist = [self.srcs[i] for i in type_idx]\n if return_idx:\n return blist, type_idx\n else:\n return blist", "def middle_east_countries():\r\n middle_east_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in middle_east:\r\n middle_east_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in middle_east_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def calc_signal_vs_noise(model, years=(1980, 2150), baseline=(1980, 2010)):\n\n years = range(*years)\n baseline_slice = slice(*baseline)\n\n Ss = [model.predict(year) for year in years]\n S = xr.concat(Ss, 'year')\n S['year'] = (['year'], years)\n\n noise = model.data.sel(**{model.time_dim: baseline_slice}).std(model.time_dim)\n SN = S / noise\n SN['year'] = (['year'], years)\n signal_ds = xr.Dataset({'S': S, 'S_N': SN, 'N': noise})\n\n return signal_ds", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def get_land_conso_per_year(self, level, group_name=None):\n fields = Cerema.get_art_field(self.analyse_start_date, self.analyse_end_date)\n qs = self.get_cerema_cities(group_name=group_name)\n qs = qs.values(level)\n qs = qs.annotate(**{f\"20{field[3:5]}\": Sum(field) / 10000 for field in fields})\n return {row[level]: {year: row[year] for year in self.years} for row in qs}", "def slice_bands(self, band_idx):\n new_eigenvals = self.eigenvals.T[sorted(band_idx)].T\n return type(self)(kpoints=self.kpoints, eigenvals=new_eigenvals)", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, debug, subbands_ifft, return_subbands_only, rectify_and_lowpass_subbands, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function, include_subbands_noise, subbands_noise_mean, subbands_noise_stddev):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n if debug: # return the real and imaginary parts of the subbands separately -- use if matching to their output\n nets['subbands_r'] = tf.real(nets['subbands'])\n nets['subbands_i'] = tf.imag(nets['subbands'])\n\n # TODO: with using subbands_ifft is redundant. 
\n # make the time subband operations if we are returning the subbands or if we want to include all of the keys in the graph\n if subbands_ifft or return_subbands_only or include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n if return_subbands_only or include_all_keys:\n nets['subbands_time'] = nets['subbands_ifft']\n if rectify_and_lowpass_subbands: # TODO: the subband operations are hard coded in?\n nets['subbands_time_relu'] = tf.nn.relu(nets['subbands_time'], name='rectified_subbands')\n nets['subbands_time_lowpassed'] = hanning_pooling_1d_no_depthwise(nets['subbands_time_relu'], downsample=2, length_of_window=2*4, make_plots=False, data_format='NCW', normalize=True, sqrt_window=False)\n\n # TODO: noise is only added in the case when we are calcalculating the time subbands, but we might want something similar for the cochleagram\n if return_subbands_only or include_all_keys:\n # Compress subbands if specified and add noise. \n nets = compression_function(nets, input_node_name='subbands_time_lowpassed', output_node_name='subbands_time_lowpassed_compressed')\n if include_subbands_noise:\n nets = add_neural_noise(nets, subbands_noise_mean, subbands_noise_stddev, input_node_name='subbands_time_lowpassed_compressed', output_node_name='subbands_time_lowpassed_compressed_with_noise')\n nets['subbands_time_lowpassed_compressed_with_noise'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed_with_noise'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed_with_noise']\n else:\n nets['subbands_time_lowpassed_compressed'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed']\n\n return nets", "def filter_image(brain, preprocessing_args=None):\n brain = brain.astype(np.float64, copy=False)\n if preprocessing_args and preprocessing_args.preprocessing == \"skip\":\n pass\n else: # default pre-processing\n for i in trange(brain.shape[-1], desc=\"filtering\", unit=\"plane\"):\n brain[..., i] = filter_plane(brain[..., i])\n brain = scale_and_convert_to_16_bits(brain)\n return brain", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the 
loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, \n flt='ibhj34h6q_flt.fits', filter='G141'):\n import numpy as np\n \n import astropy.io.fits as pyfits\n import astropy.wcs as pywcs\n \n im = pyfits.open(flt)\n wcs = pywcs.WCS(im[1].header, relax=True)\n \n thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180\n\n wcs.wcs.crval = np.array([ra, dec])\n \n ### Rotate the CD matrix\n theta = im[1].header['PA_APER'] - pa_aper \n cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)\n wcs.wcs.cd = cd_rot\n \n h = wcs.to_header(relax=True)\n \n for i in [1,2]:\n for j in [1,2]:\n h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]\n h.remove('PC%d_%d' %(i,j))\n \n h['BACKGR'] = 1.\n h['FILTER'] = filter\n h['INSTRUME'] = 'WFC3'\n h['READN'] = im[0].header['READNSEA']\n h['NAXIS1'] = h['NAXIS2'] = 1014\n h['DETECTOR'] = 'IR'\n h['PHOTFLAM'] = 1.\n h['PHOTPLAM'] = 1.\n \n return h, wcs", "def calculate_band(value, bands):\n for band in bands:\n if band > value:\n return band", "def reduce_dataset(X, year):\n\n drop_list = [i for i in range(config.DB_YEAR_MIN, config.DB_YEAR_MAX + 1)]\n drop_list.remove(year - 1)\n red_X = X.drop(drop_list, axis=0)\n return red_X", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = [(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 
3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def bandname(self):\n if self._properties['bandname'] is None:\n self._properties['bandname'] = \"wisew1\" if \"-w1-\" in self.filename \\\n else \"wisew2\" if \"-w2-\" in self.filename \\\n else \"wisew3\" if \"-w3-\" in self.filename \\\n else \"wisew4\" if \"-w4-\" in self.filename \\\n else \"unknown\"\n return self._properties['bandname']", "def createYearCount(fields):\n year = int(fields[1])\n count = int(fields[2])\n return YearCount(year,count)", "def findContinuumChannels(spectrum, nBaselineChannels=16, sigmaFindContinuum=3, \n nanmin=None, baselineMode='min', trimChannels='auto',\n narrow='auto', verbose=False, maxTrim=maxTrimDefault, \n maxTrimFraction=1.0, separator=';', fitResult=None,\n maxGroupsForMaxTrimAdjustment=3, lowHighBaselineThreshold=1.5,\n lineSNRThreshold=20, negativeThresholdFactor=1.15, \n dropBaselineChannels=2.0, madRatioUpperLimit=1.5, madRatioLowerLimit=1.15):\n if (fitResult is not None):\n myx = np.arange(len(spectrum), dtype=np.float64)\n if (len(fitResult) > 2):\n myx -= fitResult[3]\n originalSpectrum = spectrum + (fitResult[0]*myx**2 + fitResult[1]*myx + fitResult[2]) - nanmean(spectrum)\n# print \"fitResult = \", fitResult\n casalog.post(\"min/max spectrum = %f, %f\" % (np.min(spectrum), np.max(spectrum)))\n casalog.post(\"min/max originalSpectrum = %f, %f\" % (np.min(originalSpectrum), np.max(originalSpectrum)))\n else:\n originalSpectrum = spectrum + fitResult[0]*myx\n else:\n originalSpectrum = spectrum\n if (narrow == 'auto'):\n narrow = pickNarrow(len(spectrum))\n autoNarrow = True\n else:\n autoNarrow = False\n npts = len(spectrum)\n percentile = 100.0*nBaselineChannels/npts\n correctionFactor = sigmaCorrectionFactor(baselineMode, npts, percentile)\n sigmaEffective = sigmaFindContinuum*correctionFactor\n if (fitResult is not None):\n if (len(fitResult) > 2):\n casalogPost(\"****** starting findContinuumChannels (polynomial=%g*(x-%.2f)**2+%g*(x-%.2f)+%g) ***********\" % (fitResult[0], fitResult[3], fitResult[1], fitResult[3], fitResult[2]))\n else:\n casalogPost(\"****** starting findContinuumChannels (slope=%g) ***********\" % (fitResult[0]))\n else:\n casalogPost(\"****** starting findContinuumChannels ***********\")\n casalogPost(\"Using sigmaFindContinuum=%.2f, sigmaEffective=%.1f, percentile=%.0f for mode=%s, channels=%d/%d\" % (sigmaFindContinuum, sigmaEffective, percentile, baselineMode, nBaselineChannels, len(spectrum)))\n if (baselineMode == 'edge'):\n # pick n channels on both edges\n lowerChannels = spectrum[:nBaselineChannels/2]\n upperChannels = spectrum[-nBaselineChannels/2:]\n allBaselineChannels = list(lowerChannels) + list(upperChannels)\n allBaselineXChannels = range(0,nBaselineChannels/2) + range(len(spectrum)-nBaselineChannels/2,len(spectrum))\n if (np.std(lowerChannels) == 0):\n mad = MAD(upperChannels)\n median = nanmedian(upperChannels)\n casalogPost(\"edge method: Dropping lower channels from median and std calculations\")\n elif (np.std(upperChannels) == 0):\n mad = MAD(lowerChannels)\n median = nanmedian(lowerChannels)\n casalogPost(\"edge method: Dropping upper channels from median and std calculations\")\n else:\n mad = MAD(allBaselineChannels)\n median = nanmedian(allBaselineChannels)\n useLowBaseline = True\n else:\n # Pick the n channels with the n lowest values (or highest if those have smallest MAD), but\n # 
ignore edge channels inward for as long they are identical to themselves (i.e. avoid the\n # effectd of TDM edge flagging.)\n myspectrum = spectrum\n if (len(originalSpectrum) > 10 and len(originalSpectrum) <= 128):\n # Could open this up to all data except it makes a 4096-channel \n # spw worse: G0.25+0.02__sci.spw16, and it picks up some\n # continuum in self-absorbed area in Cha-MMs1_CS in 200-channel spw\n myspectrum = spectrum[np.where((originalSpectrum != originalSpectrum[0]) * (originalSpectrum != originalSpectrum[-1]))]\n casalogPost('Avoided %d edge channels when computing min channels' % (len(spectrum)-len(myspectrum)))\n idx = np.argsort(myspectrum)\n allBaselineChannels = myspectrum[idx[:nBaselineChannels]] \n allBaselineXChannels = idx[:nBaselineChannels]\n allBaselineOriginalChannels = originalSpectrum[idx[:nBaselineChannels]]\n highestChannels = myspectrum[idx[-nBaselineChannels:]] \n medianOfAllChannels = nanmedian(myspectrum)\n mad0 = MAD(allBaselineChannels)\n mad1 = MAD(highestChannels)\n\n # Introduce the lowHighBaselineThreshold factor on Aug 31, 2016 for CAS-8938\n if (mad0 > lowHighBaselineThreshold*mad1):\n casalogPost(\"Using highest %d channels as baseline because %g > %.1f*%g\" % (nBaselineChannels,mad0,lowHighBaselineThreshold,mad1))\n allBaselineChannels = highestChannels[::-1] # reversed it so that first channel is highest value\n mad0 = MAD(allBaselineChannels)\n useLowBaseline = False\n else:\n if verbose:\n casalogPost(\"Using lowest %d channels as baseline: %s %s\" % (nBaselineChannels, idx[:nBaselineChannels], myspectrum[idx[:10]]))\n casalogPost(\"Using lowest %d channels as baseline because %g <= %.1f*%g\" % (nBaselineChannels,mad0,lowHighBaselineThreshold,mad1))\n useLowBaseline = True\n\n casalogPost(\"Median of all channels = %f, MAD of selected baseline channels = %f\" % (medianOfAllChannels,mad0))\n madRatio = None\n if dropBaselineChannels > 0:\n dropExtremeChannels = int(len(idx)*dropBaselineChannels)/100\n if dropExtremeChannels > 0:\n allBaselineChannelsDropExtremeChannels = myspectrum[idx[dropExtremeChannels:nBaselineChannels+dropExtremeChannels]] \n mad0_dropExtremeChannels = MAD(allBaselineChannelsDropExtremeChannels)\n if mad0_dropExtremeChannels > 0:\n # prevent division by zero error\n madRatio = mad0/mad0_dropExtremeChannels\n if madRatioLowerLimit < madRatio < madRatioUpperLimit:\n # more than 1.2 means there was a significant improvement; more than 1.5 means something unexpected about the statistics\n casalogPost(\"****** Dropping most extreme %d = %.1f%% of channels when computing the MAD, since it reduces the mad by a factor of x=%.2f (%.2f<x<%.2f)\" % (dropExtremeChannels, dropBaselineChannels, madRatio, madRatioLowerLimit, madRatioUpperLimit))\n allBaselineChannels = allBaselineChannelsDropExtremeChannels\n allBaselineXChannels = idx[dropExtremeChannels:nBaselineChannels+dropExtremeChannels]\n allBaselineOriginalChannels = originalSpectrum[idx[dropExtremeChannels:nBaselineChannels+dropExtremeChannels]]\n else:\n casalogPost(\"**** Not dropping most extreme channels when computing the MAD, since the change in MAD of %.2f is not within %.2f<x<%.2f\" % (madRatio, madRatioLowerLimit, madRatioUpperLimit))\n \n\n casalogPost(\"min method: computing MAD and median of %d channels used as the baseline\" % (len(allBaselineChannels)))\n mad = MAD(allBaselineChannels)\n madOriginal = MAD(allBaselineOriginalChannels)\n casalogPost(\"MAD of all baseline channels = %f\" % (mad))\n if (fitResult is not None):\n casalogPost(\"MAD of original 
baseline channels (before removal of fit) = %f\" % (madOriginal))\n if (mad < 1e-17 or madOriginal < 1e-17): \n casalogPost(\"min method: avoiding blocks of identical-valued channels\")\n if (len(originalSpectrum) > 10):\n myspectrum = spectrum[np.where((originalSpectrum != originalSpectrum[0]) * (originalSpectrum != originalSpectrum[-1]))]\n else: # original logic, prior to linear fit removal\n myspectrum = spectrum[np.where(spectrum != allBaselineChannels[0])]\n allBaselineChannels = myspectrum[np.argsort(myspectrum)[:nBaselineChannels]] \n casalogPost(\" computing MAD and median of %d channels used as the baseline\" % (len(allBaselineChannels)))\n mad = MAD(allBaselineChannels)\n mad = MAD(allBaselineChannels)\n median = nanmedian(allBaselineChannels)\n casalogPost(\"min method: median intensity of %d channels used as the baseline: %f\" % (len(allBaselineChannels), median))\n # signalRatio will be 1.0 if no lines present and 0.25 if half the channels have lines, etc.\n signalRatio = (1.0 - 1.0*len(np.where(np.abs(spectrum-median)>(sigmaEffective*mad*2.0))[0]) / len(spectrum))**2\n originalMedian = np.median(originalSpectrum)\n # Should not divide by post-baseline-fit median since it may be close to 0\n spectralDiff = 100*np.median(np.abs(np.diff(spectrum)))/originalMedian\n spectralDiff2 = 100*np.median(np.abs(np.diff(spectrum,n=2)))/originalMedian\n casalogPost(\"signalRatio=%f, spectralDiff = %f and spectralDiff2=%f percent of the median\" % (signalRatio, spectralDiff,spectralDiff2))\n lineStrengthFactor = 1.0/signalRatio\n if (spectralDiff2 < 0.65 and npts > 192 and signalRatio<0.95):\n # This appears to be a channel-averaged FDM spectrum with lots of real line emission.\n # So, don't allow the median to be raised, and reduce the mad to lower the threshold.\n # page 15: G11.92_B7.ms_spw3 yields spectralDiff2=0.6027\n # We can also get into here if there is a large slope in the mean spectrum, so we\n # counter that by removing a linear slope before evaluating lineSNR.\n casalogPost('The spectral difference (n=2) is rather small, so set signalRatio=0 to reduce the baseline level.',debug=True)\n signalRatio = 0\n if True:\n # note: this slope removal differs from the one in runFindContinuum because\n # it always acts upon the whole spectrum, not just the potential baseline windows.\n print \"Removing linear slope for purposes of computing lineSNR.\"\n x = np.arange(len(spectrum))\n slope, intercept = linfit(x, spectrum, MAD(spectrum))\n newspectrum = spectrum - x*slope\n newmad = MAD(newspectrum)\n lineSNR = (np.max(newspectrum)-np.median(newspectrum))/newmad\n else:\n lineSNR = (np.max(spectrum)-median)/mad\n casalogPost('lineSNR = %f' % lineSNR)\n if (lineSNR > lineSNRThreshold):\n casalogPost('The lineSNR > %d, so scaling the mad by 1/3 to reduce the threshold.' % lineSNRThreshold, debug=True)\n mad *= 0.33\n if (trimChannels == 'auto'): \n trimChannels = 6\n casalogPost('Setting trimChannels to %d.' 
% (trimChannels))\n else:\n casalogPost('Not reducing mad by 1/3: npts=%d, signalRatio=%.2f, spectralDiff2=%.2f' % (npts,signalRatio,spectralDiff2),debug=True)\n \n medianTrue = medianCorrected(baselineMode, percentile, median, mad, \n signalRatio, useLowBaseline)\n peakFeatureSigma = (np.max(spectrum)-medianTrue)/mad \n if False:\n # experimental heuristic\n minChannelsForIncreasingSigma = 360\n maxChannelsForIncreasingSigma = 1440\n maxChannelsForDecreasingSigma = 2880\n maxSigma = 25\n if 12.5 < peakFeatureSigma < maxSigma and npts > 360:\n # No string features, then adjust sigmaEffective up or down depending on amount of channel averaging\n if (npts < 720 and 20 < peakFeatureSigma < maxSigma) or npts >= 1440:\n # nominally for the 480 and 960 channel cases\n sigmaEffective = min(sigmaEffective+4, sigmaEffective*1.25)\n casalogPost('Increasing sigmaEffective to %.2f since mad*12.5<peakFeature<%.1f*mad and channels ~480 or ~960' % (sigmaEffective,maxSigma))\n elif 1440 < npts < 2880:\n # nominally for the 1920 case\n sigmaEffective = max(sigmaEffective-4, sigmaEffective*0.75)\n casalogPost('Decreasing sigmaEffective to %.2f since mad*12.5<peakFeature<%.1f*mad and channels ~ 1920' % (sigmaEffective,maxSigma))\n threshold = sigmaEffective*mad + medianTrue\n # Use a (default=15%) lower negative threshold to help prevent false identification of absorption features.\n negativeThreshold = -negativeThresholdFactor*sigmaEffective*mad + medianTrue\n casalogPost(\"MAD = %f, median = %f, trueMedian=%f, signalRatio=%f\" % (mad, median, medianTrue, signalRatio))\n casalogPost(\"findContinuumChannels: computed threshold = %f, medianTrue=%f\" % (threshold, medianTrue))\n channels = np.where(spectrum < threshold)[0]\n if (negativeThreshold is not None):\n channels2 = np.where(spectrum > negativeThreshold)[0]\n channels = np.intersect1d(channels,channels2)\n\n # for CAS-8059: remove channels that are equal to the minimum if all \n # channels from it toward the nearest edge are also equal to the minimum: \n channels = list(channels)\n if (abs(originalSpectrum[np.min(channels)] - np.min(originalSpectrum)) < abs(1e-10*np.min(originalSpectrum))):\n lastmin = np.min(channels)\n channels.remove(lastmin)\n removed = 1\n casalogPost(\"Checking channels %d-%d\" % (np.min(channels),np.max(channels)))\n for c in range(np.min(channels),np.max(channels)):\n mydiff = abs(originalSpectrum[c] - np.min(originalSpectrum))\n mycrit = abs(1e-10*np.min(originalSpectrum))\n if (mydiff > mycrit):\n break\n if c in channels:\n channels.remove(c)\n removed += 1\n casalogPost(\"Removed %d channels on low channel edge that were at the minimum.\" % (removed))\n # Now come in from the upper side\n if (abs(originalSpectrum[np.max(channels)] - np.min(originalSpectrum)) < abs(1e-10*np.min(originalSpectrum))):\n lastmin = np.max(channels)\n channels.remove(lastmin)\n removed = 1\n casalog.post(\"Checking channels %d-%d\" % (np.max(channels),np.min(channels)))\n for c in range(np.max(channels),np.min(channels)-1,-1):\n mydiff = abs(originalSpectrum[c] - np.min(originalSpectrum))\n mycrit = abs(1e-10*np.min(originalSpectrum))\n if (mydiff > mycrit):\n break\n if c in channels:\n channels.remove(c)\n removed += 1\n casalogPost(\"Removed %d channels on high channel edge that were at the minimum.\" % (removed))\n peakChannels = np.where(spectrum > threshold)[0]\n peakChannelsLists = splitListIntoContiguousLists(peakChannels)\n widthOfWidestFeature = maxLengthOfLists(peakChannelsLists)\n casalogPost(\"Width of widest feature = %d (length 
spectrum = %d)\" % (widthOfWidestFeature, len(spectrum)))\n # C4R2 had signalRatio < 0.6 and spectralDiff2 < 1.2 but this yielded only 1 channel \n # of continuum on NGC6334I spw25 when memory expanded to 256GB.\n if (signalRatio > 0 and signalRatio < 0.925 and spectralDiff2 < 1.3 and \n len(spectrum) > 1000 and trimChannels=='auto' and \n widthOfWidestFeature < len(spectrum)/8):\n # This is meant to prevent rich hot cores from returning only 1\n # or 2 channels of continuum. signalRatio>0 is to avoid conflict\n # with the earlier heuristic above where it is set to zero.\n trimChannels = 13\n if autoNarrow:\n narrow = 2\n casalogPost('Setting trimChannels=%d, narrow=%s since many lines appear to be present (signalRatio=%f).' % (trimChannels,str(narrow), signalRatio))\n else:\n casalogPost('Not changing trimChannels from %s: signalRatio=%f, spectralDiff2=%f' % (str(trimChannels), signalRatio, spectralDiff2))\n \n\n peakMultiChannelsLists = splitListIntoContiguousListsAndRejectNarrow(peakChannels, narrow=2)\n allGroupsAboveSFC = len(peakChannelsLists)\n singleChannelPeaksAboveSFC = allGroupsAboveSFC - len(peakMultiChannelsLists)\n selection = convertChannelListIntoSelection(channels)\n casalogPost(\"Found %d potential continuum channels: %s\" % (len(channels), str(selection)))\n if (len(channels) == 0):\n selection = ''\n groups = 0\n else:\n channels = splitListIntoContiguousListsAndRejectZeroStd(channels, spectrum, nanmin, verbose=verbose)\n if verbose: \n print \"channels = \", channels\n selection = convertChannelListIntoSelection(channels,separator=separator)\n groups = len(selection.split(separator))\n casalogPost(\"Found %d channels after rejecting zero std: %s\" % (len(channels), str(selection)))\n if (len(channels) == 0):\n selection = ''\n else:\n if verbose:\n casalogPost(\"Calling splitListIntoContiguousListsAndTrim(channels=%s, trimChannels=%s, maxTrim=%d, maxTrimFraction=%f)\" % (str(channels), str(trimChannels), maxTrim, maxTrimFraction))\n else:\n casalogPost(\"Calling splitListIntoContiguousListsAndTrim(trimChannels=%s, maxTrim=%d, maxTrimFraction=%f)\" % (str(trimChannels), maxTrim, maxTrimFraction))\n channels = splitListIntoContiguousListsAndTrim(channels, \n trimChannels, maxTrim, maxTrimFraction, verbose)\n if verbose:\n print \"channels = \", channels\n selection = convertChannelListIntoSelection(channels)\n groups = len(selection.split(separator))\n if (groups > maxGroupsForMaxTrimAdjustment and trimChannels=='auto'\n and maxTrim>maxTrimDefault):\n maxTrim = maxTrimDefault\n casalogPost(\"Restoring maxTrim=%d because groups now > %d\" % (maxTrim,maxGroupsForMaxTrimAdjustment))\n if verbose:\n casalogPost(\"Calling splitListIntoContiguousListsAndTrim(channels=%s, trimChannels=%s, maxTrim=%d, maxTrimFraction=%f)\" % (str(channels), str(trimChannels), maxTrim, maxTrimFraction))\n channels = splitListIntoContiguousListsAndTrim(channels, \n trimChannels, maxTrim, maxTrimFraction, verbose)\n if verbose:\n print \"channels = \", channels\n selection = convertChannelListIntoSelection(channels)\n groups = len(selection.split(separator))\n\n if verbose:\n print \"Found %d groups of channels = \" % (groups), channels\n if (groups > 1):\n if verbose:\n casalogPost(\"Calling splitListIntoContiguousListsAndRejectNarrow(channels=%s, narrow=%s)\" % (str(channels), str(narrow)))\n else:\n casalogPost(\"Calling splitListIntoContiguousListsAndRejectNarrow(narrow=%s)\" % (str(narrow)))\n trialChannels = splitListIntoContiguousListsAndRejectNarrow(channels, narrow)\n if 
(len(trialChannels) > 0):\n channels = trialChannels\n casalogPost(\"Found %d channels after trimming %s channels and rejecting narrow groups.\" % (len(channels),str(trimChannels)))\n selection = convertChannelListIntoSelection(channels)\n groups = len(selection.split(separator))\n else:\n casalogPost(\"Not rejecting narrow groups since there is only %d group!\" % (groups))\n casalogPost(\"Found %d continuum channels in %d groups: %s\" % (len(channels), groups, selection))\n if True:\n channels = rejectNarrowInnerWindowsChannels(channels)\n selection = convertChannelListIntoSelection(channels)\n groups = len(selection.split(separator))\n casalogPost(\"Final: found %d continuum channels (sFC=%.2f) in %d groups: %s\" % (len(channels), sigmaFindContinuum, groups, selection))\n return(channels, selection, threshold, median, groups, correctionFactor, \n medianTrue, mad, computeMedianCorrectionFactor(baselineMode, percentile)*signalRatio,\n negativeThreshold, lineStrengthFactor, singleChannelPeaksAboveSFC, \n allGroupsAboveSFC, [spectralDiff, spectralDiff2], trimChannels, \n useLowBaseline, narrow, [allBaselineXChannels,allBaselineChannels], madRatio)" ]
[ "0.713149", "0.6458987", "0.6343248", "0.63266295", "0.613911", "0.57802016", "0.5521701", "0.5485822", "0.5108832", "0.49993712", "0.49877465", "0.4840823", "0.47786245", "0.47172713", "0.46206245", "0.46117097", "0.4591191", "0.45893195", "0.45841068", "0.45762342", "0.4554901", "0.45417878", "0.4530367", "0.45145327", "0.4510188", "0.44987616", "0.44847468", "0.44541776", "0.44473666", "0.4442619", "0.44259802", "0.4422097", "0.44099942", "0.43855268", "0.43612084", "0.43464068", "0.4281195", "0.42790043", "0.42604566", "0.4257486", "0.42530522", "0.42331815", "0.42257863", "0.42177215", "0.41978657", "0.4185542", "0.41774258", "0.4176944", "0.41768748", "0.41626075", "0.4156309", "0.41543865", "0.41510686", "0.41486943", "0.41454884", "0.41418287", "0.41371343", "0.41294044", "0.41024607", "0.41024503", "0.40888938", "0.40881363", "0.4084733", "0.40823746", "0.40783387", "0.40773845", "0.40736076", "0.40661946", "0.40642056", "0.40602446", "0.40587774", "0.40549135", "0.40538067", "0.40522277", "0.4049943", "0.40414074", "0.40316746", "0.4030207", "0.40298235", "0.4028252", "0.40269512", "0.40217406", "0.40201527", "0.40196055", "0.40147367", "0.40124837", "0.40088964", "0.40059838", "0.4000876", "0.4000669", "0.39960065", "0.3993994", "0.39939323", "0.3984984", "0.3983311", "0.39800408", "0.39665863", "0.39621204", "0.39561978", "0.39393726" ]
0.52166533
8
Function to perform a 3 year window filter for a single land cover value (such as Forest as 1) for the final year in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. For the first year of land cover classifications, a window of three consecutive years is used, and if the classifications of the first and last years are different from their neighbours, these values are replaced by the classification of their matching neighbours. This function can be applied to whichever land cover values the user decides, such as all of the land cover values or a select few.
def applyMask3last(imagem, value, bandNames):
    mask = imagem.select(bandNames[-3]).eq(value) \
        .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \
        .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))
    change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)
    img_out = imagem.select(bandNames[0:-1])
    img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))
    return img_out
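A minimal usage sketch for applyMask3last, assuming the Earth Engine Python API is authenticated and that the asset ID and band names below are hypothetical placeholders for a chronologically ordered classification stack:

import ee

ee.Initialize()

# Hypothetical classification stack; bands must be listed in chronological order.
band_names = ['classification_2018', 'classification_2019', 'classification_2020']
image = ee.Image('projects/example/assets/lulc_stack').select(band_names)

# Filter the final year for the Forest class (value 1); earlier bands pass through unchanged.
filtered = applyMask3last(image, 1, band_names)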
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so 
now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = 
ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n 
continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def octave_bands(fc=1000, third=False, start=0.0, n=8):\n\n div = 1\n if third:\n div = 3\n\n # Octave Bands\n fcentre = fc * (\n 2.0 ** (np.arange(start * div, (start + n) * div - (div - 1)) / div)\n )\n fd = 2 ** (0.5 / div)\n bands = np.array([[f / fd, f * fd] for f in fcentre])\n\n return bands, fcentre", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = 
shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional 
reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and 
classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def plot_land_cover(data, year=None, measurement=None, out_width=15, cols=4,):\n # get measurement name\n measurement = get_layer_name(measurement, data)\n\n # get colour map, normalisation\n try:\n cmap, norm = lc_colourmap(measurement)\n except AssertionError:\n\n raise KeyError('Could not automatically determine colour scheme from'\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing'\n 'the name using the \"measurement\" variable For example'\n '(measurement = \"full_classification\")')\n\n height, width = data.geobox.shape\n scale = out_width / width\n\n if year:\n #plotting protocall if 'year' variable is passed\n year_string = f\"{year}-01-01\"\n data = data.sel(time=year_string, method=\"nearest\")\n \n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data, cmap=cmap, norm=norm, interpolation=\"nearest\")\n\n \n elif len(data.time) == 1:\n #plotting protocall if only one timestep is passed and not a year variable\n fig, ax = plt.subplots()\n fig.set_size_inches(width * scale, height * scale)\n make_colorbar(fig, ax, measurement)\n im = ax.imshow(data.isel(time=0), cmap=cmap, norm=norm, interpolation=\"nearest\")\n else:\n #plotting protocall if multible time steps are passed to plot\n if cols > len(data.time):\n cols = len(data.time)\n rows = int((len(data.time) + cols-1)/cols)\n\n fig, ax = plt.subplots(nrows=rows, ncols=cols)\n fig.set_size_inches(\n width * scale, (height * scale / cols) * (len(data.time) / cols))\n\n make_colorbar(fig, ax.flat[0], measurement)\n\n for a, b in enumerate(ax.flat):\n if a < data.shape[0]:\n im = b.imshow(data[a], cmap=cmap, norm=norm,\n interpolation=\"nearest\")\n\n return im", "def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):\n\t\t\tiso = ee.Image(f_iso)\n\t\t\tgeo = ee.Image(f_geo)\n\t\t\tvol = ee.Image(f_vol)\n\t\t\tpred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])\n\t\t\tpred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])\n\t\t\tcfac = pred0.divide(pred).rename(['cfac'])\n\t\t\tcorr = image.select(band_name).multiply(cfac).rename([band_name])\n\t\t\treturn corr", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def winter_avg(var_nc,lat_slice=None,lon_slice=None): \n #\n # accumulate in shape [plev,lat,lon]\n #\n # use the whole array if slice objects are missing\n #\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n 
print \"in winter avg: \",lat_slice,lon_slice\n else:\n num_lats=lat_slice.stop - lat_slice.start\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n #\n # year 0 is special case since it doesn't have a december\n #\n djf0=np.array([0,1],dtype=np.int32) #january and feburary\n the_slice=var_nc[djf0,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0) #average over the two months\n accumulate+=the_slice\n num_years=var_nc.shape[0]//12\n #\n # now year 1 has year 0's december\n #\n djf=np.array([11,12,13],dtype=np.int32)\n #\n # iterate one year less because we've alread\n # done year zero as a special case\n #\n for the_year in np.arange(0,num_years-1):\n the_slice=var_nc[djf,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n djf=djf+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def filter_irrigated(asset, yr, region, filter_type='irrigated', addl_yr=None):\n filt_fc = None\n\n # filter out any weird geometries\n plots = ee.FeatureCollection(asset)\n plots = plots.map(lambda x: x.set('geo_type', x.geometry().type()))\n plots = plots.filter(ee.Filter.eq('geo_type', 'Polygon'))\n\n roi = ee.FeatureCollection(region)\n if filter_type == 'irrigated':\n\n summer_s, late_summer_e = '{}-05-01'.format(yr), '{}-07-15'.format(yr)\n late_summer_s_, summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.median(),\n scale=30.0)\n early_int_mean = early_int_mean.select('median')\n\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n\n filt_fc = combo # .filter(ee.Filter.Or(ee.Filter.gt('median', 0.9), ee.Filter.gt('mean', 0.8)))\n desc = '{}_{}_irr'.format(os.path.basename(region), yr)\n\n elif filter_type == 'dryland':\n\n summer_s, late_summer_e = '{}-07-01'.format(yr), '{}-10-31'.format(yr)\n late_summer_s_, late_summer_e_ = '{}-07-01'.format(addl_yr), '{}-10-31'.format(addl_yr)\n\n lsSR_masked = landsat_masked(yr, roi)\n early_nd = ee.Image(lsSR_masked.filterDate(summer_s, late_summer_e).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd')\n early_nd_max = early_nd.select('nd').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n early_int_mean = early_nd_max.reduceRegions(collection=plots,\n reducer=ee.Reducer.mean(),\n scale=30.0)\n early_int_mean = early_int_mean.select(['mean', 'MGRS_TILE', 'system:index', 'popper'],\n ['nd_e', 'MGRS_TILE', 'system:index', 'popper'])\n\n lsSR_masked = landsat_masked(addl_yr, roi)\n late_nd = ee.Image(lsSR_masked.filterDate(late_summer_s_, late_summer_e_).map(\n lambda x: x.normalizedDifference(['B5', 'B4'])).max()).rename('nd_1')\n late_nd_max = late_nd.select('nd_1').reduce(ee.Reducer.intervalMean(0.0, 15.0))\n\n combo = late_nd_max.reduceRegions(collection=early_int_mean,\n reducer=ee.Reducer.mean(),\n 
scale=30.0)\n\n filt_fc = combo.filter(ee.Filter.Or(ee.Filter.lt('nd_e', 0.7), ee.Filter.lt('mean', 0.7)))\n desc = '{}_dry'.format(os.path.basename(region))\n\n else:\n raise NotImplementedError('must choose from filter_low or filter_high')\n\n task = ee.batch.Export.table.toCloudStorage(filt_fc,\n description=desc,\n bucket='wudr',\n fileFormat='SHP')\n print(yr, filter_type)\n task.start()", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def write_images(band,skypos,tranges,skyrange,write_cnt=False,write_int=False,write_rr=False,framesz=0,width=False,height=False,verbose=0,tscale=1000.,memlight=False,coadd=False,response=False,calpath='../cal/',clobber=False,retries=20):\n\t# No files were requested, so don't bother doing anything.\n\tif not (write_cnt or write_int or write_rr):\n\t\treturn\n\tcount,rr,intensity=create_images(band,skypos,tranges,skyrange,framesz=framesz,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,coadd=coadd,response=response,calpath=calpath,retries=retries)\n\n\t# Add a conditional so that this is only created for multi-frame images\n\ttbl = movie_tbl(band,tranges,framesz=framesz,verbose=verbose,retries=retries)\n\n\tif write_cnt:\n\t\thdu = pyfits.PrimaryHDU(count)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing count image to '+str(write_cnt)\n\t\thdulist.writeto(write_cnt,clobber=clobber)\n\tif write_rr:\n\t\thdu = pyfits.PrimaryHDU(rr)\n\t\thdu = fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing response image to '+str(write_rr)\n hdulist.writeto(write_rr,clobber=clobber)\n\tif write_int:\n\t\thdu = pyfits.PrimaryHDU(intensity)\n\t\thdu = 
fits_header(band,skypos,tranges,skyrange,width=width,height=height,verbose=verbose,tscale=tscale,hdu=hdu,retries=retries)\n\t\thdulist = pyfits.HDUList([hdu,tbl])\n\t\tif verbose:\n\t\t\tprint 'Writing intensity image to '+str(write_int)\n\t\thdulist.writeto(write_int,clobber=clobber)\n\n\treturn", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def preprocess_land_cover(\n src_files, dst_raster, dst_crs, dst_bounds, dst_res, geom=None, overwrite=False\n):\n if os.path.isfile(dst_raster) and not overwrite:\n log.info(\"Land cover data already preprocessed. Skipping.\")\n return\n log.info(\"Starting preprocessing of land cover data.\")\n LC_CLASSES = [\n \"bare\",\n \"crops\",\n \"grass\",\n \"moss\",\n \"shrub\",\n \"tree\",\n \"urban\",\n \"water-permanent\",\n \"water-seasonal\",\n ]\n with TemporaryDirectory(prefix=\"geohealthaccess_\") as tmpdir:\n\n tmpdir = Path(tmpdir)\n for tile in src_files:\n unzip(tile, tmpdir)\n\n reprojected_files = []\n tile_names = unique_tiles(tmpdir)\n\n if not tile_names:\n raise MissingDataError(\"Land cover data not found.\")\n\n for lc_class in LC_CLASSES:\n tiles = [\n p.as_posix()\n for p in tmpdir.glob(f\"*{lc_class}-coverfraction-layer*.tif\")\n ]\n if len(tiles) > 1:\n src_file = merge_tiles(\n tiles, os.path.join(tmpdir, f\"{lc_class}_mosaic.tif\"), nodata=255,\n )\n else:\n src_file = tiles[0]\n reprojected_files.append(\n reproject(\n src_raster=src_file,\n dst_raster=os.path.join(tmpdir, f\"{lc_class}.tif\"),\n dst_crs=dst_crs,\n dst_bounds=dst_bounds,\n dst_res=dst_res,\n src_nodata=255,\n dst_nodata=255,\n dst_dtype=\"Byte\",\n resampling_method=\"cubic\",\n overwrite=overwrite,\n )\n )\n\n if len(reprojected_files) > 1:\n raster = concatenate_bands(\n src_files=reprojected_files,\n dst_file=dst_raster,\n band_descriptions=LC_CLASSES,\n )\n else:\n raster = reprojected_files[0]\n\n if geom:\n mask_raster(raster, geom)", "def write_jpeg(filename,band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t stepsz=1.,clobber=False,verbose=0,tscale=1000.,retries=20):\n\tscipy.misc.imsave(filename,countmap(band,skypos,tranges,skyrange,\n\t\t\t\t\t width=width,height=height,verbose=verbose,tscale=tscale,\n\t\t\t\t\t retries=retries))\n\treturn", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def sky_median_sig_clip(input_arr, sig_fract, percent_fract, max_iter=100, low_cut=True, high_cut=True):\r\n\t\r\n\twork_arr = np.ravel(input_arr)\r\n\told_sky = np.median(work_arr)\r\n\toldStaDesviation = work_arr.std()\r\n\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\tif low_cut and high_cut:\r\n\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\telse:\r\n\t\tif low_cut:\r\n\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tindices = np.where((work_arr < upper_limit))\r\n\twork_arr = work_arr[indices]\r\n\tnew_sky = 
np.median(work_arr)\r\n\titeration = 0\r\n\twhile ((math.fabs(old_sky - new_sky)/new_sky) > percent_fract) and (iteration < max_iter) :\r\n\t\titeration += 1\r\n\t\told_sky = new_sky\r\n\t\toldStaDesviation = work_arr.std()\r\n\t\tupper_limit = old_sky + sig_fract * oldStaDesviation\r\n\t\tlower_limit = old_sky - sig_fract * oldStaDesviation\r\n\t\tif low_cut and high_cut:\r\n\t\t\tindices = np.where((work_arr < upper_limit) & (work_arr > lower_limit))\r\n\t\telse:\r\n\t\t\tif low_cut:\r\n\t\t\t\tindices = np.where((work_arr > lower_limit))\r\n\t\t\telse:\r\n\t\t\t\tindices = np.where((work_arr < upper_limit))\r\n\t\twork_arr = work_arr[indices]\r\n\t\tnew_sky = np.median(work_arr)\r\n\treturn (new_sky, iteration)", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def apply_photo_style(path, decade):\n flt_path = os.path.dirname(path) + \"/\" + str(uuid.uuid4()) + \".jpg\"\n shutil.copyfile(path, flt_path) # make a copy of image because part of the filters change image in place\n f = None\n if decade <= 1930 or decade == 1950 or decade == 1970:\n success = execute_js(js_path, arguments='{} {} {}'.format(path, decade, flt_path)) # execute js rendering with Naked\n if decade == 1930:\n f = Thirties(flt_path)\n if decade == 1940:\n f = Gotham(flt_path)\n \n if decade == 1950 or decade == 1960: # for non-standard photo frames \n padding_x = 80\n if decade == 1950: # kodachrome frame\n padding_top = 80\n padding_bottom = 240\n else: # polaroid frame\n padding_bottom = 80\n padding_x = padding_top = 0\n expand_rect_padding(flt_path, padding_x, padding_top, padding_bottom, flt_path)\n \n if decade == 1950:\n f = Fifties(flt_path)\n if decade == 1960:\n f = Toaster(flt_path)\n if decade == 1970:\n f = Seventies(flt_path)\n if decade == 1980:\n f = Nashville(flt_path)\n if decade == 1990:\n f = Lomo(flt_path)\n if decade == 2000:\n f = Davehill(flt_path)\n \n if f is not None:\n f.apply() # apply photo filter using imagemagick\n\n if decade == 1940:\n # resize fix - gotham filter output image slightly differs in size so resize it to sizes of original image\n origin_img = Image.open(path)\n width, height = origin_img.size \n img = Image.open(flt_path) \n img = img.resize([width,height], Image.ANTIALIAS)\n img.save(flt_path, \"JPEG\")\n\n return flt_path", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = 
[nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def band_selector(image, colors):\n # convert band to list for downstream compatibilty, if necessary\n if len(colors) == 3: #then it's an RGB image\n\n #housekeeping\n try:\n nbands = len(colors['band'])\n except: \n colors['band'] = [colors['band']]\n nbands = len(colors['band'])\n\n try:\n len(colors['dark_on_light'])\n except:\n colors['dark_on_light'] = [colors['dark_on_light']]\n\n if colors['colorspace'] is 'gray' or colors['colorspace'] is 'grey':\n colors['band'] = [0]\n nbands = 1\n if len(colors['dark_on_light']) > 1:\n raise ValueError(\n \"\"\"Can't interpret multiple arguments for 'dark_on_light' when \n 'colorspace' is {}.\n \"\"\".format(colors['colorspace'])\n )\n \n if nbands != len(colors['dark_on_light']):\n raise ValueError(\n \"\"\"Number of items in `colors['dark_on_light']` doesn't\n equal the number of bands in `colors['band']`!\"\"\"\n )\n\n # convert colorspace if necessary\n try:\n working_image = getattr(color, \"rgb2\" + colors['colorspace'].lower())(image)\n except:\n working_image = image.copy()\n if colors['colorspace'].lower() != 'rgb':\n raise ValueError(\n \"\"\"Didn't recognize specified colorspace. 
\n See skimage.color.rgb2* for options.\"\"\"\n )\n \n # pull bands\n if len(working_image.shape) == 3: # excludes rgb2gray\n working_image = [img_split(working_image)[i] for i in colors['band']]\n else:\n working_image = [working_image]\n nbands = 1\n \n else: # it's a black and white image\n nbands = 1\n working_image = [image.copy()]\n if len(image.shape) != 2:\n raise ValueError(\n \"\"\"Your `color` argument suggested a grayscale image, but it has \\\n multiple bands!\"\"\"\n )\n \n return(working_image)", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def sky_groups():\n cam = \"sky\"\n for light, lens, ndc, good, window in [(True, True, False, True, True),\n (True, True, False, True, False),\n (True, True, False, False, False),\n (True, False, False, True, False),\n (True, False, False, False, False),\n (False, True, False, True, True),\n (False, True, False, False, True)]:\n filenames = flatfiles(cam)\n filenames = get_light_sky(filenames, light)\n filenames = get_lens(filenames, lens)\n filenames 
= get_ndc(filenames, ndc)\n filenames = get_good(filenames, good)\n filenames = get_window_sky(filenames, window)\n images = valid_images(filenames)\n process_images(images, cam, (light, lens, ndc, good, window))", "def wfc3Dispersion(xc, yc, subarray=256):\n coord0 = (1014 - subarray) // 2\n xc = xc + coord0\n yc = yc + coord0\n DLDP0 = [8949.40742544, 0.08044032819916265]\n DLDP1 = [44.97227893276267,\n 0.0004927891511929662,\n 0.0035782416625653765,\n -9.175233345083485e-7,\n 2.2355060371418054e-7, -9.258690000316504e-7]\n # calculate field dependent dispersion coefficient\n p0 = DLDP0[0] + DLDP0[1] * xc\n p1 = DLDP1[0] + DLDP1[1] * xc + DLDP1[2] * yc + \\\n DLDP1[3] * xc**2 + DLDP1[4] * xc * yc + DLDP1[5] * yc**2\n dx = np.arange(1014) - xc\n wavelength = (p0 + dx * p1)\n if subarray < 1014:\n i0 = (1014 - subarray) // 2\n wavelength = wavelength[i0: i0 + subarray]\n return wavelength", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = 
np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint \"No data in \"+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint 'No data in this frame: '+str([t0,t1])\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = 
sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def extract_cochlear_subbands(nets, SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, pad_factor, debug, subbands_ifft, return_subbands_only, rectify_and_lowpass_subbands, rFFT, custom_filts, erb_filter_kwargs, include_all_keys, compression_function, include_subbands_noise, subbands_noise_mean, subbands_noise_stddev):\n\n # make the erb filters tensor\n nets['filts_tensor'] = make_filts_tensor(SIGNAL_SIZE, SR, LOW_LIM, HIGH_LIM, N, SAMPLE_FACTOR, use_rFFT=rFFT, pad_factor=pad_factor, custom_filts=custom_filts, erb_filter_kwargs=erb_filter_kwargs)\n\n # make subbands by multiplying filts with fft of input\n nets['subbands'] = tf.multiply(nets['filts_tensor'],nets['fft_input'],name='mul_subbands')\n if debug: # return the real and imaginary parts of the subbands separately -- use if matching to their output\n nets['subbands_r'] = tf.real(nets['subbands'])\n nets['subbands_i'] = tf.imag(nets['subbands'])\n\n # TODO: with using subbands_ifft is redundant. \n # make the time subband operations if we are returning the subbands or if we want to include all of the keys in the graph\n if subbands_ifft or return_subbands_only or include_all_keys:\n if not rFFT:\n nets['subbands_ifft'] = tf.real(tf.ifft(nets['subbands'],name='ifft_subbands'),name='ifft_subbands_r')\n else:\n nets['subbands_ifft'] = tf.spectral.irfft(nets['subbands'],name='ifft_subbands')\n if return_subbands_only or include_all_keys:\n nets['subbands_time'] = nets['subbands_ifft']\n if rectify_and_lowpass_subbands: # TODO: the subband operations are hard coded in?\n nets['subbands_time_relu'] = tf.nn.relu(nets['subbands_time'], name='rectified_subbands')\n nets['subbands_time_lowpassed'] = hanning_pooling_1d_no_depthwise(nets['subbands_time_relu'], downsample=2, length_of_window=2*4, make_plots=False, data_format='NCW', normalize=True, sqrt_window=False)\n\n # TODO: noise is only added in the case when we are calcalculating the time subbands, but we might want something similar for the cochleagram\n if return_subbands_only or include_all_keys:\n # Compress subbands if specified and add noise. 
\n nets = compression_function(nets, input_node_name='subbands_time_lowpassed', output_node_name='subbands_time_lowpassed_compressed')\n if include_subbands_noise:\n nets = add_neural_noise(nets, subbands_noise_mean, subbands_noise_stddev, input_node_name='subbands_time_lowpassed_compressed', output_node_name='subbands_time_lowpassed_compressed_with_noise')\n nets['subbands_time_lowpassed_compressed_with_noise'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed_with_noise'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed_with_noise']\n else:\n nets['subbands_time_lowpassed_compressed'] = tf.expand_dims(nets['subbands_time_lowpassed_compressed'],-1)\n nets['subbands_time_processed'] = nets['subbands_time_lowpassed_compressed']\n\n return nets", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def get_rgb_bands(image, bands):\n if bands is not MONOCHROME:\n red = image[:, :, bands['red']]\n green = image[:, :, bands['green']]\n blue = image[:, :, bands['blue']]\n\n img = np.rollaxis(np.array([red, green, blue]), 0, 3)\n else:\n img = color.grey2rgb(image)\n\n return img", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def create_grism_waverange(outname=\"\",\n history=\"Ground NIRCAM Grismwavelengthrange\",\n author=\"STScI\",\n filter_range=None):\n ref_kw = common_reference_file_keywords(reftype=\"wavelengthrange\",\n title=\"NIRCAM Grism wavelenghtrange\",\n description=\"NIRCAM Grism+Filter Wavelength Ranges\",\n exp_type=\"NRC_GRISM\",\n author=author,\n model_type=\"WavelengthrangeModel\",\n filename=outname,\n )\n\n if filter_range is None:\n # These numbers from Nor Pirzkal, in microns\n filter_range = {1: {'F250M': [2.500411072, 4.800260833],\n 'F277W': [2.500411072, 3.807062006],\n 'F300M': [2.684896869, 4.025318456],\n 'F322W2': [2.5011293930000003, 4.215842089],\n 'F335M': [3.01459734, 4.260432726],\n 'F356W': [3.001085025, 4.302320901],\n 'F360M': [3.178096344, 4.00099629],\n 'F410M': [3.6267051809999997, 4.5644598],\n 'F430M': [4.04828939, 4.511761774],\n 'F444W': [3.696969216, 4.899565197],\n 'F460M': [3.103778615, 4.881999188],\n 'F480M': [4.5158154679999996, 4.899565197]},\n 2: {'F250M': [2.500411072, 2.667345336],\n 'F277W': [2.500411072, 3.2642254050000004],\n 'F300M': [2.6659796289999997, 3.2997071729999994],\n 'F322W2': [2.5011293930000003, 4.136119434],\n 'F335M': [2.54572003, 3.6780519760000003],\n 'F356W': [2.529505253, 4.133416971],\n 'F360M': [2.557881113, 4.83740855],\n 'F410M': [2.5186954019999996, 4.759037127],\n 'F430M': [2.5362614100000003, 4.541488865],\n 'F444W': [2.5011293930000003, 4.899565197],\n 'F460M': [2.575447122, 4.883350419],\n 'F480M': [2.549773725, 4.899565197]}}\n\n # array of integers\n orders = list(filter_range.keys())\n orders.sort()\n\n # same filters for every order, array of strings\n wrange_selector = list(filter_range[orders[0]].keys())\n wrange_selector.sort()\n\n # The lists below need\n # to remain ordered to be correctly referenced\n wavelengthrange = []\n for order in orders:\n o = []\n for fname in wrange_selector:\n o.append(filter_range[order][fname])\n wavelengthrange.append(o)\n\n ref = wcs_ref_models.WavelengthrangeModel()\n ref.meta.update(ref_kw)\n ref.meta.exposure.p_exptype = 
\"NRC_GRISM|NRC_TSGRISM\"\n ref.meta.input_units = u.micron\n ref.meta.output_units = u.micron\n ref.wrange_selector = wrange_selector\n ref.wrange = wavelengthrange\n ref.order = orders\n\n entry = HistoryEntry({'description': history, 'time': datetime.datetime.utcnow()})\n sdict = Software({'name': 'nircam_reftools.py',\n 'author': author,\n 'homepage': 'https://github.com/spacetelescope/jwreftools',\n 'version': '0.7.1'})\n entry['sofware'] = sdict\n ref.history['entries'] = [entry]\n ref.to_asdf(outname)\n ref.validate()", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def middle_east_countries():\r\n middle_east_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in middle_east:\r\n middle_east_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in middle_east_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def upper_middle_income_countries():\r\n upper_middle_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in upper_middle_countries:\r\n upper_middle_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in upper_middle_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def slice_bands(self, band_idx):\n new_eigenvals = self.eigenvals.T[sorted(band_idx)].T\n return type(self)(kpoints=self.kpoints, eigenvals=new_eigenvals)", "def closing_by_reconstruction_image_filter(*args, **kwargs):\n import itk\n instance = itk.ClosingByReconstructionImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def cover_crop_added(self):\n\n ## getting input parameter\n crop_input = self.soil_inputs.crop_cover.values[0]\n if pd.isnull(crop_input):\n crop_input = \"nan\"\n #climate_input = self.soil_inputs.climate.values[0]\n years_cropcover_tech = self.soil_inputs.time_using_crop_cover.values[0]\n\n if np.isnan(years_cropcover_tech):\n years_cropcover_tech = 10\n\n if self.language == \"spanish\":\n #climate_options = [i.lower() for i in tl.climate_options[0]]\n cover_crop_options = [i.lower() for i in tl.cover_crop_options[0]]\n else:\n #climate_options = [i.lower() for i in tl.climate_options[1]]\n cover_crop_options = [i.lower() for 
i in tl.cover_crop_options[1]]\n\n if crop_input.lower() in cover_crop_options:\n\n cc_eng_input = tl.cover_crop_options[1][cover_crop_options.index(crop_input.lower())]\n self._cc_eng_input = cc_eng_input\n #cl_eng_input = tl.climate_options[1][climate_options.index(self._cl_eng_input.lower())]\n\n covercropfilter = ef.cover_cropping_factors.Change.str.lower() == cc_eng_input.lower()\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == self._cl_eng_input.lower()\n\n if climatefilter.sum() == 0:\n cl_eng_input = tl.world_climate_bouwman[1][tl.world_climate_bouwman[0].index(self._cl_eng_input)]\n climatefilter = ef.cover_cropping_factors.Climate.str.lower() == cl_eng_input.lower()\n\n filter_conditions = climatefilter & covercropfilter\n if np.array(filter_conditions).sum() != 0:\n factor_change_20years = ef.cover_cropping_factors.Factor.loc[filter_conditions].values[0]\n else:\n factor_change_20years = 1\n\n self.cover_crop_soc_change = cumulative_socemissions_for_20years(years_cropcover_tech,\n factor_change_20years,\n self.soil_c_stock)\n else:\n self.cover_crop_soc_change = [0]", "def pil_image_mask_by_band_value(img, band, val, cval=0):\n # type: (PImage.Image, int, int) -> PImage.Image\n\n num_bands = len(img.getbands())\n\n if band >= num_bands:\n raise ValueError('Cannot get band with index {} from image with {} bands'.format(band, num_bands))\n\n # Create a look up table where only one value maps to itself and everything else to cval\n other_band_lut = [cval] * 256\n target_band_lut = [cval] * 256\n target_band_lut[val] = val\n lut = []\n\n for i in range(num_bands):\n if i == band:\n lut += target_band_lut\n else:\n lut += other_band_lut\n\n img = img.point(lut)\n return img", "def numberOfWideBands(config=None):\n # Get correlator configuration\n c = config\n if c == None: \n c = utils.getConfigAstroband()\n\n # Determine if we have both wideband and spectral line astrobands. 
\n # If we do, we return nwide & maxbandwidth for sl only since \n # this is the correlator which will be attached to all ants.\n astrobands = [ abc[0] for abc in c ]\n if len( astrobands ) == 0:\n raise Exception, \"No existing astroband configuration.\"\n if max( astrobands ) > 8 and min( astrobands ) < 9: \n astrobands = [ ab for ab in astrobands if ab < 9 ]\n\n # Check bandwidth\n nwide = 0\n maxbandwidth = 0\n for t in c:\n astroband = t[0]\n # Skip band if it is not being used or is not in astroband list above.\n mp = commands.queryString('SignalPath.Mapping.Astroband%d.confTag' % (astroband) )\n if mp == 'NONE' or astroband not in astrobands: continue\n\n # Get bandwidth\n if t[2] == commands.BW500:\n bw = 500\n elif t[2] == commands.BW250:\n bw = 250\n elif t[2] == commands.BW125:\n bw = 125\n elif t[2] == commands.BW62:\n bw = 62\n elif t[2] == commands.BW31:\n bw = 31\n elif t[2] == commands.BW8:\n bw = 8\n elif t[2] == commands.BW2:\n bw = 2\n else:\n raise Exception, 'Could not find bandwith for '+str(t[2])\n\n # Maximum?\n if bw > maxbandwidth: \n maxbandwidth = bw\n if utils.isDualPol( astroband ):\n nwide = 2 \n else:\n nwide = 1\n elif bw == maxbandwidth:\n if utils.isDualPol( astroband ): \n nwide += 2 \n else:\n nwide += 1\n\n return nwide, maxbandwidth", "def lower_middle_income_countries():\r\n lower_middle_countries_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in lower_middle_countries:\r\n lower_middle_countries_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in lower_middle_countries_data:\r\n if idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def calculate_overall_winners(pContestGroup, pContestsRequired, pYear):\n # Work out how many bands competed that year\n lCursor = connection.cursor()\n lCursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestresult r, contests_contestevent e, contests_contest c WHERE r.contest_event_id = e.id AND e.contest_id = c.id AND c.group_id = %(groupid)s AND extract(year from e.date_of_event) = %(year)s\", {'groupid': pContestGroup.id, 'year' : pYear})\n lRows = lCursor.fetchall()\n for row in lRows:\n lBandsCompeting = row[0]\n lCursor.close()\n\n # get full results for that year - first element is the winner\n lResults = calculate_overall_results(pContestGroup, pContestsRequired, pYear)\n try:\n lWinner = lResults[0]\n except IndexError:\n lWinner = {}\n return lWinner, lBandsCompeting", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if not self._is_numeric(name):\n msg = \"Can only band numeric typed data! 
{} is {}.\"\n msg = msg.format(name, self._get_type(name))\n raise TypeError(msg)\n if not text_key: text_key = self.text_key\n if not new_name: new_name = '{}_banded'.format(name)\n if not label: label = self.text(name, False, text_key)\n franges = []\n for idx, band in enumerate(bands, start=1):\n lab = None\n if isinstance(band, dict):\n lab = list(band.keys())[0]\n band = list(band.values())[0]\n if isinstance(band, tuple):\n if band[0] < 0:\n raise ValueError('Cannot band with lower bound < 0.')\n elif band[1] < 0:\n raise ValueError('Cannot band with upper bound < 0.')\n r = '{}-{}'.format(band[0], band[1])\n franges.append([idx, lab or r, {name: frange(r)}])\n else:\n r = str(band)\n franges.append([idx, lab or r, {name: [band]}])\n\n self.derive(new_name, 'single', label, franges,\n text_key=text_key)\n\n return None", "def seasonal_avg(var_nc,the_season,lat_slice=None,lon_slice=None): \n the_season=np.array(the_season,dtype=np.int32)\n if (lat_slice is None) and (lon_slice is None):\n num_lats=var_nc.shape[2]\n num_lons=var_nc.shape[3]\n lat_slice=slice(0,num_lats)\n lon_slice=slice(0,num_lons)\n else:\n if lat_slice.stop is None:\n num_lats=var_nc.shape[2]\n else:\n num_lats=lat_slice.stop - lat_slice.start\n if lon_slice.stop is None:\n num_lons=var_nc.shape[3]\n else:\n num_lons=lon_slice.stop - lon_slice.start\n num_levs=var_nc.shape[1]\n accumulate=ma.zeros([num_levs,num_lats,num_lons],dtype=var_nc.dtype)\n num_years=var_nc.shape[0]//12\n\n for the_year in np.arange(0,num_years):\n the_slice=var_nc[the_season,:,lat_slice,lon_slice]\n the_slice=ma.mean(the_slice,axis=0)\n accumulate+=the_slice\n the_season=the_season+12\n accumulate=accumulate/num_years \n the_avg=ma.mean(accumulate,axis=1)\n the_avg=ma.mean(the_avg,axis=1)\n return the_avg", "def calculate_band(value, bands):\n for band in bands:\n if band > value:\n return band", "def masked_f3kdb(clip: vs.VideoNode,\n rad: int = 16,\n thr: Union[int, List[int]] = 24,\n grain: Union[int, List[int]] = [12, 0],\n mask_args: Dict[str, Any] = {}\n ) -> vs.VideoNode:\n from debandshit import dumb3kdb\n\n deb_mask_args: Dict[str, Any] = dict(brz=(1000, 2750))\n deb_mask_args |= mask_args\n\n bits, clip = _get_bits(clip)\n\n deband_mask = detail_mask(clip, **deb_mask_args)\n\n deband = dumb3kdb(clip, radius=rad, threshold=thr, grain=grain, seed=69420)\n deband_masked = core.std.MaskedMerge(deband, clip, deband_mask)\n deband_masked = deband_masked if bits == 16 else depth(deband_masked, bits)\n return deband_masked", "def filter_bands(self, imagery, bands=None, names=None, wavelengths=None) -> 'ImageCollection':\n\n graph = {\n 'process_id': 'filter_bands',\n 'imagery': imagery.graph,\n }\n\n if bands:\n graph['bands'] = bands\n if names:\n graph['names'] = names\n if wavelengths:\n graph['wavelengths'] = wavelengths\n\n imagery.graph = graph\n return imagery", "def filter_ic(ic,year,month):\n \n ic_filtered = (ic.filter(ee.Filter.eq(\"month\",month))\n .filter(ee.Filter.eq(\"year\",year)))\n image = ee.Image(ic_filtered.first())\n return(image)", "def collapse(self):\n try:\n wavelengths = pylab.linspace(self.start, self.end,\n self.image.shape[not self.waveaxis])\n except TypeError:\n print 'The starting and ending wavelengths must be specified.'\n background = pylab.zeros(len(wavelengths))\n backgroundlines = 0\n data = pylab.zeros(len(wavelengths))\n datalines = 0\n for region in self.regions:\n if region['group'] is 0:\n backgroundlines += region['max'] - region['min']\n background += self.image[region['min']:region['max'] + 
1, :]\\\n .sum(axis=self.waveaxis)\n else:\n datalines += region['max'] - region['min']\n data += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n background = [sum/backgroundlines for sum in background]\n data = [sum/datalines for sum in data]\n corrected = pylab.array(data) - pylab.array(background)\n output = Spectrum(list(wavelengths), list(corrected))\n return output", "def modify_bands(\n xraster: xr.core.dataarray.DataArray, input_bands: List[str],\n output_bands: List[str], drop_bands: List[str] = []):\n # Do not modify if image has the same number of output bands\n if xraster['band'].shape[0] == len(output_bands):\n return xraster\n\n # Drop any bands from input that should not be on output\n for ind_id in list(set(input_bands) - set(output_bands)):\n drop_bands.append(input_bands.index(ind_id)+1)\n return xraster.drop(dim=\"band\", labels=drop_bands, drop=True)", "def precip_stats_to_climatology(fili, start_year=1981, end_year=2015):\n\n nyear = end_year - start_year + 1\n \n ds = xr.open_dataset(fili)\n\n year = ds['time'].dt.year\n #dsMsk = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).count(dim='time')\n dsClm = ds.isel( time=( (year >= start_year) & (year <= end_year) ) ).mean(dim='time', skipna=False)\n #dsClm = dsClm.where(dsMsk == nyear)\n \n #dsMsk.to_netcdf('era5.count.nc4')\n\n print (dsClm)\n \n filo = fili.replace('annual','annual.clm')\n print (f'Writing climatology to {filo}') \n dsClm.to_netcdf(filo)\n\n return", "def end_of_year_returns(model_roi, return_type, start, end):\n\n # converts index of datetimes to a list of strings\n dates = model_roi.index.astype('str').tolist()\n\n # list comprehension of a string of dates between the\n # start and end dates\n years = [str(x) for x in range(start, end + 1)]\n\n # generates an empty list of lists for each year\n end_year_dates = [[] for _ in range(len(years))]\n\n # iterates over every date in the roi series\n for date in dates:\n\n # iterates over every year in the years list\n for year in years:\n\n # iterates over every year in each date\n if year in date:\n\n # converts each date string to a datime object\n datetime_object = datetime.strptime(date, '%Y-%m-%d')\n\n # appends each date to its corresponding year in the years list\n (end_year_dates[years.index(year)]\n .append(datetime.strftime(datetime_object, '%Y-%m-%d')))\n\n # finds the last date in each year\n end_year_dates = [max(x) for x in end_year_dates]\n\n # gets the rounded end of year returns\n returns = [round(model_roi[date], 1) for date in end_year_dates]\n\n # shifts the returns list by 1 and appends 0 to the beginning of the list\n return_rates = [0] + returns[:len(returns)-1]\n \"\"\"Example: [a, b, c] -> [0, a, b]\"\"\"\n\n # converts returns list to an array\n returns_arr = np.array(returns)\n\n # converts the return_rates list to an array\n return_rates_arr = np.array(return_rates)\n\n # calculates the rounded rate of returns\n return_rates = [round(x, 1) for x in list(returns_arr - return_rates_arr)]\n \"\"\"Example: [a, b, c] - [0, a, b] = [a, b-a, c-b]\"\"\"\n\n # dictionary with the years as keys and returns as values\n returns = dict(zip(years, returns))\n\n # dictionary with the years as keys and return rates as values\n return_rates = dict(zip(years, return_rates))\n\n if return_type == 'returns':\n return returns\n\n if return_type == 'return_rates':\n return return_rates", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = 
list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def build_sea_data(\n start_year=1999,\n end_year=2016,\n netcdf_path=\"data/sea_level/netcdf/\",\n target_lon=175.8606890,\n target_lat=-36.993684,\n buffer_degrees=0.5,\n path_out=\".\",\n):\n # tairua_coords = (-36.993684, 175.8606890)\n df_sea_data = pd.DataFrame()\n\n for year in range(start_year, end_year + 1):\n ds_first = xr.open_mfdataset(\n os.path.join(netcdf_path, f\"dt_global_twosat_phy_l4_{year}*.nc\")\n )\n\n target_lon = xr.DataArray(\n list(target_lon + np.linspace(-buffer_degrees, buffer_degrees))\n )\n target_lat = xr.DataArray(\n list(target_lat + np.linspace(-buffer_degrees, buffer_degrees))\n )\n\n ds_tairua = ds_first[[\"adt\", \"ugos\", \"vgos\"]].sel(\n longitude=target_lon, latitude=target_lat, method=\"nearest\"\n )\n df_sealevel_pandas = (\n ds_tairua.resample(time=\"MS\")\n .mean()\n .mean(dim=\"dim_0\")\n .to_dataframe()\n )\n\n df_sea_data = pd.concat([df_sea_data, df_sealevel_pandas])\n\n print(\n f\"************************Done {year} ************************************\"\n )\n print(df_sea_data.tail(10))\n\n df_sea_data.to_csv(os.path.join(path_out, \"df_sea_data.csv\"))", "def get_sea_level_raw(start_year, end_year, path_out):\n c = cdsapi.Client()\n\n for year in range(start_year, end_year + 1):\n\n print(f\"Starting Year: {year}\")\n\n c.retrieve(\n \"satellite-sea-level-global\",\n {\n \"format\": \"tgz\",\n \"year\": [str(year)],\n \"month\": [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n ],\n \"day\": [\n \"01\",\n \"02\",\n \"03\",\n \"04\",\n \"05\",\n \"06\",\n \"07\",\n \"08\",\n \"09\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n \"20\",\n \"21\",\n \"22\",\n \"23\",\n \"24\",\n \"25\",\n \"26\",\n \"27\",\n \"28\",\n \"29\",\n \"30\",\n \"31\",\n ],\n },\n os.path.join(path_out, str(year) + \"_download.tar.gz\"),\n )", "def hourly_grib2_to_netcdf(grib_file, grib_source, nc_file, nc_var_name,\n grib_var_name, grib_level, cache_size=100,\n initial_year=1979, overwrite_nc_units=None,\n include_analysis=True,\n nc_format='NETCDF4'):\n\n list_of_msg_dicts = gribou.get_all_msg_dict(grib_file)\n list_of_i, analysis_present = filter_var_timesteps(list_of_msg_dicts,\n grib_var_name,\n grib_level,\n include_analysis)\n cfsr_var = CFSRVariable(list_of_msg_dicts[list_of_i[0]])\n lats, lons = gribou.get_latlons(grib_file, list_of_i[0] + 1)\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S\")\n nc1 = netCDF4.Dataset(nc_file, 'w', format=nc_format)\n\n nc1.Conventions = 'CF-1.5'\n nc1.title = 'Climate System Forecast Reanalysis'\n nc1.history = \"%s: Convert from grib2 to NetCDF\" % (now,)\n nc1.institution = 'NCEP'\n nc1.source = 'Reanalysis'\n nc1.references = 'http://cfs.ncep.noaa.gov/cfsr/'\n if analysis_present:\n msg1 = \"Obtained from %s server, \" % (grib_source,)\n msg2 = \"analysis is included, 6h forecast removed.\"\n nc1.comment = msg1 + msg2\n else:\n msg1 = \"Obtained from %s server, \" % (grib_source,)\n msg2 = \"no analysis, 6h forecast is included.\"\n nc1.comment = msg1 + msg2\n nc1.redistribution = \"Free to redistribute.\"\n\n nc1.createDimension('time', None)\n nc1.createDimension('timecomp', 6)\n nc1.createDimension('lat', lats.shape[0])\n nc1.createDimension('lon', lats.shape[1])\n\n nc1.createVariable('timecomp', 'i2', ('timecomp',), zlib=True, fill_value=defi2)\n\n time = nc1.createVariable('time', 
'i4', ('time',), zlib=True)\n time.axis = 'T'\n if initial_year is None:\n warp = (str(cfsr_var.grib_msg_dict['year']),)\n else:\n warp = (initial_year,)\n time.units = \"hours since %s-01-01 00:00:00\" % warp\n time.long_name = 'time'\n time.standard_name = 'time'\n time.calendar = 'gregorian'\n\n time_vectors = nc1.createVariable('time_vectors', 'i2', ('time', 'timecomp'),\n zlib=True)\n\n vtype = cfsr_var.vertical_type\n if vtype in ['depthBelowSea', 'heightAboveGround']:\n try:\n dummy = len(cfsr_var.level)\n bounds = True\n except:\n bounds = False\n else:\n nc1.createDimension('nv', 2)\n level = nc1.createVariable('level', 'f4', (), zlib=True)\n level.axis = 'Z'\n level.units = cfsr_var.vertical_units\n if vtype == 'depthBelowSea':\n level.positive = 'up'\n else:\n level.positive = 'down'\n level.long_name = vtype\n level.standard_name = standard_names[vtype]\n if bounds:\n level.bounds = 'level_bnds'\n level_bnds = nc1.createVariable('level_bnds', 'f4', ('nv',), zlib=True)\n level_bnds[0] = cfsr_var.level[0]\n level_bnds[1] = cfsr_var.level[1]\n level[:] = (level_bnds[0] + level_bnds[1]) / 2.0\n else:\n level[:] = cfsr_var.level\n\n lat = nc1.createVariable('lat', 'f4', ('lat'), zlib=True)\n lat.axis = 'Y'\n lat.units = 'degrees_north'\n lat.long_name = 'latitude'\n lat.standard_name = 'latitude'\n lat[:] = lats[::-1, 0]\n\n lon = nc1.createVariable('lon', 'f4', ('lon'), zlib=True)\n lon.axis = 'X'\n lon.units = 'degrees_east'\n lon.long_name = 'longitude'\n lon.standard_name = 'longitude'\n lon[:] = lons[0, :]\n\n warp = optimal_chunksizes(len(list_of_i), lat.size, lon.size)\n var1 = nc1.createVariable(nc_var_name, 'f4', ('time', 'lat', 'lon'), zlib=True,\n fill_value=deff4, chunksizes=warp)\n if overwrite_nc_units is None:\n var1.units = cfsr_var.units\n else:\n var1.units = overwrite_nc_units\n var1.long_name = cfsr_var.name\n var1.standard_name = standard_names[nc_var_name]\n var1.statistic = cfsr_var.statistic\n\n t = 0 # counter for the NetCDF file\n c = 0 # counter for our temporary array\n temporary_array = ma.zeros([cache_size, var1.shape[1], var1.shape[2]])\n temporary_tvs = np.zeros([cache_size, 6])\n flag_runtimeerror = False\n for i, grb_msg in enumerate(gribou.msg_iterator(grib_file)):\n if i not in list_of_i:\n continue\n try:\n data = grb_msg['values'][::-1, :]\n except RuntimeError:\n data = ma.masked_all([var1.shape[1], var1.shape[2]])\n flag_runtimeerror = True\n dt = list_of_msg_dicts[i]['endStep'] - list_of_msg_dicts[i]['startStep']\n if cfsr_var.statistic == 'avg':\n if dt == 1:\n temporary_array[c, :, :] = data\n else:\n if list_of_msg_dicts[i]['startStep'] != 0:\n raise NotImplementedError(\"Weird delta t?\")\n x = list_of_msg_dicts[i]['endStep']\n temporary_array[c, :, :] = x * data - (x - 1) * previous_data\n elif cfsr_var.statistic == 'accum':\n if dt == 1:\n temporary_array[c, :, :] = data / 3600.0\n else:\n if list_of_msg_dicts[i]['startStep'] != 0:\n raise NotImplementedError(\"Weird delta t?\")\n temporary_array[c, :, :] = (data - previous_data) / 3600.0\n else:\n temporary_array[c, :, :] = data\n temporary_tvs[c, 0] = list_of_msg_dicts[i]['year']\n temporary_tvs[c, 1] = list_of_msg_dicts[i]['month']\n temporary_tvs[c, 2] = list_of_msg_dicts[i]['day']\n warp = list_of_msg_dicts[i]['hour'] + list_of_msg_dicts[i]['endStep']\n temporary_tvs[c, 3] = warp\n if temporary_tvs[c, 3] == 24:\n temporary_tvs[c, 3] = 0\n warp = CalGregorian.count_days_in_cycle(temporary_tvs[c, 1],\n temporary_tvs[c, 0])\n if temporary_tvs[c, 2] == warp:\n temporary_tvs[c, 2] = 1\n 
if temporary_tvs[c, 1] == 12:\n temporary_tvs[c, 1] = 1\n temporary_tvs[c, 0] = temporary_tvs[c, 0] + 1\n else:\n temporary_tvs[c, 1] = temporary_tvs[c, 1] + 1\n else:\n temporary_tvs[c, 2] = temporary_tvs[c, 2] + 1\n temporary_tvs[c, 4] = 0\n temporary_tvs[c, 5] = 0\n c += 1\n if c == cache_size:\n c = 0\n if nc_var_name == 'clt':\n var1[t:t + cache_size, :, :] = temporary_array / 100.0\n else:\n var1[t:t + cache_size, :, :] = temporary_array\n time_vectors[t:t + cache_size, :] = temporary_tvs\n t += cache_size\n previous_data = data\n if nc_var_name == 'clt':\n var1[t:t + c, :, :] = temporary_array[0:c, :, :] / 100.0\n else:\n var1[t:t + c, :, :] = temporary_array[0:c, :, :]\n time_vectors[t:t + c, :] = temporary_tvs[0:c, :]\n\n datetimes, masked, valid = nc._time_vectors_to_datetimes(time_vectors[:, :])\n num1 = netCDF4.date2num(datetimes, time.units, time.calendar)\n if time.dtype in [np.int8, np.int16, np.int32, np.int64]:\n time[valid] = np.array(np.round(num1), dtype=time.dtype)\n else:\n time[valid] = num1\n if len(masked): time[masked] = ma.masked_all([len(masked)])\n\n if flag_runtimeerror:\n nc1.warnings = \"RuntimeError encountered, missing values inserted.\"\n nc1.close()", "def scrape_world_cup_scoreboard(year):\n # Replace this with the results logic somehow...\n\n d = world_cup_mapping[year]\n prefix = 'http://www.fifa.com'\n if type(d) == int:\n root_url = '/worldcup/archive/edition=%s/' % d\n else:\n root_url = '/worldcup/archive/%s/' % d\n data = scrape_url(prefix + root_url + \"results/index.html\")\n\n # Find urls in the page.\n match_re = re.compile(root_url + \"results/matches/match=\\d+/report.html\")\n urls = match_re.findall(data)\n return [prefix + e for e in urls]", "def warm_region_cal(audio_samples, fs):\n #window the audio\n windowed_samples = timbral_util.window_audio(audio_samples)\n\n # need to define a function for the roughness stimuli, emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 10\n max_bark_band = 40\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n wr_array = np.zeros(240)\n wr_array[min_bark_band:max_bark_band] = x\n\n # need to define a second array emphasising the 20 - 40 region (of the bark scale)\n min_bark_band = 80\n max_bark_band = 240\n mean_bark_band = (min_bark_band + max_bark_band) / 2.0\n array = np.arange(min_bark_band, max_bark_band)\n x = timbral_util.normal_dist(array, theta=0.01, mean=mean_bark_band)\n x -= np.min(x)\n x /= np.max(x)\n\n hf_array = np.zeros(240)\n hf_array[min_bark_band:max_bark_band] = x\n\n windowed_loud_spec = []\n windowed_rms = []\n\n wr_vals = []\n hf_vals = []\n\n for i in range(windowed_samples.shape[0]):\n samples = windowed_samples[i, :]\n N_entire, N_single = timbral_util.specific_loudness(samples, Pref=100.0, fs=fs, Mod=0)\n\n # append the loudness spec\n windowed_loud_spec.append(N_single)\n windowed_rms.append(np.sqrt(np.mean(samples * samples)))\n\n wr_vals.append(np.sum(wr_array * N_single))\n hf_vals.append(np.sum(hf_array * N_single))\n\n mean_wr = np.mean(wr_vals)\n mean_hf = np.mean(hf_vals)\n weighted_wr = np.average(wr_vals, weights=windowed_rms)\n weighted_hf = np.average(hf_vals, weights=windowed_rms)\n\n return mean_wr, weighted_wr, mean_hf, weighted_hf", "def bandname(self):\n if self._properties['bandname'] is None:\n self._properties['bandname'] = \"wisew1\" if \"-w1-\" in self.filename \\\n else 
\"wisew2\" if \"-w2-\" in self.filename \\\n else \"wisew3\" if \"-w3-\" in self.filename \\\n else \"wisew4\" if \"-w4-\" in self.filename \\\n else \"unknown\"\n return self._properties['bandname']", "def filter_tracks(df, start_year=1980, end_year=2010, zeta=0, age=36):\n tracks = df.groupby('num')\n filterdf = tracks.filter(lambda x: (x['datetime'].dt.year.min() >= start_year) &\n (x['datetime'].dt.year.max() <= end_year) &\n (x['age'].max() >= age) &\n (np.abs(x['vorticity'].min()) > zeta))\n return filterdf", "def imdb_crawl_by_years(years, verbose):\n for year in years:\n imdb_crawl_by_year(year, verbose)", "def contours_and_data(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1, data='s82', N=60000):\n if data == 's82':\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n sind = np.abs(Xcoadd[:, idx]) < 0.03\n gind = np.abs(Xcoadd[:, idx]) > 0.03\n\n else:\n # fetch DR10 data\n X, Xcov = fetch_prepped_dr10data(N, fgal, features, filters)\n sind = np.abs(X[:, idx]) < 0.145\n gind = np.abs(X[:, idx]) > 0.145\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n ms = 1\n lsize = 20\n idx = [[0, -1], [2, 3], [3, 4]]\n xlim = [(18., 22), (-0.5, 2.5), (-0.5, 2)]\n ylim = [(-0.1, 0.5), (-0.5, 2.5), (-0.5, 1.5)]\n xlab = ['psfmag $r$', 'modelmag $g-r$', 'modelmag $r-i$']\n ylab = ['psfmag - modelmag $r$', 'modelmag $r-i$', 'modelmag $i-z$']\n\n f = pl.figure(figsize=(3 * fs, 3 * fs))\n Nstar = len(np.where(model.fixed_means[:, idx] != np.inf)[0])\n pl.subplots_adjust(wspace=0.3)\n for i in range(1, 10):\n k = (i - 1) % 3\n if i < 4:\n ind = np.arange(X.shape[0], dtype=np.int)\n rng = range(model.n_components)\n elif 3 < i < 7:\n ind = sind\n rng = range(Nstar)\n else:\n ind = gind\n rng = range(Nstar, model.n_components)\n ax = pl.subplot(3, 3, i)\n for j in rng:\n if model.alpha[j] > 1.e-3:\n draw_ellipse(model.mu[j, idx[k]],\n model.V[j, idx[k]][:, idx[k]],\n scales=[2], ec='k', fc='gray', alpha=0.2)\n pl.plot(X[ind][::10, idx[k][0]],\n X[ind][::10, idx[k][1]], '.k',ms=ms)\n pl.xlim(xlim[k])\n pl.ylim(ylim[k])\n pl.xlabel(xlab[k], fontsize=lsize)\n pl.ylabel(ylab[k], fontsize=lsize)\n if ('psf' in ylab[k]) & ('model' in ylab[k]):\n ytick = ['%0.1f' % v for v in np.linspace(-.1, 0.4, 6)]\n ytick[0] = ''\n ax.set_yticklabels(ytick)\n if i == 1:\n s = 'All'\n elif i == 3:\n s = '\"Stars\"'\n else:\n s = '\"Galaxies\"'\n ax.text(-.3, 0.5, s, ha='center', va='center', fontsize=25,\n rotation='vertical', transform=ax.transAxes)\n f.savefig(figname, bbox_inches='tight')", "def north_america_countries():\r\n north_america_data = []\r\n years = []\r\n medians = []\r\n lst = []\r\n for idx in range(1960, 2016):\r\n years.append(idx)\r\n for idx in north_america:\r\n north_america_data.append(life_expectancy_graph(idx))\r\n y_idx = 0\r\n for idx in north_america_data:\r\n if idx != None and idx != {}:\r\n if (list(idx.keys())[y_idx]) == years[y_idx]:\r\n lst.append((list(idx.values())[y_idx]))\r\n lst = sorted(lst)\r\n medians.append(median(lst))\r\n return medians", "def maskClouds(self,img):\n\t\t\n\t\tscore = ee.Image(1.0);\n\t\t# Clouds are reasonably bright in the blue band.\n\t\tblue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide(ee.Number(0.3).subtract(ee.Number(0.1)))\n\t\tscore = score.min(blue_rescale);\n\n\t\t# Clouds are reasonably bright in all visible 
bands.\n\t\tvisible = img.select('red').add(img.select('green')).add(img.select('blue'))\n\t\tvisible_rescale = visible.subtract(ee.Number(0.2)).divide(ee.Number(0.8).subtract(ee.Number(0.2)))\n\t\tscore = score.min(visible_rescale);\n\n\t\t# Clouds are reasonably bright in all infrared bands.\n\t\tinfrared = img.select('nir').add(img.select('swir1')).add(img.select('swir2'))\n\t\tinfrared_rescale = infrared.subtract(ee.Number(0.3)).divide(ee.Number(0.8).subtract(ee.Number(0.3)))\n\t\tscore = score.min(infrared_rescale);\n\n\t\t# Clouds are reasonably cool in temperature.\n\t\ttemp_rescale = img.select('thermal').subtract(ee.Number(300)).divide(ee.Number(290).subtract(ee.Number(300)))\n\t\tscore = score.min(temp_rescale);\n\n\t\t# However, clouds are not snow.\n\t\tndsi = img.normalizedDifference(['green', 'swir1']);\n\t\tndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide(ee.Number(0.6).subtract(ee.Number(0.8)))\n\t\tscore = score.min(ndsi_rescale).multiply(100).byte();\n\t\tmask = score.lt(self.env.cloudThreshold).rename(['cloudMask']);\n\t\timg = img.updateMask(mask);\n \n\t\treturn img;", "def get_bands(self, data_array_norm, baseline_array_norm, f):\n\n fmax = 50\n fidx = f < fmax\n fnum = f[fidx].size\n\n band_tot = np.zeros((fnum, fnum, data_array_norm.shape[0], data_array_norm.shape[2], data_array_norm.shape[3]))\n band_tot_bl = np.zeros((fnum, fnum, baseline_array_norm.shape[0], baseline_array_norm.shape[2], baseline_array_norm.shape[3]))\n for i in range(fnum):\n for j in range(fnum):\n if j > i:\n idx = (f >= f[i]) & (f < f[j])\n band_tot[i, j, :, :] = np.sum(data_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n band_tot_bl[i, j, :, :] = np.sum(baseline_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n\n\n band_tot_bl1 = np.mean(band_tot_bl, axis=3) # average across time bins\n band_tot_bl2 = np.repeat(band_tot_bl1[:, :, :, None, :], band_tot_bl.shape[3], axis=3) # repeat same value across time\n return band_tot, band_tot_bl2, f[fidx]", "def bins_per_year(self):\n # Load the vector version #\n df = self.grouped_bins.reset_index()\n # Add year and remove TimeStep #\n df['year'] = self.country.timestep_to_year(df['time_step'])\n df = df.drop('time_step', axis=1)\n # Only if we are in the calibration scenario #\n if self.parent.parent.scenario.short_name == 'calibration':\n # Patch the harvest data frame to stop at the simulation year #\n selector = df['year'] <= self.parent.parent.country.base_year\n df = df.loc[selector].copy()\n # Return #\n return df", "def obtain_filters_mask(model, threshold, cba_index, prune_index):\n\n num_pruned_bn = 0\n num_total_bn = 0\n num_remain_filters = []\n mask_remain_filters = []\n\n # The number of filters reserved must be a multiple of 8\n int_multiple = 8\n filter_switch = list(range(0, 1024, int_multiple))\n\n # cba_index stores all convolution layers with BN layer (the previous layer of YOLO layer is without BN layer)\n for index in cba_index:\n bn_module = model.module_list[index][1]\n if index in prune_index:\n mask = obtain_bn_mask(bn_module, threshold).cpu().numpy()\n num_layer_remain_bn = int(mask.sum())\n if num_layer_remain_bn < 8:\n layer_sort_bn = bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-8]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n else:\n for i, _ in enumerate(filter_switch):\n if num_layer_remain_bn < filter_switch[i]:\n num_layer_remain_bn = filter_switch[i - 1]\n break\n layer_sort_bn = 
bn_module.weight.data.abs().clone()\n value_sort_bn = torch.sort(layer_sort_bn)[0]\n layer_threshold = value_sort_bn[-num_layer_remain_bn]\n mask = obtain_bn_mask(bn_module, layer_threshold).cpu().numpy()\n\n num_remain_bn = int(mask.sum())\n num_pruned_bn = num_pruned_bn + mask.shape[0] - num_remain_bn\n\n if num_remain_bn == 0:\n print(\"Channels would be all pruned!\")\n raise Exception\n\n logger.info('layer index: %d \\t total channel: %d \\t remaining channel: %d',\n index, mask.shape[0], num_remain_bn)\n else:\n mask = np.ones(bn_module.weight.data.shape)\n num_remain_bn = mask.shape[0]\n num_total_bn += mask.shape[0]\n num_remain_filters.append(num_remain_bn)\n mask_remain_filters.append(mask.copy())\n\n prune_ratio = num_pruned_bn / num_total_bn\n logger.info('Prune channels: %d \\t Prune ratio: %.3f', num_pruned_bn, prune_ratio)\n\n return num_remain_filters, mask_remain_filters", "def get_year_tracks(year):\n print(f\"Total Tracks in {year}: {get_num_of_tracks(year)}\")\n\n query_format = f\"year:{year} track:\"\n\n search_string_letter_ids = [0]\n\n tracks = {}\n\n total = 0;\n\n while (search_string_letter_ids is not None):\n search_string = construct_search_string(search_string_letter_ids)\n count = track_count(query_format + search_string)\n print(f\"{search_string} : {count}\")\n if count < 2000:\n for i in range(0, count, 50):\n track_results = sp.search(query_format + search_string, type='track', limit=50, offset=i)\n for t in track_results['tracks']['items']:\n if t['id'] not in tracks:\n total += 1\n tracks[t['id']] = {'name': t['name']}\n\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=True)\n else:\n search_string_letter_ids = get_next_search_string(search_string_letter_ids, last_was_under=False)\n\n print(f\"Tracks Saved In File: {total}\")\n\n file = save_to_json(tracks, f\"Tracks{year}.json\")\n return file", "def set_rocks_in_grad(self, elevation, landcover):\n # Compute the steepness of each pixel\n grad = gaussian_gradient_magnitude(elevation, 1.0)\n grad /= self.mercator.Resolution(self.__zoom)\n # Get the mask of rock (with a smooth transition)\n mask = (grad >= ROCK_STEEPNESS).astype(np.float)\n mask = gaussian_filter(mask, 3.0)\n # Blend the images\n dtype = landcover.dtype\n rock_image = np.zeros(landcover.shape, dtype=dtype)\n rock_image[:,:] = ROCK_COLOR\n for i in range(3):\n rock_image[:,:,i] = (mask * rock_image[:,:,i]).astype(dtype)\n landcover[:,:,i] = ((1.0 - mask) * landcover[:,:,i]).astype(dtype)\n landcover += rock_image\n return landcover", "def filter_roi(roi_data, nb_nonzero_thr):\n # Discard slices with less nonzero voxels than nb_nonzero_thr\n return not np.any(roi_data) or np.count_nonzero(roi_data) <= nb_nonzero_thr", "def background(self, header):\n band = get_filt_band(header)\n if self.spl_dict[band] is None:\n return self.jd_b_dict[band][0][1]\n T = Time(header['DATE-OBS'], format='fits')\n return np.asscalar(self.spl_dict[band](T.jd))", "def reduce_dataset(X, year):\n\n drop_list = [i for i in range(config.DB_YEAR_MIN, config.DB_YEAR_MAX + 1)]\n drop_list.remove(year - 1)\n red_X = X.drop(drop_list, axis=0)\n return red_X", "def compress_netcfd(folder_path, start_date, out_folder, file_name, num_of_rivids):\n\n # Based on 15 day forecast\n forecast_day_indices = np.array([0, 8, 16, 24, 32, 40, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84], dtype=np.int8)\n\n # Based on 10 day forecast\n # Excluding the first day because we already have initialization from the normal forecasts\n 
high_res_forecast_day_indices = np.array([24, 48, 72, 92, 100, 108, 112, 116, 120, 124])\n\n start_datetime = to_datetime(start_date, infer_datetime_format=True)\n dates = date_range(start_datetime + DateOffset(1), periods=15)\n high_res_dates = date_range(start_datetime + DateOffset(1), periods=10)\n\n # Ensemble Dimensions\n # 1) Rivid\n # 2) Number of forecast days (i.e. 15 in a 15 day forecast)\n # 3) Number of ensembles\n\n ensembles = np.zeros((num_of_rivids, 15, 51), dtype=np.float32)\n initialization = np.zeros((num_of_rivids,), dtype=np.float32)\n\n for forecast_number in range(1, 52):\n file = os.path.join(folder_path, \"{}_{}.nc\".format(file_name, forecast_number))\n\n tmp_dataset = xr.open_dataset(file)\n streamflow = tmp_dataset['Qout'].data\n streamflow = streamflow[:, forecast_day_indices]\n\n if forecast_number == 1:\n initialization[:] = streamflow[:, 0]\n rivids = tmp_dataset['rivid'].data\n lat = tmp_dataset['lat'].data\n lon = tmp_dataset['lon'].data\n z = tmp_dataset['z'].data\n\n ensembles[:, :, forecast_number - 1] = streamflow[:, 1:]\n\n tmp_dataset.close()\n\n # High Res Forecast\n file = os.path.join(folder_path, \"{}_52.nc\".format(file_name))\n\n tmp_dataset = xr.open_dataset(file)\n\n high_res_forecast_data = tmp_dataset[\"Qout\"].data\n high_res_forecast_data = high_res_forecast_data[:, high_res_forecast_day_indices]\n\n tmp_dataset.close()\n\n data_variables = {\n \"Qout\": (['rivid', 'date', 'ensemble_number'], ensembles),\n \"Qout_high_res\": (['rivid', 'date_high_res'], high_res_forecast_data)\n }\n\n coords = {\n 'rivid': rivids,\n 'date': dates,\n 'date_high_res': high_res_dates,\n 'ensemble_number': np.arange(1, 52, dtype=np.uint8),\n 'initialization_values': ('rivid', initialization),\n 'lat': ('rivid', lat),\n 'lon': ('rivid', lon),\n 'z': ('rivid', z),\n 'start_date': start_datetime\n }\n\n xarray_dataset = xr.Dataset(data_variables, coords)\n xarray_dataset.to_netcdf(path=os.path.join(out_folder, '{}.nc'.format(start_date)), format='NETCDF4')", "def ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-15\n YMAX=15\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # 
calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def conditional_to_greyscale(self, image):\r\n bands = image.getbands()\r\n if len(bands) >= 3:\r\n # histogram for all bands concatenated\r\n hist = image.histogram()\r\n if len(hist) >= 768:\r\n hist1 = hist[0:256]\r\n hist2 = hist[256:512]\r\n hist3 = hist[512:768]\r\n # print \"length of histograms: %d %d %d\" % (len(hist1), len(hist2), len(hist3))\r\n if hist1 == hist2 == hist3:\r\n # print \"All histograms are the same!\"\r\n return image.convert('L')\r\n return image", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 
3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n 
df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def movie(band,skypos,tranges,skyrange,framesz=0,width=False,height=False,\n\t\t verbose=0,tscale=1000.,memlight=False,coadd=False,\n\t\t response=False,calpath='../cal/',hdu=False,retries=20):\n\t# Not defining stepsz effectively creates a count map.\n\tmv = []\n\trr = []\n\tif coadd:\n\t\tif verbose>2:\n\t\t\tprint 'Coadding across '+str(tranges)\n\t\tmv.append(countmap(band,skypos,tranges,skyrange,width=width,\n\t\t\t\t height=height,verbose=verbose,tscale=tscale,memlight=memlight,\n\t\t\t\t hdu=hdu,retries=retries))\n\t\trr.append(rrhr(band,skypos,tranges,skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,hdu=hdu,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\telse:\n\t\tfor trange in tranges:\n\t\t\tstepsz = framesz if framesz else trange[1]-trange[0]\n\t\t\tsteps = np.ceil((trange[1]-trange[0])/stepsz)\n\t\t\tfor i,t0 in enumerate(np.arange(trange[0],trange[1],stepsz)):\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint_inline('Movie frame '+str(i+1)+' of '+str(int(steps)))\n\t\t\t\tt1 = trange[1] if i==steps else t0+stepsz\n\t\t\t\tmv.append(countmap(band,skypos,[[t0,t1]],skyrange,width=width,height=height,verbose=verbose,tscale=tscale,memlight=memlight,hdu=hdu,retries=retries))\n\t# FIXME: This should not create an rr unless it's 
requested...\n\t\t\t\trr.append(rrhr(band,skypos,[[t0,t1]],skyrange,response=response,width=width,height=height,stepsz=1.,verbose=verbose,calpath=calpath,tscale=tscale,retries=retries)) if response else rr.append(np.ones(np.shape(mv)[1:]))\n\n\treturn np.array(mv),np.array(rr)", "def resample(self, octave_bands):\n self.energy_absorption = {\n \"coeffs\": octave_bands(**self.energy_absorption),\n \"center_freqs\": octave_bands.centers,\n }\n self.scattering = {\n \"coeffs\": octave_bands(**self.scattering),\n \"center_freqs\": octave_bands.centers,\n }" ]
[ "0.73957485", "0.67384607", "0.65364456", "0.6378453", "0.61525947", "0.61262476", "0.5607345", "0.55978113", "0.5576969", "0.4884514", "0.48811036", "0.48680866", "0.4859982", "0.47968277", "0.47706997", "0.476356", "0.47634727", "0.47294396", "0.47226104", "0.4682663", "0.46414387", "0.45971748", "0.45451874", "0.4488063", "0.44866607", "0.44803828", "0.44522887", "0.44513443", "0.4450906", "0.44263735", "0.43899956", "0.43514735", "0.43376043", "0.4330374", "0.4325817", "0.43091312", "0.43088886", "0.43056694", "0.4285487", "0.42775786", "0.42662787", "0.42653602", "0.42594346", "0.4246089", "0.424549", "0.42441946", "0.42376307", "0.42320675", "0.4222916", "0.42228422", "0.4221016", "0.4209543", "0.42094016", "0.42057532", "0.42030764", "0.42015418", "0.41945726", "0.4157634", "0.4150862", "0.41461414", "0.41242656", "0.41218275", "0.4112924", "0.4099573", "0.40993357", "0.40846062", "0.40839648", "0.40755412", "0.40699595", "0.40646186", "0.40638632", "0.4060622", "0.405686", "0.4055706", "0.404957", "0.40444398", "0.40442735", "0.40411806", "0.4039678", "0.40360138", "0.40290898", "0.40255278", "0.40233365", "0.40228748", "0.40195906", "0.40139407", "0.4012097", "0.40116733", "0.40046844", "0.3995176", "0.3994025", "0.39897174", "0.39866233", "0.398517", "0.3976914", "0.3974064", "0.39683187", "0.3965463", "0.39648116", "0.3964067" ]
0.53521746
9
Function to perform a forward-moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The forward gap fill is applied iteratively from the first year of bandNames through the final year: wherever the current year's image has missing data, it is filled with the previous year's (already gap-filled) values.
def applyForwardNoDataFilter(image, bandNames):
    #Get a list of band names from year(1) through the last year
    bandNamesEE = ee.List(bandNames[1:])

    #Define forwards filter
    #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year
    #currentImage = image.select(bandNames[1]), the image for the second year
    #previousImage = image.select(bandNames[0]), the first year
    #Find where the second year has missing data, replace those values with the values of the first year
    #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill
    #and the second band is the first year's classification
    #The iteration continues, now with previousImage.select([0]) being the second year with the gap fill applied, and bandName is the third year
    def forwardNoDataFilter(bandName, previousImage):
        currentImage = image.select(ee.String(bandName))
        previousImage = ee.Image(previousImage)
        currentImage = currentImage.unmask(previousImage.select([0]))
        return currentImage.addBands(previousImage)

    #Iterate through all the years, starting with the first year's classification
    filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))
    filtered = ee.Image(filtered)
    return filtered.select(bandNames)
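A minimal usage sketch of the function above, assuming the Earth Engine Python API is authenticated and initialized; the asset ID and band names below are hypothetical placeholders, not from the source.

import ee

ee.Initialize()

# Hypothetical multi-band image: one classification band per year.
band_names = ['classification_2018', 'classification_2019', 'classification_2020']
annual_lc = ee.Image('users/example/annual_lc')  # hypothetical asset path

# Masked (missing) pixels in 2019 are filled from 2018; masked pixels in 2020
# are then filled from the already-filled 2019 band.
gap_filled = applyForwardNoDataFilter(annual_lc, band_names)
print(gap_filled.bandNames().getInfo())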
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all 
missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 
'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def fill_year(timeseries, value=0):\n # Obtain firts and last date from timeseries\n first_date = timeseries.index.min()\n last_date = timeseries.index.max()\n\n one_year_date = last_date - timedelta(days=365)\n\n ## Obtain the sunday beofre the date of one year ago\n starting_date = one_year_date - timedelta(days=one_year_date.weekday()+1)\n\n assert starting_date.weekday_name == 'Sunday'\n\n\n # Fill dates with mising zero\n date_range_series = create_timeseries(starting_date,\n first_date-timedelta(days=1),\n value)\n\n # Fill the original timeseries\n filled_timeseries = pd.concat([date_range_series, timeseries])\n\n return filled_timeseries", "def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def complete_zeros(df_dm,year):\n df_dm.insert(1,year,0)\n return df_dm", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def gap_years_aggregated(mongo_client):\n db = mongo_client[\"nobel\"]\n\n original_categories = sorted(set(db.prizes.distinct(\"category\", {\"year\": \"1901\"})))\n\n pipeline = [\n 
{\"$match\": {\"category\": {\"$in\": original_categories}}},\n {\"$project\": {\"category\": 1, \"year\": 1}},\n\n # Collect the set of category values for each prize year.\n {\"$group\": {\"_id\": \"$year\", \"categories\": {\"$addToSet\": \"$category\"}}},\n\n # Project categories *not* awarded (i.e., that are missing this year).\n {\"$project\": {\"missing\": {\"$setDifference\": [original_categories, \"$categories\"]}}},\n\n # Only include years with at least one missing category\n {\"$match\": {\"missing.0\": {\"$exists\": True}}},\n\n # Sort in reverse chronological order. Note that \"_id\" is a distinct year at this stage.\n {\"$sort\": OrderedDict([(\"_id\", -1)])},\n ]\n\n for doc in db.prizes.aggregate(pipeline):\n print(\"{year}: {missing}\".format(year=doc[\"_id\"], missing=\", \".join(sorted(doc[\"missing\"]))))", "def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def FS2Years(inputFolderPath = './FormattedFilesWithoutMissingToNextYear', outputFolderPath = './FormattedFilesWithoutMissingToNextYear'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if 
item.startswith(str(targetList[i]).zfill(4)+'N')]\n\t\t\t regex = re.compile(\"....N:.*\")\n\t\t\t nextYearIDs = [idx for idx, item in enumerate(header) if regex.search(item)]\n\t\t\t nextYearCount = len(nextYearIDs)\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\t# X = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\tX = dataset[:,nextYearCount:dataset.shape[1]]\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not np.isnan(x1)])\n\n\t\t\tk = 40\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[nextYearCount+1:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\tprint 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('FeatureSelectionIndicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()", "def resample(self, dataframes, freq='5s'):\n\n for df in dataframes:\n yield df.resample(freq, fill_method='bfill')", "def increment_year(self):", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def imdb_crawl_by_years(years, verbose):\n for year in years:\n imdb_crawl_by_year(year, verbose)", "def fill_in_data(color,frames,fs=25):\n color = color\n colormat = color.as_matrix()\n frameDiff = np.diff(colormat.T[2])\n locations = np.where(frameDiff!=1)[0]\n\n #Calculate number of frames skipped\n #sample = []\n #sample = colormat.T\n sample = sample[:2].T\n #frames = range(100,len(colormat.T[2])+100)\n #frames = np.linspace(frames[0],frames[-1],frames[-1]-frames[0]+1)\n #frames = frames[:len(frames)-1]\n \n #if locations is empty, try looking for a row of nans\n if np.all(locations):\n for i in range(len(sample)):\n if np.all(sample[i] == 0):\n sample[i]=[np.nan, np.nan]\n missing = list(np.where(np.isnan(sample.T[0])))\n\n else:\n numfill = []\n missing = []\n for i in locations:\n numfill.append(frames[i+1]-frames[i])#-1)\n #pdb.set_trace()\n missing.append(np.linspace(i+1,i+1+numfill[-1],numfill[-1]))\n\n missing = np.concatenate(missing)\n\n missing = missing[:len(missing)-1]\n missing = missing.astype(int)\n\n pdb.set_trace()\n\n for j in reversed(missing):\n sample = np.insert(sample,j,(np.nan,np.nan),axis = 0)\n #frames = 
np.insert(frames,j,j,axis=0)\n\n color_x,color_y,x_filt=KFilt(sample,fs)\n color_mat = np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1]))\n return color_mat,frames,x_filt", "def fill_between(initial,final):\n return np.arange(initial + 1, final)", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. 
To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def main(years=(2000, 2019)):\n year_list = range(years[0], years[1] + 1)\n dfs = []\n for year in year_list:\n dfs.append(get_df(year))\n print(f\"Done: {len(dfs)} dataframes written\")", "def modis_lai_fill_gap(in_path, doy_start, doy_end):\n date_start = datetime.datetime.strptime(doy_start, \"%Y%m%d\").date()\n date_end = datetime.datetime.strptime(doy_end, \"%Y%m%d\").date()\n\n for nc_file in os.listdir(in_path):\n if nc_file.endswith('.nc'):\n nc_date = datetime.datetime.strptime(nc_file[:-3], \"%Y%m%d\").date()\n if date_start <= nc_date <= date_end:\n print(nc_file, \"----\",)\n doy = int(datetime.datetime.strptime(nc_file[:-3], '%Y%m%d').strftime('%Y%j'))\n for new_doy in [doy + x for x in range(1, 4)]:\n shutil.copy2(os.path.join(in_path, nc_file), os.path.join(in_path, '{}.nc'.format(\n datetime.datetime.strptime(str(new_doy), '%Y%j').strftime('%Y%m%d'))))\n print('{}.nc'.format(\n datetime.datetime.strptime(str(new_doy), '%Y%j').strftime('%Y%m%d')),)\n print('\\n')", "def fill_gaps(self):\n\n for source in self.sources.keys():\n if source in self.staticsources:\n continue\n src = self.sources[source]\n print '[INFO] Scanning ' + source + ' for gaps'\n src.fill_gaps()", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def linear_interpolate(df, offset, final_year=\"2050\", harmonize_year=\"2015\"):\n df = df.copy()\n x1, x2 = harmonize_year, final_year\n y1, y2 = offset + df[x1], df[x2]\n m = (y2 - y1) / (float(x2) - float(x1))\n b = y1 - m * float(x1)\n\n cols = [x for x in utils.numcols(df) if int(x) < int(final_year)]\n for c in cols:\n df[c] = m * float(c) + b\n return df", "def _enumerate_years(self, preprocessed_data, disjoint):\n pass", "def 
applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. 
in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def accumulate_to_year_end(da, year_ends, mask=None, shift=5, accumulate=12, time_name='time'):\n da = da.shift({time_name: shift}) \\\n .rolling({time_name: accumulate}).sum()\n da = da.where(da[time_name].dt.month == year_ends) \\\n .resample({time_name: '1YS'}).sum(skipna=True)\n if mask is None:\n return da\n else:\n return da.where(mask == 1)", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def austral_year_daily(x, y):\n if isinstance(x, xr.DataArray):\n x = x.values\n \n jfmamj = x < 182.\n jasond = x >= 182.\n \n x_jasond = []\n y_jasond = []\n if any(jasond):\n x_jasond = x[jasond] - 181\n y_jasond = y[jasond]\n\n x_jfmamj = []\n y_jfmamj = []\n if any(jfmamj):\n x_jfmamj = x[jfmamj] + 184\n y_jfmamj = y[jfmamj]\n\n xout = np.concatenate([xi for xi in [x_jasond, x_jfmamj] if len(xi)])\n yout = np.concatenate([yi for yi in [y_jasond, y_jfmamj] if len(yi)])\n \n return xout, yout", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not 
removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def estimate_year_data(self, years, frequency):\n data_year = self.price.index.year.unique()\n no_data_year = {pd.Period(year) for year in years} - {pd.Period(year) for year in data_year} # which years do we not have data for\n\n if len(no_data_year) > 0:\n for yr in no_data_year:\n source_year = pd.Period(max(data_year))\n\n source_data = self.price[self.price.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, 
self.energy_growth, source_year, yr, frequency)\n self.price = pd.concat([self.price, new_data], sort=True) # add to existing\n\n source_data = self.p_regu[self.p_regu.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.p_regu = pd.concat([self.p_regu, new_data], sort=True) # add to existing\n\n source_data = self.p_regd[self.p_regd.index.year == source_year.year] # use source year data\n new_data = Lib.apply_growth(source_data, self.growth, source_year, yr, frequency)\n self.p_regd = pd.concat([self.p_regd, new_data], sort=True) # add to existing", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n 
fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def fill_between_steps(x, y0, y1, ax=None, *args, **kwargs):\n so = np.argsort(x)\n mid = x[so][:-1] + np.diff(x[so])/2.\n xfull = np.append(np.append(x, mid), mid+np.diff(x[so])/1.e6)\n y0full = np.append(np.append(y0, y0[:-1]), y0[1:])\n y1full = np.append(np.append(y1, y1[:-1]), y1[1:])\n \n so = np.argsort(xfull)\n if ax is None:\n ax = plt.gca()\n \n ax.fill_between(xfull[so], y0full[so], y1full[so], *args, **kwargs)", "def start_requests(self):\n initial_year = self.start_date.year\n end_year = datetime.date.today().year\n for year in range(initial_year, end_year + 1):\n yield Request(\n f\"{self.GAZETTE_URL}?dir={year}\",\n meta={\"year\": year},\n callback=self.parse_year,\n )", "def interpolate(df):\n for x in df.columns:\n if x == \"date\":\n continue\n df[x] = df[x].interpolate(method='linear', axis=0).ffill().bfill()\n return df", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def impute_ferc714_hourly_demand_matrix(df: pd.DataFrame) -> pd.DataFrame:\n results = []\n for year, gdf in df.groupby(df.index.year):\n logger.info(f\"Imputing year {year}\")\n keep = df.columns[~gdf.isnull().all()]\n tsi = 
pudl.analysis.timeseries_cleaning.Timeseries(gdf[keep])\n result = tsi.to_dataframe(tsi.impute(method=\"tnn\"), copy=False)\n results.append(result)\n return pd.concat(results)", "def yearly_avg(dacycle,avg):\n\n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )\n yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)\n\n if not os.path.exists(yeardir):\n print \"Creating new output directory \" + yeardir\n os.makedirs(yeardir)\n\n files = os.listdir(monthdir) # get monthly files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if not files:\n print \"No full year finished yet, skipping yearly average...\"\n return\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')\n fileinfo[filename] = date\n\n years = set([d.year for d in fileinfo.values()])\n\n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(years=+1)\n \n avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if not len(avg_files) == 12 : \n print \"Year %04d not finished yet, skipping yearly average...\"%sd.year\n else:\n targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))\n \n if not os.path.exists(targetfile):\n print \"Year %04d is complete, I have 12 months for the next file\"%sd.year\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n\n sd = nd", "def fake_date_fill(df, back_method: str = 'slice'):\n df_index = df.index.to_series().copy()\n df2 = df.sort_index(ascending=False).copy()\n df2 = df2.apply(lambda x: pd.Series(x.dropna().values))\n df2 = df2.sort_index(ascending=False)\n df2.index = df_index.tail(len(df2.index))\n df2 = df2.dropna(how='all', axis=0)\n if df2.empty:\n df2 = df.fillna(0)\n\n if back_method == 'bfill':\n df2 = fill_forward(df2)\n return df\n elif back_method == 'slice':\n thresh = int(df.shape[1] * 0.5)\n thresh = thresh if thresh > 1 else 1\n df3 = df2.dropna(thresh=thresh, axis=0)\n if df3.empty or df3.shape[0] < 8:\n df3 = fill_forward(df2)\n else:\n df3 = fill_forward(df3)\n return df3\n elif back_method == 'keepna':\n return df2\n else:\n print('back_method not recognized in fake_date_fill')\n return df2", "def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n if i == 0:\n for draw in list(range(0, 1000)):\n current[f'draw_{draw}'] = 1\n else:\n prior = (data.loc[data.year == years[i - 1]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n current = 1 - ((current - prior) * 0.75 / current)\n current['year'] = years[i]\n final = pd.concat([final, current])\n final = final.reset_index().set_index([c for c in data.columns if 'draw' not in c]).sort_index()\n return final", "def combine_data(self):\n for country in config.COUNTRIES:\n frames = []\n for year in config.years:\n incidence_path = (config.raw_data_path / country / 'complete'\n / (str(year) + '_' + str(year + 1) + '.csv'))\n\n if incidence_path.exists() and incidence_path.is_file():\n 
df_incidence = pd.read_csv(incidence_path)\n\n wiki_path1 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year) + '.csv')\n wiki_path2 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year + 1) + '.csv')\n\n if wiki_path1.exists() and wiki_path1.is_file():\n df_wiki1 = pd.read_csv(wiki_path1)\n df_wiki1 = df_wiki1.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki1, df_incidence, on='week', how='right')\n\n if wiki_path2.exists() and wiki_path2.is_file():\n df_wiki2 = pd.read_csv(wiki_path2)\n df_wiki2 = df_wiki2.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki2, df_incidence, on='week', how='right')\n\n for col_name in df_incidence.columns:\n if col_name[-1] == 'x':\n if col_name[:-2] + '_y' in df_incidence.columns:\n df_incidence[col_name[:-2]] = df_incidence[\n col_name].fillna(\n df_incidence[col_name[:-2] + '_y'])\n df_incidence = df_incidence.drop(\n columns=[col_name,\n col_name[:-2] + '_y'])\n\n frames.append(df_incidence)\n\n df_country = pd.concat(frames)\n df_country['date'] = pd.to_datetime(\n df_country.week.add('-0'), format='%Y-%W-%w')\n df_country = df_country.sort_values(by=\"date\")\n\n if 'cases' in df_country.columns:\n df_country.drop(columns=['cases'])\n\n file_path = config.combined_data_path / (country + '.csv')\n\n df_country.to_csv(file_path, index=False)", "def _fill_undefined_gaps(phases):\n\n undefined, = np.where(phases == CurvePhases.Undetermined.value)\n last_index = phases.size - 1\n\n # If the curve is just two measurements this makes little sense\n if last_index < 2:\n return\n\n for loc in undefined:\n\n if loc == 0:\n if phases[1] != CurvePhases.Undetermined.value:\n phases[loc] = phases[loc + 1]\n elif loc == last_index:\n if phases[loc - 1] != CurvePhases.Undetermined.value:\n phases[loc] = phases[loc - 1]\n elif phases[loc - 1] == phases[loc + 1] and phases[loc + 1] != CurvePhases.Undetermined.value:\n phases[loc] = phases[loc + 1]", "def crawl_all_by_years(years, verbose):\n for year in years:\n crawl_all_by_year(year, verbose)", "def lc_animation(\n da,\n file_name=\"default_animation\",\n measurement=None,\n stacked_plot=False,\n colour_bar=False,\n animation_interval=500,\n width_pixels=10,\n dpi=150,\n font_size=15,\n label_ax=True):\n\n def calc_class_ratio(da):\n \"\"\"\n Creates a table listing year by year what percentage of the\n total area is taken up by each class.\n Parameters\n ----------\n da : xarray.DataArray with time dimension\n Returns\n -------\n Pandas Dataframe : containing class percentages per year\n \"\"\"\n\n # list all class codes in dataset\n list_classes = (np.unique(da, return_counts=False)).tolist()\n\n # create empty dataframe & dictionary\n ratio_table = pd.DataFrame(data=None, columns=list_classes)\n date_line = {}\n\n # count all pixels, should be consistent\n total_pix = int(np.sum(da.isel(time=1)))\n\n # iterate through each year in dataset\n for i in range(0, len(da.time)):\n date = str(da.time[i].data)[0:10]\n\n # for each year iterate though each present class number\n # and count pixels\n for n in list_classes:\n number_of_pixles = int(np.sum(da.isel(time=i) == n))\n percentage = number_of_pixles / total_pix * 100\n date_line[n] = percentage\n\n # add each year's counts to dataframe\n ratio_table.loc[date] = date_line\n\n return ratio_table\n\n def rgb_to_hex(r, g, b):\n hex = \"#%x%x%x\" % (r, g, b)\n if len(hex) < 7:\n hex = \"#0\" + hex[1:]\n return hex\n\n measurement = get_layer_name(measurement, 
da)\n\n # Add gif to end of filename\n file_name = file_name + \".gif\"\n\n # Create colour map and normalisation for specified lc measurement\n try:\n layer_cmap, layer_norm, cb_labels, cb_ticks = lc_colourmap(\n measurement, colour_bar=True)\n except AssertionError:\n\n raise KeyError(f'Could not automatically determine colour scheme from '\n f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing '\n 'the name using the \"measurement\" variable For example '\n '(measurement = \"full_classification\")')\n \n # Prepare variables needed\n # Get info on dataset dimensions\n height, width = da.geobox.shape\n scale = width_pixels / width\n left, bottom, right, top = da.geobox.extent.boundingbox\n extent = [left, right, bottom, top]\n\n outline = [patheffects.withStroke(linewidth=2.5, foreground=\"black\")]\n annotation_defaults = {\n \"xy\": (1, 1),\n \"xycoords\": \"axes fraction\",\n \"xytext\": (-5, -5),\n \"textcoords\": \"offset points\",\n \"horizontalalignment\": \"right\",\n \"verticalalignment\": \"top\",\n \"fontsize\": font_size,\n \"color\": \"white\",\n \"path_effects\": outline,\n }\n\n # Get information needed to display the year in the top corner\n times_list = da.time.dt.strftime(\"%Y\").values\n text_list = [False] * len(times_list)\n annotation_list = [\"\\n\".join([str(i) for i in (a, b) if i])\n for a, b in zip(times_list, text_list)]\n\n if stacked_plot == True:\n \n\n\n # Create table for stacked plot\n stacked_plot_table = calc_class_ratio(da)\n\n # Build colour list of hex vals for stacked plot\n hex_colour_list = []\n colour_def = lc_colours[measurement]\n\n # Custom error message to help if user puts incorrect measurement name\n for val in list(stacked_plot_table):\n try:\n r, g, b = colour_def[val][0:3]\n except KeyError:\n raise KeyError(\n \"class number not found in colour definition. 
\"\n \"Ensure measurement name provided matches the dataset being used\")\n hex_val = rgb_to_hex(r, g, b)\n hex_colour_list.append(hex_val)\n\n # Define & set up figure\n fig, (ax1, ax2) = plt.subplots(1, 2, dpi=dpi, constrained_layout=True)\n fig.set_size_inches(width * scale * 2, height * scale, forward=True)\n fig.set_constrained_layout_pads(\n w_pad=0.2, h_pad=0.2, hspace=0, wspace=0)\n\n # This function is called at regular intervals with changing i\n # values for each frame\n def _update_frames(i, ax1, ax2, extent, annotation_text,\n annotation_defaults, cmap, norm):\n # Clear previous frame to optimise render speed and plot imagery\n ax1.clear()\n ax2.clear()\n\n ax1.imshow(da[i, ...], cmap=cmap, norm=norm,\n extent=extent, interpolation=\"nearest\")\n if(not label_ax):\n ax1.set_axis_off()\n\n clipped_table = stacked_plot_table.iloc[: int(i + 1)]\n data = clipped_table.to_dict(orient=\"list\")\n date = clipped_table.index\n\n ax2.stackplot(date, data.values(), colors=hex_colour_list)\n ax2.tick_params(axis=\"x\", labelrotation=-45)\n ax2.margins(x=0, y=0)\n\n # Add annotation text\n ax1.annotate(annotation_text[i], **annotation_defaults)\n ax2.annotate(annotation_text[i], **annotation_defaults)\n\n # anim_fargs contains all the values we send to our\n # _update_frames function.\n # Note the layer_cmap and layer_norm which were calculated\n # earlier being passed through\n anim_fargs = (\n ax1,\n ax2, # axis to plot into\n [left, right, bottom, top], # imshow extent\n annotation_list,\n annotation_defaults,\n layer_cmap,\n layer_norm,\n )\n\n else: # stacked_plot = False\n\n # if plotting level 4 with colourbar\n\n if measurement == 'level4' and colour_bar == True:\n\n # specific setting to fit level 4 colour bar beside the plot\n # we will plot the animation in the left hand plot\n # and put the colour bar on the right hand side\n\n # Define & set up figure, two subplots so colour bar fits :)\n fig, (ax1, ax2) = plt.subplots(1, 2, dpi=dpi,\n constrained_layout=True, gridspec_kw={'width_ratios': [3, 1]})\n fig.set_size_inches(width * scale * 2,\n height * scale, forward=True)\n fig.set_constrained_layout_pads(\n w_pad=0.2, h_pad=0.2, hspace=0, wspace=0)\n\n # make colour bar\n # provide left hand canvas to colour bar fuction which is where the image will go\n # colourbar will plot on right side beside it\n\n make_colorbar(fig, ax1, measurement, animation=True)\n\n # turn off lines for second plot so it's not ontop of colourbar\n ax2.set_axis_off()\n\n # plotting any other measurement with or with-out colour bar or level 4 without\n else:\n\n # Define & set up figure\n fig, ax1 = plt.subplots(1, 1, dpi=dpi)\n fig.set_size_inches(width * scale, height * scale, forward=True)\n if(not label_ax):\n fig.subplots_adjust(left=0, bottom=0, right=1,\n top=1, wspace=None, hspace=None)\n # Add colourbar here\n if colour_bar:\n make_colorbar(fig, ax1, measurement)\n\n\n # This function is called at regular intervals with changing i\n # values for each frame\n def _update_frames(i, ax1, extent, annotation_text,\n annotation_defaults, cmap, norm):\n # Clear previous frame to optimise render speed and plot imagery\n ax1.clear()\n ax1.imshow(da[i, ...], cmap=cmap, norm=norm,\n extent=extent, interpolation=\"nearest\")\n if(not label_ax):\n ax1.set_axis_off()\n\n # Add annotation text\n ax1.annotate(annotation_text[i], **annotation_defaults)\n\n # anim_fargs contains all the values we send to our\n # _update_frames function.\n # Note the layer_cmap and layer_norm which were calculated\n # earlier 
being passed through\n anim_fargs = (\n ax1,\n [left, right, bottom, top], # imshow extent\n annotation_list,\n annotation_defaults,\n layer_cmap,\n layer_norm,\n )\n\n # Animate\n anim = FuncAnimation(\n fig=fig,\n func=_update_frames,\n fargs=anim_fargs,\n frames=len(da.time),\n interval=animation_interval,\n repeat=False,\n )\n\n anim.save(file_name, writer=\"pillow\", dpi=dpi)\n plt.close()\n return Image(filename=file_name)", "def lag(x: np.ndarray, p: int = 1, *, fill_value: Any = np.nan):\n return shift(x, p, fill_value=fill_value)", "def collect_data_for_multiple_years(start_year, end_year, filename):\n \n full_dict = {}\n \n for year in range(start_year, end_year + 1):\n year_dict = collect_data_by_year(year)\n full_dict.update(year_dict) # Create dict of dicts\n \n print(year)\n time.sleep(random.randint(5, 60)) # Sleep to not overwhelm site\n \n # Convert dict of dicts to Pandas df\n df = pd.DataFrame.from_dict(full_dict, orient='index')\n df.reset_index(inplace=True)\n df = df.rename(columns = {'index':'playerCode'})\n save_df_to_csv(df, filename, col_headers=True, index=True,\n index_label='idNum', mode='w')\n \n return df", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def melt_naics_crosswalk():\n # load the mastercroswalk and subset by sectorsourcename,\n # save values to list\n cw_load = load_crosswalk('sector_timeseries')\n\n # create melt table of possible 2007 and 2017 naics that can\n # be mapped to 2012\n cw_melt = cw_load.melt(\n id_vars='NAICS_2012_Code', var_name='NAICS_year', value_name='NAICS')\n # drop the naics year because not relevant for replacement purposes\n cw_replacement = cw_melt.dropna(how='any')\n cw_replacement = cw_replacement[\n ['NAICS_2012_Code', 'NAICS']].drop_duplicates()\n # drop rows where contents are equal\n cw_replacement = cw_replacement[\n cw_replacement['NAICS_2012_Code'] != cw_replacement['NAICS']]\n # drop rows where length > 6\n cw_replacement = cw_replacement[cw_replacement['NAICS_2012_Code'].apply(\n lambda x: len(x) < 7)].reset_index(drop=True)\n # order by naics 2012\n cw_replacement = cw_replacement.sort_values(\n ['NAICS', 'NAICS_2012_Code']).reset_index(drop=True)\n\n # create allocation ratios by determining number of\n # NAICS 2012 to other naics when not a 1:1 ratio\n cw_replacement_2 = cw_replacement.assign(\n naics_count=cw_replacement.groupby(\n ['NAICS'])['NAICS_2012_Code'].transform('count'))\n cw_replacement_2 = cw_replacement_2.assign(\n allocation_ratio=1/cw_replacement_2['naics_count'])\n\n return cw_replacement_2", "def replace_missingvalues_bandmean(X):\n if X.ndim != 4:\n raise ValueError('Input not valid, no [pic, row, column, band] data format')\n\n zeros = np.where(X[:,:,:] == 0)\n\n bandmean = {}\n\n for i in sorted(np.unique(zeros[3])):\n bandmean.update({i:np.mean(X[:,:,:,i])})\n\n for i in range(0,len(zeros[0])):\n pic, row, column, band = zeros[0][i],zeros[1][i],zeros[2][i],zeros[3][i]\n mean = bandmean.get(band)\n X[pic,row,column,band] = int(mean)\n\n return X", "def get_dataframes_for_each_year(main_dataframe, years):\n list_of_dataframes = []\n for year in years:\n dataframe_by_year = main_dataframe.loc[ (main_dataframe['year'] == year) ].T\n # Getting rid of the first two rows \n dataframe_by_year = dataframe_by_year.iloc[2:]\n list_of_dataframes.append(dataframe_by_year)\n return list_of_dataframes", "def 
bins_per_year(self):\n # Load the vector version #\n df = self.grouped_bins.reset_index()\n # Add year and remove TimeStep #\n df['year'] = self.country.timestep_to_year(df['time_step'])\n df = df.drop('time_step', axis=1)\n # Only if we are in the calibration scenario #\n if self.parent.parent.scenario.short_name == 'calibration':\n # Patch the harvest data frame to stop at the simulation year #\n selector = df['year'] <= self.parent.parent.country.base_year\n df = df.loc[selector].copy()\n # Return #\n return df", "def parse_year(self, response):\n months_available = response.json().get(\"data\", [])\n year = response.meta[\"year\"]\n\n for month in months_available:\n yield Request(\n f\"{self.GAZETTE_URL}?dir={year}/{month}\",\n meta={\"month\": month, \"year\": year},\n callback=self.parse_month,\n )", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n 
fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def get_yearly_avg(all_stock_data):\n try:\n yearly_stock_data = {}\n for data in all_stock_data:\n year = data[0][0:4]\n if year not in yearly_stock_data:\n yearly_stock_data[year] = []\n yearly_stock_data[year].append(data)\n yearly_avg_list = []\n for year, stock_data in yearly_stock_data.items():\n yearly_avg_list.append((year, get_avg(stock_data)))\n return yearly_avg_list\n\n except Exception as e:\n print(e)\n exit()", "def transition(self, data, year):\n if self.accounting_column is None:\n nrows = int(round(len(data) * self.growth_rate))\n else:\n nrows = int(round(data[self.accounting_column].sum() * self.growth_rate))\n with log_start_finish(\n 'adding {} rows via growth rate ({}) transition'.format(\n nrows, self.growth_rate),\n logger):\n return add_or_remove_rows(data, nrows, accounting_column=self.accounting_column)", "def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind", "def reindex_year_line_plot(df, **kwargs):\n fig = go.Figure()\n dft = transforms.reindex_year(df)\n max_results = kwargs.get(\"max_results\", None)\n if max_results:\n dft = dft.tail(max_results)\n colsel = cpu.reindex_year_df_rel_col(dft)\n\n traces = cptr.reindex_plot_traces(dft, current_select_year=colsel, **kwargs)\n\n if \"shaded_range\" in traces and traces[\"shaded_range\"]:\n for trace in traces[\"shaded_range\"]:\n fig.add_trace(trace)\n\n if \"hist\" in traces:\n for trace in traces[\"hist\"]:\n fig.add_trace(trace)\n\n kwargs[\"title_postfix\"] = colsel\n title = cpu.gen_title(df[colsel], title_prefix=colsel, **kwargs)\n\n legend = go.layout.Legend(font=dict(size=10))\n yaxis_title = kwargs.get(\"yaxis_title\", None)\n fig.update_layout(\n title=title,\n title_x=0.01,\n xaxis_tickformat=\"%b-%y\",\n yaxis_title=yaxis_title,\n legend=legend,\n margin=preset_margins,\n )\n # zoom into last 3 years\n fig.update_xaxes(\n type=\"date\",\n range=[\n dft.tail(365 * 3).index[0].strftime(\"%Y-%m-%d\"),\n dft.index[-1].strftime(\"%Y-%m-%d\"),\n ],\n )\n\n return fig", "def smooth_climatologies(thresh_climYear, seas_climYear, smoothPercentileWidth):\n # If the climatology contains NaNs, then assume it is a <365-day year and deal accordingly\n if np.sum(np.isnan(seas_climYear)) + np.sum(np.isnan(thresh_climYear)):\n valid = ~np.isnan(thresh_climYear)\n thresh_climYear[valid] = runavg(thresh_climYear[valid], smoothPercentileWidth)\n valid = ~np.isnan(seas_climYear)\n seas_climYear[valid] = runavg(seas_climYear[valid], smoothPercentileWidth)\n else: # >= 365-day year (no nans)\n thresh_climYear = runavg(thresh_climYear, smoothPercentileWidth)\n seas_climYear = runavg(seas_climYear, smoothPercentileWidth)\n\n return thresh_climYear, seas_climYear", "def 
time_bucket_gapfill(self, field: str, interval: str, start: datetime, end: datetime, datapoints: int=240):\n return self.values(bucket=TimeBucketGapFill(field, interval, start, end, datapoints))", "def load_all_data():\r\n\r\n data = dict()\r\n for year in ['2010', '2011', '2014', '2016']:\r\n\r\n data[year] = load_data(int(year))\r\n\r\n # Calculate the dune widths\r\n data[year]['Dune Width'] = data[year]['x_heel'] - data[year]['x_toe']\r\n data[year]['Fenced Dune Width'] = data[year]['x_fence_heel'] - data[year]['x_fence_toe']\r\n data[year]['Fenced Dune System Width'] = data[year]['x_heel'] - data[year]['x_fence_toe']\r\n\r\n # For now, remove all negative widths and volumes, something went wrong with them\r\n width_condition = data[year]['Fenced Dune Width'] <= 0\r\n volume_condition = data[year]['Fenced Dune Volume'] <= 0\r\n\r\n data[year]['y_fence_crest'][width_condition] = np.nan\r\n data[year]['Fenced Dune Width'][width_condition] = np.nan\r\n data[year]['Fenced Dune Volume'][width_condition] = np.nan\r\n\r\n data[year]['y_fence_crest'][volume_condition] = np.nan\r\n data[year]['Fenced Dune Width'][volume_condition] = np.nan\r\n data[year]['Fenced Dune Volume'][volume_condition] = np.nan\r\n\r\n data[year]['Fenced Dune System Width'][data[year]['Fenced Dune System Width'] <= 0] = np.nan\r\n\r\n # Remove instances where the fenced and natural dune crest are not positioned correctly\r\n crest_condition_1 = data[year]['x_fence_crest'] >= data[year]['x_crest']\r\n crest_condition_2 = data[year]['y_fence_crest'] >= data[year]['y_crest']\r\n\r\n data[year]['y_fence_crest'][crest_condition_1] = np.nan\r\n data[year]['Fenced Dune Width'][crest_condition_1] = np.nan\r\n data[year]['Fenced Dune Volume'][crest_condition_1] = np.nan\r\n\r\n data[year]['y_fence_crest'][crest_condition_2] = np.nan\r\n data[year]['Fenced Dune Width'][crest_condition_2] = np.nan\r\n data[year]['Fenced Dune Volume'][crest_condition_2] = np.nan\r\n\r\n data['Fences'] = load_fence_locations(y=0)\r\n\r\n return data", "def split_on_year(\n self,\n df: pyspark.DataFrame,\n column_name: str,\n batch_identifiers: dict,\n ) -> pyspark.DataFrame:\n return self.split_on_date_parts(\n df=df,\n column_name=column_name,\n batch_identifiers=batch_identifiers,\n date_parts=[DatePart.YEAR],\n )", "def ANdatefixer(years):\n\n\n\t# ========== create the new dates ==========\n\t# year = ds.Year\n\n\t# +++++ set up the list of dates +++++\n\tdates = OrderedDict()\n\ttm = [dt.datetime(int(year) , 6, 30) for year in years]\n\tdates[\"time\"] = pd.to_datetime(tm)\n\n\tdates[\"calendar\"] = 'standard'\n\tdates[\"units\"] = 'days since 1900-01-01 00:00'\n\t\n\tdates[\"CFTime\"] = date2num(\n\t\ttm, calendar=dates[\"calendar\"], units=dates[\"units\"])\n\n\treturn dates", "def ffill(\n self: FrameLike,\n axis: Optional[Axis] = None,\n inplace: bool_type = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n return self.fillna(method=\"ffill\", axis=axis, inplace=inplace, limit=limit)", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 
2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def FS1Year(inputFolderPath = './Formatted Files Without Missing', outputFolderPath = './Feature Selection'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t# print files\n\t\t\t# call([\"java\",\"-jar\",\"MINE.jar\",\"./New Formatted Files/\"+files[0],str(targetList[i]+1),\"cv=0.5\"])\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\t# dataset = np.loadtxt('./New Formatted Files/'+files[0], delimiter=\",\", skiprows=1, usecols=tuple(range(1,3240)))\n\t\t\t# dataset = np.genfromtxt('./New Formatted Files/'+files[0], delimiter=\",\", names=True, autostrip=True, max_rows=10, missing_values=np.nan, usecols=tuple(range(1,30)))\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if item.startswith(str(targetList[i]).zfill(4))]\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\tX = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t# print tuple(range(1,3240))\n\t\t\t# print dataset.dtype.names[0]\n\t\t\t# print dataset.dtype.names[-1]\n\t\t\t# print dataset[0]\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not np.isnan(x1)])\n\t\t\t# print dataset[0]\n\t\t\t# print (imputedX.shape, y.shape)\n\t\t\t# print 
(imputedX.shape, deleteMissingY.shape)\n\t\t\t# print (np.any(np.isnan(imputedX)), np.all(np.isfinite(imputedX)))\n\t\t\t# imputedX_new = SelectKBest(chi2, k=10).fit_transform(imputedX, y)\n\t\t\tk = 30\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t# print (len(selection.get_support()), len(header[1:target+1]+header[target+2:]))\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[1:target+1]+header[target+2:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t# for sf in selectedFeatures:\n\t\t\t# \tprint sf\n\t\t\t# print selection.scores_\n\t\t\t# print selection.get_support()\n\t\t\t# print (imputedX_new.shape, y.shape)\n\t\t\t# print (imputedX_new.shape, deleteMissingY.shape)\n\t\t\t# print imputedX[0,1994]\n\t\t\t# print dataset['3137_Estimates_and_projections_of_the_total_population_by_sex_age_and_rural__urban_areasSexTotal_10year_age_bands__2534_Geographical_coverage__National_Thousands_Persons__ILO']\n\t\t\t# print dataset\n\t\t\t# separate the data from the target attributes\n\t\t\t# X = np.concatenate((imputedDataset[:,0:7],imputedDataset[:,0:7]),axis=1)\n\t\t\t# y = imputedDataset[:,8]\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\t# print 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('Indicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()", "def interp1d_stair_aver(x, y): #TODO: deal with the case x not sorted\n def f(xp):\n yp=np.empty(np.size(xp)-1)\n xmod=x[~(np.isnan(x)+np.isnan(y))]\n ymod=y[~(np.isnan(x)+np.isnan(y))]\n yint=np.cumsum(np.concatenate((np.array([0]),ymod[:-1]*(xmod[1:]-xmod[:-1]))))\n g=interp1d(xmod,yint, bounds_error=False, fill_value=np.nan)\n# yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n yp=np.where((xp[:-1]>min(xmod))*(xp[1:]<max(xmod)),(g(xp[1:])-g(xp[:-1]))/(xp[1:]-xp[:-1]),np.nan) #Maybe this is suboptimal since we compute twice g(xp[i])\n return yp\n\n return f", "def arima(X):\n \n sq_preds = status_quo_model(X)\n X[\"id\"] = X.index\n # get it to long form\n longX = pd.melt(X, id_vars=['id'], value_vars=list(X.columns.values)[:-1])\n # change the default column name to a proper one\n longX['year'] = longX['variable']\n longX.drop(columns = 'variable', inplace=True)\n # sort by id and year so that we can fit individual time series models for each indicator\n longX = longX.sort_values(['id', 'year'])\n # group\n grouped = longX.groupby('id')['value']\n # for each group (a country-indicator combination)\n forecasts = list()\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\")\n for k, g in grouped:\n g = g[-5:]\n gi = g.interpolate(method = 'linear', limit = 50, limit_direction = 'backward')\n model = ARIMA(gi, order=(2,1,0))\n try:\n results = model.fit(disp = 0)\n forecasts.append(results.forecast()[0][0])\n except (ValueError, numpy.linalg.linalg.LinAlgError) as e:\n forecasts.append(sq_preds[k])\n return forecasts", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in 
data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def add_image_to_frame_list(self,startFrame, endFrame, imageName): \n for i in range(startFrame-1, endFrame-1):\n try:\n # image = imageio.imread(imageName)\n im = Image.open(imageName)\n im = im.resize((720, 720))\n self.frame_list.append(im)\n # self.frame_list.append(im)\n\n except:\n print (imageName, \" not found.\")\n # BufferedImage bi= new BufferedImage(320,240,BufferedImage.TYPE_BYTE_GRAY);\n im=self.blank\n self.frame_list.append(im)", "def SplitGap(data,gapsize,medwin,fluxdiff):\n \n # defining new empty lists and stuff\n pcount=0\n istamps=[]\n outData={}\n \n data['x'].mask = data['UnMasked']\n data['y'].mask = data['UnMasked']\n data['yerr'].mask = data['UnMasked']\n \n # median smoothing the lightcurve\n mvavg1 = movingMedian(data['y'],medwin)\n mvavg1 = num.append(mvavg1,mvavg1[-1])\n mvavg1 = data['y']\n # first derivative of smoothed lightcurve\n diff1 = num.diff(mvavg1)\n diff1 = num.hstack((diff1,diff1[-1]))\n \n # second derivative of smoothed lightcurve\n diff2 = num.diff(diff1)\n diff2 = num.hstack((diff2[-1],diff2))\n\n # compute ourlier resistant sigma\n sig = compute1Sigma(diff1)\n #pylab.plot(diff1,'g.')\n #pylab.plot([0,6000],[5*sig,5*sig],'k-')\n #pylab.plot([0,6000],[3*sig,3*sig],'k-')\n #pylab.plot([0,6000],[1*sig,1*sig],'k-')\n #pylab.show()\n\n # The grand master loop >=}\n # to make portion slices\n for i in range(len(data['x'])-1):\n dt = data['x'][i+1]- data['x'][i]\n j1 = max(0,i-medwin)\n j2 = i + medwin\n if pcount == 0:\n i0 = 0\n if pcount > 0:\n i0 = i1+1\n if dt > gapsize:\n i1 = i\n istamps.append([i0,i1])\n pcount += 1\n #if num.abs(diff1[i]) > 5*sig:\n #i1 = i\n #istamps.append([i0,i1])\n #pcount += 1\n #print num.abs(diff1[i]/data['y'][i]), diff1[i], data['y'][i], diff1[i+1], data['y'][i+1]\n #print i, ' test flux gap'\n i1 = i+1\n istamps.append([i0,i1])\n \n \n \n if data['bool']==False:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':False}\n else:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1], 'TransitMask':data['TransitMask'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':True}\n \n return outData", "def 
get_aligned_image_2frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...]).repeat(1, 4, 1, 1)]\n for i in range(n - 1, 0, -1):\n x_i = x[:, i, ...]\n flow = flows_backward[:, i - 1, ...]\n x_backward.insert(0, flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))\n x_forward = [torch.zeros_like(x[:, 0, ...]).repeat(1, 4, 1, 1)]\n for i in range(0, n - 1):\n x_i = x[:, i, ...]\n flow = flows_forward[:, i, ...]\n x_forward.append(flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]", "def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc", "def test_leap_years(self):\n\n dates1 = (\n datetime.date(2000, 1, 29),\n datetime.date(2004, 1, 29),\n datetime.date(2008, 1, 29),\n datetime.date(2012, 1, 29),\n datetime.date(2016, 1, 29),\n datetime.date(2020, 1, 29),\n datetime.date(2024, 1, 29),\n )\n\n dates2 = (\n datetime.date(2000, 2, 29),\n datetime.date(2004, 2, 29),\n datetime.date(2008, 2, 29),\n datetime.date(2012, 2, 29),\n datetime.date(2016, 2, 29),\n datetime.date(2020, 2, 29),\n datetime.date(2024, 2, 29),\n )\n\n for date1, date2 in zip(dates1, dates2):\n self.assertTrue(self.expander.is_same_date_month_ahead(date1, date2))", "def _year_days(year):\n return pd.DataFrame({'time': pd.date_range(f'{year}-01-01', f'{year}-12-31')})", "def get_year_month_range(year, month, quantity):\n yield year, month\n for _ in range(quantity - 1):\n year, month = increment_year_month(year, month)\n yield year, month", "def fill(self, data_grouped, *args, **kw):\n sdata = _scale_data(data_grouped, self.ranges)\n self.ax.fill(self.angle, np.r_[sdata, sdata[0]], *args, **kw)", "def fill(self, filler):\n\n for x in range(self.__xmax):\n for y in range(self.__ymax):\n self.__data[(x,y)] = filler(x,y) % self.mod", "def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]", "def fill_nan(array):\n idx = np.arange(array.shape[0])\n good = np.where(np.isfinite(array))\n interp = interpolate.interp1d(idx[good], array[good], bounds_error=False)\n return np.where(np.isfinite(array), array, interp(idx))", "def zero_end_interpolation(df: pd.DataFrame):\n end = df.index[-1]\n empty_df = pd.DataFrame(index=np.arange(0, end + 1, 1))\n res = pd.concat([df, empty_df], axis=1)\n res = res.fillna(method='ffill')\n res = res.fillna(method='bfill')\n return res", "def _pad_frames_list(self, frames):\n if len(frames) < config.RGB_N_FRAMES:\n n_pad_frames = config.RGB_N_FRAMES - len(frames)\n for _ in range(n_pad_frames):\n blank_frame = np.zeros((config.RGB_FRAME_HEIGHT, config.RGB_FRAME_WIDTH, config.CHANNELS))\n frames.append(blank_frame)\n\n return frames", "def reshape_bfill(x, y, xnew, left_values=\"first\", right_values=0):\r\n fill_value = [left_values, right_values]\r\n if left_values == \"first\":\r\n fill_value[0] = y[0]\r\n fill_value = tuple(fill_value)\r\n foo = scipy.interpolate.interp1d(\r\n x, y,\r\n axis=0,\r\n copy=False,\r\n kind=\"next\",\r\n bounds_error=False,\r\n fill_value=fill_value,\r\n assume_sorted=True,\r\n )\r\n return foo(xnew)", "def _shift(self, arr: np.ndarray, num: int = 1, fill_value: int = 0) -> np.ndarray:\n result = np.empty_like(arr)\n if num > 0:\n result[:num] = fill_value\n result[num:] = arr[:-num]\n elif num < 0:\n result[num:] = 
fill_value\n result[:num] = arr[-num:]\n else:\n result[:] = arr\n return result", "def fill_mising(self, dict):\t\n\t\tfor name, df in dict.items():\n\t\t\tdf = df.fillna(method='pad')\n\t\t\tdict[name] = df\n\t\treturn dict", "def ran_remove_shaded(year_list,airline_list,processed_direc,graph_direc):\n \n IAPL_df_all = pd.DataFrame(columns = ['Year','Airline','IAPL'])\n CS_df_all = pd.DataFrame(columns = ['Year','Airline','Cluster_Size'])\n AC_df_all = pd.DataFrame(columns = ['Year','Airline','AC']) \n for airline in airline_list:\n script_dir = os.path.dirname(os.getcwd())\n CS_path = \"%s%s_CSR.csv\" %(processed_direc,airline)\n CS_file = os.path.join(script_dir,CS_path)\n CS_df = pd.read_csv(CS_file)\n\n IAPL_path = \"%s%s_IAPLR.csv\" %(processed_direc,airline)\n IAPL_file = os.path.join(script_dir,IAPL_path)\n IAPL_df = pd.read_csv(IAPL_file)\n\n AC_path = \"%s%s_ACR.csv\" %(processed_direc,airline)\n AC_file = os.path.join(script_dir,AC_path)\n AC_df = pd.read_csv(AC_file)\n\n CS_df_airline = pd.DataFrame(columns = ['Year','Airline','Cluster_Size'])\n CS_year_df = pd.DataFrame()\n\n IAPL_df_airline = pd.DataFrame(columns = ['Year','Airline','IAPL'])\n IAPL_year_df = pd.DataFrame()\n\n AC_df_airline = pd.DataFrame(columns = ['Year','Airline','AC'])\n AC_year_df = pd.DataFrame()\n\n col = 0\n for year in year_list:\n CS_year_df['Cluster_Size'] = CS_df.iloc[:,col]\n CS_quant_calc = CS_year_df.quantile([0.25,0.5,0.75])\n CS_quant_calc['Year'] = year\n CS_df_airline = pd.concat([CS_df_airline,CS_quant_calc],ignore_index=True)\n\n IAPL_year_df['IAPL'] = IAPL_df.iloc[:,col]\n IAPL_quant_calc = IAPL_year_df.quantile([0.25,0.5,0.75])\n IAPL_quant_calc['Year'] = year\n IAPL_df_airline = pd.concat([IAPL_df_airline,IAPL_quant_calc],ignore_index=True)\n\n AC_year_df['AC'] = AC_df.iloc[:,col]\n AC_quant_calc = AC_year_df.quantile([0.5,0.5,0.5])\n AC_quant_calc['Year'] = year\n AC_df_airline = pd.concat([AC_df_airline,AC_quant_calc],ignore_index=True)\n\n col = col + 1\n CS_df_airline['Airline'] = airline\n CS_df_all = pd.concat([CS_df_all,CS_df_airline],ignore_index = True)\n\n IAPL_df_airline['Airline'] = airline\n IAPL_df_all = pd.concat([IAPL_df_all,IAPL_df_airline],ignore_index = True)\n\n AC_df_airline['Airline'] = airline\n AC_df_all = pd.concat([AC_df_all,AC_df_airline],ignore_index = True)\n\n\n plt.figure(1,figsize=(2.8,2.0),dpi=300)\n ax1 = sns.lineplot(data=CS_df_all, x = 'Year', y = 'Cluster_Size', hue='Airline', style='Airline', marker = 'o')\n ax1.xaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.xlabel('Year')\n plt.ylabel('Cluster Size')\n plt.legend(airline_list,fontsize=10,labelspacing=0.15)\n plt.tight_layout()\n plt.savefig('%sShaded_CS.pdf'%(graph_direc,))\n\n plt.figure(2,figsize=(2.8,2.0),dpi=300)\n ax2 = sns.lineplot(data=IAPL_df_all, x = 'Year', y = 'IAPL', hue='Airline', style='Airline', marker = 'o')\n ax2.xaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.xlabel('Year')\n plt.ylabel('IAPL')\n plt.legend(airline_list,fontsize=10,labelspacing=0.15)\n plt.tight_layout()\n plt.savefig('%sShaded_IAPL.pdf'%(graph_direc,))\n\n plt.figure(3,figsize=(2.8,2.0),dpi=300)\n ax3 = sns.lineplot(data=AC_df_all, x = 'Year', y = 'AC', hue='Airline', style='Airline', marker = 'o')\n ax3.xaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.xlabel('Year')\n plt.ylabel('Algebraic Connectivity')\n plt.legend(airline_list,fontsize=10,labelspacing=0.15)\n plt.tight_layout()\n plt.savefig('%sShaded_AC.pdf'%(graph_direc,))\n\n plt.show()", "def FillGapFnBuilder(duration=500, 
gap_back=10, gap_forward=5, gap_threshold=10):\n def carFn((idx, car), sim, step):\n gap = [0] * sim.numLanes\n new_speed = [0] * sim.numLanes\n for lane in range(sim.numLanes):\n if sim.getCars(idx, dxBack=gap_back, dxForward=gap_forward, lane=lane):\n # cars too close, no lane changing allowed\n gap[lane] = 0\n continue\n\n try:\n [back_car, front_car] = sim.getCars(idx, numBack=1, numForward=1, lane=lane)\n except ValueError:\n # Not enough cars on lane\n gap[lane] = 0\n continue\n\n gap[lane] = (front_car[\"x\"] - back_car[\"x\"]) % sim.length\n new_speed[lane] = (front_car[\"v\"] + back_car[\"v\"]) / 2\n max_gap = max(gap)\n max_lane = gap.index(max_gap)\n\n if max_lane != car[\"lane\"] and max_gap-gap[car[\"lane\"]] > gap_threshold:\n traci.vehicle.slowDown(car[\"id\"], new_speed[max_lane], duration)\n traci.vehicle.changeLane(car[\"id\"], max_lane, 10000)\n\n return carFn", "def get_aligned_feature_4frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...])]\n for i in range(n, 1, -1):\n x_i = x[:, i - 1, ...]\n flow1 = flows_backward[0][:, i - 2, ...]\n if i == n:\n x_ii = torch.zeros_like(x[:, n - 2, ...])\n flow2 = torch.zeros_like(flows_backward[1][:, n - 3, ...])\n else:\n x_ii = x[:, i, ...]\n flow2 = flows_backward[1][:, i - 2, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_backward.insert(0, self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i - 2, ...], [flow1, flow2]))\n x_forward = [torch.zeros_like(x[:, 0, ...])]\n for i in range(-1, n - 2):\n x_i = x[:, i + 1, ...]\n flow1 = flows_forward[0][:, i + 1, ...]\n if i == -1:\n x_ii = torch.zeros_like(x[:, 1, ...])\n flow2 = torch.zeros_like(flows_forward[1][:, 0, ...])\n else:\n x_ii = x[:, i, ...]\n flow2 = flows_forward[1][:, i, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_forward.append(self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i + 2, ...], [flow1, flow2]))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]", "def _calculate_stocks_after_baseline_period(\n baseline_stock_raster_path, yearly_accumulation_raster_path, n_years,\n target_raster_path):\n # Both of these values are assumed to be defined from earlier in the\n # model's execution.\n baseline_nodata = pygeoprocessing.get_raster_info(\n baseline_stock_raster_path)['nodata'][0]\n accum_nodata = pygeoprocessing.get_raster_info(\n yearly_accumulation_raster_path)['nodata'][0]\n\n def _calculate_accumulation_over_years(baseline_matrix, accum_matrix):\n target_matrix = numpy.empty(baseline_matrix.shape, dtype=numpy.float32)\n target_matrix[:] = NODATA_FLOAT32_MIN\n\n valid_pixels = (\n ~utils.array_equals_nodata(baseline_matrix, baseline_nodata) &\n ~utils.array_equals_nodata(accum_matrix, accum_nodata))\n\n target_matrix[valid_pixels] = (\n baseline_matrix[valid_pixels] + (\n accum_matrix[valid_pixels] * n_years))\n\n return target_matrix\n\n pygeoprocessing.raster_calculator(\n [(baseline_stock_raster_path, 1),\n (yearly_accumulation_raster_path, 1)],\n _calculate_accumulation_over_years, target_raster_path,\n gdal.GDT_Float32, NODATA_FLOAT32_MIN)", "def fill_sky_gradient(num_steps: int, start_y: float):\n # compute some helper values\n min_x = -turtle.window_width() / 2\n max_x = +turtle.window_width() / 2\n y_step = 
turtle.window_height()*start_y / num_steps\n min_y = turtle.window_height() / 2 - turtle.window_height()*start_y\n \n # fill the section below the gradient\n fill_rectangle(min_x, -turtle.window_height()/2, max_x, min_y, LOWER_SKY_COLOR)\n \n # fill the gradient\n for i in range(num_steps):\n fill_rectangle(min_x, min_y, max_x, min_y + y_step + 1,\n mix_colors(LOWER_SKY_COLOR, UPPER_SKY_COLOR, i/(num_steps-1)))\n min_y += y_step", "def cull_missing(df, colname, missingdays):\n df2 = df[[\"binyear\", colname]]\n nancounts = df2.groupby(\"binyear\").agg(lambda x: x.isnull().sum())\n # cull anything with more than 3 days NaN\n df2 = nancounts[nancounts[colname] > missingdays]\n years = []\n if not df2.empty:\n years = list(df2.index.values)\n resdf = df[~df[\"binyear\"].isin(years)]\n minyear = resdf[\"binyear\"].min()\n # Prevent scary cullyears listing\n return resdf, list(filter(lambda x: x > minyear, years))", "def loop(self):\n catalog_copy = self.catalog.copy() # Keep the original to apply the merge.\n self.catalog['AE'] = np.nan\n\n for year in self.unique_years:\n print(f'Merging AE data for year={year}')\n # Try to load the AE file.\n try:\n self.ae = self.load_ae(year)\n except AssertionError as err:\n if 'No AE files found.' in str(err):\n print(err)\n continue\n else:\n raise\n\n merged = pd.merge_asof(catalog_copy, self.ae, left_index=True, \n right_index=True, tolerance=pd.Timedelta(minutes=1),\n direction='nearest')\n self.catalog.update(merged)\n return" ]
[ "0.65311575", "0.5876845", "0.5747447", "0.5284138", "0.52226", "0.5096014", "0.5058866", "0.5047209", "0.49857956", "0.49474898", "0.48613372", "0.48247787", "0.47784477", "0.46002764", "0.45790556", "0.45642906", "0.4548984", "0.4521293", "0.45085937", "0.44796604", "0.44686285", "0.44508076", "0.44366032", "0.44195914", "0.4402968", "0.43964994", "0.4350866", "0.43493602", "0.43422398", "0.43408775", "0.43221173", "0.43178424", "0.43097156", "0.4303747", "0.43008196", "0.42974886", "0.42887056", "0.42552507", "0.42456695", "0.4237061", "0.42359817", "0.42357475", "0.423474", "0.42086428", "0.42009333", "0.41841903", "0.41723892", "0.41685423", "0.41509795", "0.41234985", "0.41161048", "0.41082975", "0.4090072", "0.40681487", "0.40671405", "0.40592307", "0.40588433", "0.4048328", "0.40316984", "0.4022465", "0.40130025", "0.40014708", "0.39999276", "0.39844623", "0.3974101", "0.39445385", "0.39407188", "0.39354014", "0.3933356", "0.3932712", "0.3928329", "0.39276963", "0.39270726", "0.39199167", "0.39186594", "0.3910035", "0.39052224", "0.3892387", "0.3884633", "0.38819295", "0.38765642", "0.3873026", "0.38687277", "0.38618508", "0.38573882", "0.38554278", "0.38539243", "0.38487262", "0.38480484", "0.38469028", "0.38454613", "0.38451558", "0.38433355", "0.38371503", "0.3834757", "0.38322377", "0.38267237", "0.38225472", "0.381289", "0.38093963" ]
0.70489925
0
Function to perform a backward moving gap fill for all years in an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. The backward gap fill is applied iteratively from the last year of bandNames through the first year: wherever the current year's image has missing data, it is filled with the values of the following year (the year handled in the previous iteration of the backward pass).
def applyBackwardNoDataFilter(image, bandNames): #Get a list of band names to iterate over, from year(-2) through year(0) bandNamesEE = ee.List(bandNames[:-1]).reverse() #Define backwards filter #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year #currentImage = image.select(bandNames[-2]), the second to last year #followingImage = image.select(bandNames[-1]), the final year #Find where the second to last year has missing data, replace those values with the values of the following year #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill #and the second band is the final years classification #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year def backwardNoDataFilter(bandName, followingImage): currentImage = image.select(ee.String(bandName)) followingImage = ee.Image(followingImage) currentImage = currentImage.unmask(followingImage.select([0])) return currentImage.addBands(followingImage) #Apply backwards filter, starting with the final year and iterating through to year(0) filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1]))) #Re-order bands to be in chronological order filtered = ee.Image(filtered) return filtered.select(bandNames)
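A minimal usage sketch for the backward gap-fill function above, assuming the Google Earth Engine Python API (ee) is installed and an authenticated session has been initialized; the image asset id and the year band names below are hypothetical placeholders for illustration only, not values taken from this record.

import ee

ee.Initialize()

# Band names must be supplied in chronological order (placeholder years).
bandNames = ['classification_2018', 'classification_2019', 'classification_2020']

# Hypothetical multi-band image with one classification band per year.
classification = ee.Image('users/example/annual_classification')

# Fill gaps backward in time: missing pixels in each year are taken from the
# following year, starting at the last band and iterating back to the first.
filled = applyBackwardNoDataFilter(classification, bandNames)

# The returned image keeps the chronological band order given in bandNames.
print(filled.bandNames().getInfo())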
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyGapFilter(image, bandNames):\n filtered = applyForwardNoDataFilter(image, bandNames)\n filtered = applyBackwardNoDataFilter(filtered, bandNames)\n return filtered", "def applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 
'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). 
\\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def get_yearly_data(name, startyr=None, endyr=None, interpolated=False):\n varinfo = get_varinfo(name)\n \n if varinfo[\"type\"] == \"yearly\":\n data = get_data(varinfo[\"id\"], startyr=startyr, endyr=endyr)\n giddict = dict()\n sorteddata = sorted(data[\"cells\"], key=lambda vd: vd[\"gid\"])\n for gid,valuedicts in itertools.groupby(sorteddata, key=lambda vd: vd[\"gid\"]):\n yrdict = dict([(valuedict[\"year\"],valuedict[\"value\"])\n for valuedict in valuedicts\n ])\n info = {\"data\": yrdict}\n giddict[gid] = info\n\n if interpolated:\n def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return zip(a, b)\n \n def lerp(factor, fromval, toval):\n valrange = toval - fromval\n return fromval + valrange * factor\n \n for gid,info in giddict.items():\n yrdict = info[\"data\"]\n if len(yrdict) > 1:\n for (fromyr,fromval),(toyr,toval) in pairwise(sorted(yrdict.items(),key=lambda i: i[0])):\n curyr = fromyr + 1\n interpneeded = fromval != toval\n \n while curyr != toyr:\n if interpneeded:\n factor = (curyr - fromyr) / float(toyr - fromyr)\n yrdict[curyr] = lerp(factor, fromval, toval)\n else:\n yrdict[curyr] = fromval\n curyr += 1\n\n return giddict\n\n else:\n raise Exception(\"Could not find a yearly variable with that name\")", "def gap_years_aggregated(mongo_client):\n db = mongo_client[\"nobel\"]\n\n original_categories = sorted(set(db.prizes.distinct(\"category\", {\"year\": \"1901\"})))\n\n pipeline = [\n {\"$match\": {\"category\": {\"$in\": original_categories}}},\n {\"$project\": {\"category\": 1, \"year\": 1}},\n\n # Collect the set of category values for each prize year.\n {\"$group\": {\"_id\": \"$year\", \"categories\": {\"$addToSet\": \"$category\"}}},\n\n # Project categories *not* awarded (i.e., that are missing this year).\n {\"$project\": {\"missing\": {\"$setDifference\": 
[original_categories, \"$categories\"]}}},\n\n # Only include years with at least one missing category\n {\"$match\": {\"missing.0\": {\"$exists\": True}}},\n\n # Sort in reverse chronological order. Note that \"_id\" is a distinct year at this stage.\n {\"$sort\": OrderedDict([(\"_id\", -1)])},\n ]\n\n for doc in db.prizes.aggregate(pipeline):\n print(\"{year}: {missing}\".format(year=doc[\"_id\"], missing=\", \".join(sorted(doc[\"missing\"]))))", "def fill_price_gaps(\n from_date=dt.datetime(1970,1,1),\n to_date=dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)\n ):\n #Create a collection of years\n years = []\n cur_year = from_date.year\n while cur_year <= to_date.year:\n years.append(cur_year)\n cur_year += 1\n #Loop each year\n all_year_dates = pd.DataFrame([])\n for year in tqdm(years, total=len(years), desc=\"Loop through years to find dates\"):\n #establish bounding dates\n year_from_date = None if year != from_date.year else from_date\n year_to_date = None if year != to_date.year else to_date\n #Get filtered year dates\n year_dates = create_filtered_year_dates(year, from_date=year_from_date, to_date=year_to_date, )\n #Add to the full list\n all_year_dates = pd.concat([all_year_dates, year_dates])\n #Order the dates (just in case)\n all_year_dates = all_year_dates.sort_values([\"date\"]) \\\n .reset_index(drop=True)\n #Fetch all the tickers\n tickers = sqlaq_to_df(ticker.fetch())\n #Loop through tickers\n errors = []\n run_time = ProcessTime()\n for _,r in tqdm(tickers[[\"id\",\"ticker\"]].iterrows(), total=tickers.shape[0], desc=\"Filling in gaps\"):\n logger.info(f\"Filling gaps in {r.id} -> {r.ticker}\")\n try:\n #Fetch all prices\n dp = sqlaq_to_df(daily_price.fetch(ticker_ids=[r.id]))\n dp[\"date\"] = dp.date.astype(\"datetime64[ns]\")\n #Identify missing dates\n missing_dates = pd.merge(all_year_dates, dp[[\"date\",\"id\"]], on=[\"date\"], how=\"left\")\n #Identify the start date and remove all missing date before that\n start_date = missing_dates[~missing_dates.id.isnull()].date.min()\n missing_dates = missing_dates[missing_dates.date > start_date]\n #Remove all other items which have dates\n missing_dates = missing_dates[missing_dates.id.isnull()]\n #Order remaining dates\n missing_dates = missing_dates.sort_values(\"date\")\n #Create groupings no larger than max_days (in config)\n st_d = None\n date_groups = []\n missing_dates = missing_dates.date.to_list()\n if len(missing_dates):\n for i,d in enumerate(missing_dates):\n if not st_d:\n st_d = d\n else:\n #Append when group gets too big\n if (d - st_d).days > WEB_SCRAPE_MAX_DAYS:\n date_groups.append([st_d, missing_dates[i-1]])\n #Update the start date\n st_d = d\n #Append the last item\n date_groups.append([st_d, d])\n #Scrape the missing prices\n logger.info('Number of webscrapes to perform -> {}'.format(len(date_groups)))\n #For each time frame perform a scrape\n try: #Try loop so as not to miss all following date groups\n for i,dates in enumerate(date_groups):\n logger.info(f\"Running dates {i} -> {dt.datetime.strptime(str(dates[0])[:10], '%Y-%m-%d')} - {dt.datetime.strptime(str(dates[1])[:10], '%Y-%m-%d')}\")\n process_daily_prices(\n r.ticker,\n r.id,\n st_date=dates[0],\n en_date=dates[1],\n \n )\n except Exception as e:\n logger.error(e)\n errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e, \"st_date\":dates[0], \"en_dates\":dates[1]})\n #Run an update on th weekly prices\n process_weekly_prices(\n r.id,\n \n )\n except Exception as e:\n logger.error(e)\n 
errors.append({'ticker_id':r.id, 'ticker':r.ticker, \"error\":e})\n #Lap\n logger.info(run_time.lap())\n logger.info(run_time.show_latest_lap_time(show_time=True))\n logger.info(f\"GAP FILL RUN TIME - {run_time.end()}\")\n\n logger.info(f'\\nGAP FILL ERROR COUNT -> {len(errors)}')\n if len(errors) > 0:\n logger.info('GAP FILL ERRORS ->')\n for e in errors:\n logger.error(e)", "def complete_zeros(df_dm,year):\n df_dm.insert(1,year,0)\n return df_dm", "def accumulate_to_year_end(da, year_ends, mask=None, shift=5, accumulate=12, time_name='time'):\n da = da.shift({time_name: shift}) \\\n .rolling({time_name: accumulate}).sum()\n da = da.where(da[time_name].dt.month == year_ends) \\\n .resample({time_name: '1YS'}).sum(skipna=True)\n if mask is None:\n return da\n else:\n return da.where(mask == 1)", "def fill_year(timeseries, value=0):\n # Obtain firts and last date from timeseries\n first_date = timeseries.index.min()\n last_date = timeseries.index.max()\n\n one_year_date = last_date - timedelta(days=365)\n\n ## Obtain the sunday beofre the date of one year ago\n starting_date = one_year_date - timedelta(days=one_year_date.weekday()+1)\n\n assert starting_date.weekday_name == 'Sunday'\n\n\n # Fill dates with mising zero\n date_range_series = create_timeseries(starting_date,\n first_date-timedelta(days=1),\n value)\n\n # Fill the original timeseries\n filled_timeseries = pd.concat([date_range_series, timeseries])\n\n return filled_timeseries", "def calculate_iron_hemoglobin_time_lag_effective_fraction(df, years):\n final = pd.DataFrame()\n data = df.reset_index()\n for i in list(range(0, len(years))):\n current = (data.loc[data.year == years[i]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n if i == 0:\n for draw in list(range(0, 1000)):\n current[f'draw_{draw}'] = 1\n else:\n prior = (data.loc[data.year == years[i - 1]]\n .set_index([c for c in data.columns if 'draw' not in c and c != 'year'])\n .drop(columns='year'))\n current = 1 - ((current - prior) * 0.75 / current)\n current['year'] = years[i]\n final = pd.concat([final, current])\n final = final.reset_index().set_index([c for c in data.columns if 'draw' not in c]).sort_index()\n return final", "def fake_date_fill(df, back_method: str = 'slice'):\n df_index = df.index.to_series().copy()\n df2 = df.sort_index(ascending=False).copy()\n df2 = df2.apply(lambda x: pd.Series(x.dropna().values))\n df2 = df2.sort_index(ascending=False)\n df2.index = df_index.tail(len(df2.index))\n df2 = df2.dropna(how='all', axis=0)\n if df2.empty:\n df2 = df.fillna(0)\n\n if back_method == 'bfill':\n df2 = fill_forward(df2)\n return df\n elif back_method == 'slice':\n thresh = int(df.shape[1] * 0.5)\n thresh = thresh if thresh > 1 else 1\n df3 = df2.dropna(thresh=thresh, axis=0)\n if df3.empty or df3.shape[0] < 8:\n df3 = fill_forward(df2)\n else:\n df3 = fill_forward(df3)\n return df3\n elif back_method == 'keepna':\n return df2\n else:\n print('back_method not recognized in fake_date_fill')\n return df2", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def get_backward(prev_dfs, right_most_path, hist, pm_backward, dfs_codes, 
db):\n\tlast_edge = hist.edges[right_most_path[0]]\n\tg = db[prev_dfs.id]\n\tlast_node = g.nodes[last_edge.to]\n\n\tfor idx,rmp in reversed(list(enumerate(right_most_path[1:]))):\n\t\tedge = hist.edges[rmp]\n\t\tfor e in last_node.edges:\n\t\t\tif e.id in hist.has_edges:\n\t\t\t\tcontinue\n\t\t\tif e.to not in hist.has_node:\n\t\t\t\tcontinue\n\t\t\tfrom_node = g.nodes[edge.fromn]\n\t\t\tto_node = g.nodes[edge.to]\n\t\t\tif e.to == edge.fromn and (e.label > edge.label or (e.label == edge.label and last_node.label >= to_node.label)):\n\t\t\t\tfrom_id = dfs_codes[right_most_path[0]].to\n\t\t\t\tto_id = dfs_codes[rmp].fromn\n\t\t\t\tdfsc = dfs_code(from_id, to_id, last_node.label, e.label, from_node.label)\n\t\t\t\tpdfs = pre_dfs(g.id, e, prev_dfs)\n\t\t\t\tif dfsc in pm_backward:\n\t\t\t\t\tpm_backward[dfsc].append(pdfs)\n\t\t\t\telse:\n\t\t\t\t\tpm_backward[dfsc] = [pdfs,]\n\t\n\treturn pm_backward", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def foldcurve(_band, _period):\n # Set epoch to first date observed\n _epoch = _band[0][0]\n # Iterate through array, update date to phase\n for i in range(0, _band.shape[0]):\n _band[i, 0] = ((_band[i, 0] - _epoch) / _period) % 1\n # Return folded array\n return _band", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def _bands_competed_last_year():\n lLastYear = datetime.datetime.now().year - 1\n cursor = connection.cursor()\n cursor.execute(\"SELECT count(distinct(r.band_id)) FROM contests_contestevent e, contests_contestresult r WHERE r.contest_event_id = e.id AND extract(year from e.date_of_event) = %(year)s GROUP BY extract(year from e.date_of_event) ORDER BY extract(year from e.date_of_event) desc\", {'year' : lLastYear})\n rows = cursor.fetchall()\n lReturn = 0\n if rows and rows[0]:\n lReturn = rows[0][0]\n cursor.close()\n return lReturn", "def fillna(df, col: str, forward: bool):\n na_prev = len(df)\n report = f'fillna(\"{col}\") ' + ('forward' if forward else 'backward') + ' NA count:'\n while True:\n na = df[col].isna().sum()\n report += f' {na}'\n if na == na_prev or na == 0: break\n na_prev = na\n # df must to be sorted by (ABI, YEAR)\n df.loc[df[col].isna(), col] = df.groupby('ABI')[col].shift(1 if forward else -1)", "def msatna_blocks_3lag_year(year: int) -> pd.Series:\n return msatna_blocks_3lag_panel()[year]", "def reduce_dataset(X, year):\n\n drop_list = [i for i in range(config.DB_YEAR_MIN, config.DB_YEAR_MAX + 1)]\n drop_list.remove(year - 1)\n red_X = X.drop(drop_list, axis=0)\n return red_X", "def get_previous_yr(df, df2, years):\n # Get n+_ year\n df[\"season_n-{}_tmp\".format(years)] = df[\"season\"] - years\n df_merged = pd.merge(df, df2, how=\"left\", left_on=[\"player\", \"player_id\", \"season_n-{}_tmp\".format(years)],\n right_on=[\"player\", \"player_id\", \"season\"],\n suffixes=['', \"_n-{}\".format(years)])\n\n df_merged = df_merged.drop([\"season_n-{}_tmp\".format(years)], axis=1)\n\n return df_merged", "def modify_bands(\n xraster: xr.core.dataarray.DataArray, input_bands: List[str],\n output_bands: List[str], drop_bands: List[str] = []):\n # Do not modify if image has the same number of output bands\n if xraster['band'].shape[0] == len(output_bands):\n return xraster\n\n # Drop any bands from input that should not be on output\n for ind_id in 
list(set(input_bands) - set(output_bands)):\n drop_bands.append(input_bands.index(ind_id)+1)\n return xraster.drop(dim=\"band\", labels=drop_bands, drop=True)", "def reduce_dataset(years, values,flux_floor=0,max_tm_error=0,min_reduction_steps=200):\n non_zero_ind, min_retained_zero_years = remove_begin_end_zero_flux(years,values,flux_floor,min_reduction_steps)\n\n years_mod = years[non_zero_ind]\n values_mod = values[non_zero_ind]\n\n if years_mod.size <3:\n years_mod = years\n values_mod = values\n values_mod = 0\n else:\n #makes ure you have not removed more than 1% of the mass when removing 0 or flux floor rates\n o_mass = TimeSeries(years,values,None,None).integrate().values[-1]\n r_mass = TimeSeries(years_mod, values_mod, None, None).integrate().values[-1]\n if abs((o_mass-r_mass)/o_mass)*100 > 1:\n years_mod = years\n values_mod = values\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n #normalize Values\n maxval = np.max(values_mod)\n values_mod = values_mod/maxval\n o_timeseries = TimeSeries(years,values/maxval,None,None)\n o_mass = o_timeseries.integrate()\n timeseries = TimeSeries(years_mod, values_mod, None, None)\n mass = timeseries.integrate()\n\n mx = np.argmax(timeseries.values)\n points = [0, mx, len(timeseries)]\n x = timeseries.times\n\n ythresh = 100*np.mean(timeseries.values)\n out_error = 1\n out_error_last = out_error\n OUT_ERROR_THRESHOLD = 1e-2\n\n UPPER_N = 200\n LOWER_N = 50\n last_result = None\n MAX_ITERATIONS = 80\n\n solve_type = SMOOTH\n simple_peaks = False\n last_result,ix = reduct_iter(timeseries,flux_floor,ythresh,out_error,out_error_last,OUT_ERROR_THRESHOLD,UPPER_N,LOWER_N,last_result,MAX_ITERATIONS)\n last_result = retain_min_years(last_result.reduced_flux,o_timeseries,o_mass,min_retained_zero_years)\n #if there are less points than the min_reduction_steps then use the remaining\n #points to rebalance the segments with the largest mass errors.\n play_points = min_reduction_steps - last_result.num_reduced_points\n bef = last_result.reduced_flux.times.size\n if play_points > 0:\n last_result = red_flux.rebalance_extra_points(last_result,play_points)\n\n rr = last_result\n\n #find peaks for data rebalance and reporting\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=3,rel_height=1)\n if peaks.size == 0 :\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=2,rel_height=1)\n if peaks.size == 0:\n peaks, _ = sig.find_peaks(rr.reduced_flux.values,width=1,rel_height=1)\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=3,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=2,rel_height=1)\n if pneg.size == 0:\n pneg, _ = sig.find_peaks(-rr.reduced_flux.values,width=1,rel_height=1)\n\n peaks = rr.reduced_flux.times[peaks]\n pneg = rr.reduced_flux.times[pneg]\n\n peaks = np.isin(o_timeseries.times,peaks)\n pneg = np.isin(o_timeseries.times,pneg)\n peaks = np.where(peaks)\n pneg = np.where(pneg)\n\n peaks = peaks[0]\n pneg = pneg[0]\n iter = 0\n while iter < 100 and (abs(last_result.total_mass_error*maxval) > max_tm_error or abs(last_result.total_mass_error/last_result.mass.values[-1])*100 > .001) :\n rr = red_flux.rebalance_valleys(rr,peaks,pneg)\n #keep the lowest total_mass_error\n if abs(rr.total_mass_error) < abs(last_result.total_mass_error):\n last_result = rr\n else:\n break\n iter += 1\n\n out_times = last_result.reduced_flux.times\n out_values = last_result.reduced_flux.values\n #return the reduced data, undo normalize of the values (*maxval)\n 
return out_times, out_values*maxval,-(last_result.total_mass_error * maxval),peaks.size,iter", "def calculateNumberOfChanges(image, bandNames):\n #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n lc_one_change_image = lc_one_change_col.toBands()\n #Calculate the number of changes by applying the sum reducer\n lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n return lc_sum_changes", "def fill_missing_date_range():\n pickle_dir ='/misc/yoda/www/plots/user/sheep'\n #pickle_dir = '/Users/ken/Downloads/sheep'\n drange = get_missing_date_range(pickle_dir)\n if drange:\n print 'fill date range', drange\n pickle_date_range(drange[0], drange[1])", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def austral_year_daily(x, y):\n if isinstance(x, xr.DataArray):\n x = x.values\n \n jfmamj = x < 182.\n jasond = x >= 182.\n \n x_jasond = []\n y_jasond = []\n if any(jasond):\n x_jasond = x[jasond] - 181\n y_jasond = y[jasond]\n\n x_jfmamj = []\n y_jfmamj = []\n if any(jfmamj):\n x_jfmamj = x[jfmamj] + 184\n y_jfmamj = y[jfmamj]\n\n xout = np.concatenate([xi for xi in [x_jasond, x_jfmamj] if len(xi)])\n yout = np.concatenate([yi for yi in [y_jasond, y_jfmamj] if len(yi)])\n \n return xout, yout", "def iterate_grey_level(prev_mask, new_g_disc, converter, \n num_grey_levels=256, upward=True):\n gl_delta = 1./num_grey_levels\n grey_level = new_g_disc/(num_grey_levels - 1)\n \n # Create desired spectrum.\n desired = desired_PSD_nd(\n new_g_disc*gl_delta, prev_mask.shape[0], prev_mask.ndim)\n desired_radial = converter.radially_average(desired)\n \n # Find error:\n corrected_sig = correct_signal(prev_mask, desired_radial, converter)\n error = np.abs(corrected_sig - prev_mask)\n \n # Make corrections:\n num_replacements = int(np.multiply.reduce(prev_mask.shape)*gl_delta)\n \n ## Identify worst zeros. 
This is different than BIPPSMA, because we \n ## have to check each replacement's neighbourhood to avoid clusters.\n replace_value = 0 if upward else 1\n replace_to = 1 - replace_value\n \n void = prev_mask == replace_value\n void_error = np.where(void, error, 0)\n void_error_order = np.argsort(-void_error, None)# descending.\n \n ## Replace:\n new_sig = prev_mask.copy()\n error_coords = np.unravel_index(void_error_order[:void.sum()], prev_mask.shape)\n \n # We need to make sure replacements don't cluster, by observing the local\n # means. We do that for the entire array - in NumPy. It's cheaper than\n # doing it individually per point in pure Python.\n half_window = 4\n window_size = (2*half_window + 1)\n window = np.full((window_size,)*prev_mask.ndim, 1/window_size**prev_mask.ndim)\n local_mean = ndi.convolve(prev_mask, window, mode='wrap')\n \n for coords in zip(*error_coords):\n if upward:\n crowded = local_mean[coords] > grey_level\n else:\n crowded = local_mean[coords] < grey_level\n \n if crowded:\n continue\n \n assert(new_sig[coords] == replace_value)\n new_sig[coords] = replace_to\n num_replacements -= 1\n if num_replacements == 0:\n break\n \n # Profit:\n return new_sig", "def FS2Years(inputFolderPath = './FormattedFilesWithoutMissingToNextYear', outputFolderPath = './FormattedFilesWithoutMissingToNextYear'):\n\tfileList = []\n\tfor root, dirs, files in os.walk(inputFolderPath): \n\t for afile in files:\n\t \tfileList.append(afile)\n\n\ttargetList = [2704,2707,2713,2716,2718,808,811,1954]\n\t# targetList = [1994,1997,2003,2006,2008,807,810,1953]\n\tyearList = [(1998,2015),(2005,2015),(2005,2015),(2005,2015),(2005,2015),(1960,2014),(1961,2014),(2002,2012)]\n\n\n\tfor i in range(len(targetList)):\n\t\t# i = 0\n\t\trows = []\n\t\tfor year in range(yearList[i][0],yearList[i][1]+1):\n\t\t\t# print str(year) + '-' + str(targetList[i]) \n\t\t\tregex = re.compile(\"(\"+ str(year) +\").*\")\n\t\t\tfiles = [m.group(0) for l in fileList for m in [regex.search(l)] if m and len(l) == 28]\n\t\t\t\n\n\t\t\t# load the CSV file as a numpy matrix\n\t\t\twith open(inputFolderPath+'/'+files[0],'rb') as f:\n\t\t\t reader = csv.reader(f)\n\t\t\t header = next(reader)\n\t\t\t num_cols = len(header)\n\t\t\t # print header\n\t\t\t print i\n\t\t\t target_idx = [idx for idx, item in enumerate(header) if item.startswith(str(targetList[i]).zfill(4)+'N')]\n\t\t\t regex = re.compile(\"....N:.*\")\n\t\t\t nextYearIDs = [idx for idx, item in enumerate(header) if regex.search(item)]\n\t\t\t nextYearCount = len(nextYearIDs)\n\t\t\t if len(target_idx) > 0:\n\t\t\t \ttarget = target_idx[0]-1\n\t\t\t \tprint ('OK',year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t else:\n\t\t\t \tprint (year, targetList[i], inputFolderPath+'/'+files[0])\n\t\t\t \tbreak\n\t\t\t f.close()\n\t\t\tdataset = np.genfromtxt(inputFolderPath+'/'+files[0], delimiter=\",\", skip_header=1, autostrip=True, missing_values=np.nan, usecols=tuple(range(1,num_cols)))\n\t\t\t# print (dataset.shape)\n\t\t\t# X = np.concatenate((dataset[:,0:target],dataset[:,target+1:dataset.shape[1]]),axis=1)\n\t\t\tX = dataset[:,nextYearCount:dataset.shape[1]]\n\t\t\t# X = np.concatenate((dataset[:,0:2],dataset[:,3:dataset.shape[1]),axis=1)\n\t\t\ty = dataset[:,target]\n\t\t\t\n\t\t\timp = Imputer(missing_values='NaN', strategy='median', axis=0)\n\t\t\timputedX = imp.fit_transform(X,y)\n\t\t\timputedX = np.array([imputedX[j] for j in range(imputedX.shape[0]) if not np.isnan(y[j])])\n\t\t\tdeleteMissingY = np.array([x1 for x1 in y if not 
np.isnan(x1)])\n\n\t\t\tk = 40\n\t\t\tselection = SelectKBest(f_regression, k=k)\n\t\t\timputedX_new = selection.fit_transform(imputedX, deleteMissingY)\n\t\t\t\n\t\t\tselectedFeatures = [[item, selection.scores_[idx], selection.pvalues_[idx]] for idx, item in enumerate(header[nextYearCount+1:]) if selection.get_support()[idx]]\n\t\t\tselectedFeatures.sort(key=lambda x: x[1], reverse=True)\n\t\t\t\n\t\t\trows.append([year, 'score', 'p-value'])\n\t\t\trows.extend(selectedFeatures)\n\t\t\trows.append(['', '', ''])\n\t\t\tprint 'Hey'\n\n\t\tfilename = outputFolderPath+'/'+('FeatureSelectionIndicator%d - k%d - %s.csv' % (targetList[i], k, 'f_regression'))\n\t\twith open(filename,'wb') as w:\n\t\t\ta = csv.writer(w, delimiter = ',')\n\t\t\ta.writerows(rows)\n\t\tw.close()", "def lag(x: np.ndarray, p: int = 1, *, fill_value: Any = np.nan):\n return shift(x, p, fill_value=fill_value)", "def modis_lai_fill_gap(in_path, doy_start, doy_end):\n date_start = datetime.datetime.strptime(doy_start, \"%Y%m%d\").date()\n date_end = datetime.datetime.strptime(doy_end, \"%Y%m%d\").date()\n\n for nc_file in os.listdir(in_path):\n if nc_file.endswith('.nc'):\n nc_date = datetime.datetime.strptime(nc_file[:-3], \"%Y%m%d\").date()\n if date_start <= nc_date <= date_end:\n print(nc_file, \"----\",)\n doy = int(datetime.datetime.strptime(nc_file[:-3], '%Y%m%d').strftime('%Y%j'))\n for new_doy in [doy + x for x in range(1, 4)]:\n shutil.copy2(os.path.join(in_path, nc_file), os.path.join(in_path, '{}.nc'.format(\n datetime.datetime.strptime(str(new_doy), '%Y%j').strftime('%Y%m%d'))))\n print('{}.nc'.format(\n datetime.datetime.strptime(str(new_doy), '%Y%j').strftime('%Y%m%d')),)\n print('\\n')", "def reshape_bfill(x, y, xnew, left_values=\"first\", right_values=0):\r\n fill_value = [left_values, right_values]\r\n if left_values == \"first\":\r\n fill_value[0] = y[0]\r\n fill_value = tuple(fill_value)\r\n foo = scipy.interpolate.interp1d(\r\n x, y,\r\n axis=0,\r\n copy=False,\r\n kind=\"next\",\r\n bounds_error=False,\r\n fill_value=fill_value,\r\n assume_sorted=True,\r\n )\r\n return foo(xnew)", "def _circular_bias(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = (sim - ref) % 365\n out = out.where(\n out <= 365 / 2, 365 - out\n ) # when condition false, replace by 2nd arg\n out = out.where(ref >= sim, out * -1) # when condition false, replace by 2nd arg\n return out.assign_attrs(units=\"days\")", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def SplitGap(data,gapsize,medwin,fluxdiff):\n \n # defining new empty lists and stuff\n pcount=0\n istamps=[]\n outData={}\n \n data['x'].mask = data['UnMasked']\n data['y'].mask = data['UnMasked']\n data['yerr'].mask = data['UnMasked']\n \n # median smoothing the lightcurve\n mvavg1 = movingMedian(data['y'],medwin)\n mvavg1 = num.append(mvavg1,mvavg1[-1])\n mvavg1 = data['y']\n # first derivative of smoothed lightcurve\n diff1 = num.diff(mvavg1)\n diff1 = num.hstack((diff1,diff1[-1]))\n \n # second derivative of smoothed lightcurve\n diff2 = num.diff(diff1)\n diff2 = num.hstack((diff2[-1],diff2))\n\n # compute ourlier resistant sigma\n sig = compute1Sigma(diff1)\n 
#pylab.plot(diff1,'g.')\n #pylab.plot([0,6000],[5*sig,5*sig],'k-')\n #pylab.plot([0,6000],[3*sig,3*sig],'k-')\n #pylab.plot([0,6000],[1*sig,1*sig],'k-')\n #pylab.show()\n\n # The grand master loop >=}\n # to make portion slices\n for i in range(len(data['x'])-1):\n dt = data['x'][i+1]- data['x'][i]\n j1 = max(0,i-medwin)\n j2 = i + medwin\n if pcount == 0:\n i0 = 0\n if pcount > 0:\n i0 = i1+1\n if dt > gapsize:\n i1 = i\n istamps.append([i0,i1])\n pcount += 1\n #if num.abs(diff1[i]) > 5*sig:\n #i1 = i\n #istamps.append([i0,i1])\n #pcount += 1\n #print num.abs(diff1[i]/data['y'][i]), diff1[i], data['y'][i], diff1[i+1], data['y'][i+1]\n #print i, ' test flux gap'\n i1 = i+1\n istamps.append([i0,i1])\n \n \n \n if data['bool']==False:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':False}\n else:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1], 'TransitMask':data['TransitMask'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':True}\n \n return outData", "def increment_year(self):", "def _calculate_stocks_after_baseline_period(\n baseline_stock_raster_path, yearly_accumulation_raster_path, n_years,\n target_raster_path):\n # Both of these values are assumed to be defined from earlier in the\n # model's execution.\n baseline_nodata = pygeoprocessing.get_raster_info(\n baseline_stock_raster_path)['nodata'][0]\n accum_nodata = pygeoprocessing.get_raster_info(\n yearly_accumulation_raster_path)['nodata'][0]\n\n def _calculate_accumulation_over_years(baseline_matrix, accum_matrix):\n target_matrix = numpy.empty(baseline_matrix.shape, dtype=numpy.float32)\n target_matrix[:] = NODATA_FLOAT32_MIN\n\n valid_pixels = (\n ~utils.array_equals_nodata(baseline_matrix, baseline_nodata) &\n ~utils.array_equals_nodata(accum_matrix, accum_nodata))\n\n target_matrix[valid_pixels] = (\n baseline_matrix[valid_pixels] + (\n accum_matrix[valid_pixels] * n_years))\n\n return target_matrix\n\n pygeoprocessing.raster_calculator(\n [(baseline_stock_raster_path, 1),\n (yearly_accumulation_raster_path, 1)],\n _calculate_accumulation_over_years, target_raster_path,\n gdal.GDT_Float32, NODATA_FLOAT32_MIN)", "def bandpasscorrect(data):\n ret=[x for x in data]\n n=len(ret)\n ret[0]=1.083*ret[0]-0.083*ret[1]\n ret[n-1]=1.083*ret[n-1]-0.083*ret[n-2]\n for k in range(1,n-1):\n ret[k]=1.166*ret[k]-0.083*ret[k-1]-0.083*ret[k+1]\n return ret", "def reverts_group(*name_lists):\n\n per_yr_data = []\n frac_reverts_data = []\n labels = []\n\n for name_list in name_lists:\n\n for name in name_list:\n pg = Wik_edits_pg(name)\n per_yr_data.append(pg.reverts_per_yr())\n frac_reverts_data.append(pg.frac_reverts())\n\n labels.extend(name_list)\n # create breaks in data and labels\n labels.append(\"\")\n per_yr_data.append(0)\n frac_reverts_data.append(0)\n\n # delete tailing break\n del per_yr_data[-1]\n del frac_reverts_data[-1]\n\n return (per_yr_data, frac_reverts_data, labels)", "def linear_interpolate(df, offset, final_year=\"2050\", 
harmonize_year=\"2015\"):\n df = df.copy()\n x1, x2 = harmonize_year, final_year\n y1, y2 = offset + df[x1], df[x2]\n m = (y2 - y1) / (float(x2) - float(x1))\n b = y1 - m * float(x1)\n\n cols = [x for x in utils.numcols(df) if int(x) < int(final_year)]\n for c in cols:\n df[c] = m * float(c) + b\n return df", "def fill_between(initial,final):\n return np.arange(initial + 1, final)", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if not self._is_numeric(name):\n msg = \"Can only band numeric typed data! {} is {}.\"\n msg = msg.format(name, self._get_type(name))\n raise TypeError(msg)\n if not text_key: text_key = self.text_key\n if not new_name: new_name = '{}_banded'.format(name)\n if not label: label = self.text(name, False, text_key)\n franges = []\n for idx, band in enumerate(bands, start=1):\n lab = None\n if isinstance(band, dict):\n lab = list(band.keys())[0]\n band = list(band.values())[0]\n if isinstance(band, tuple):\n if band[0] < 0:\n raise ValueError('Cannot band with lower bound < 0.')\n elif band[1] < 0:\n raise ValueError('Cannot band with upper bound < 0.')\n r = '{}-{}'.format(band[0], band[1])\n franges.append([idx, lab or r, {name: frange(r)}])\n else:\n r = str(band)\n franges.append([idx, lab or r, {name: [band]}])\n\n self.derive(new_name, 'single', label, franges,\n text_key=text_key)\n\n return None", "def decrement_frame(self, increment=1, freeze_cursor=False):\n if self.current_frame > 0 or self.selected_index < self.frame_size - increment:\n self.current_frame -= increment\n\n process_result = self.__process_selected_change(True, freeze_cursor)\n if process_result:\n self.current_frame += increment", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def lc_animation(\n da,\n file_name=\"default_animation\",\n measurement=None,\n stacked_plot=False,\n colour_bar=False,\n animation_interval=500,\n width_pixels=10,\n dpi=150,\n font_size=15,\n label_ax=True):\n\n def calc_class_ratio(da):\n \"\"\"\n Creates a table listing year by year what percentage of the\n total area is taken up by each class.\n Parameters\n ----------\n da : xarray.DataArray with time dimension\n Returns\n -------\n Pandas Dataframe : containing class percentages per year\n \"\"\"\n\n # list all class codes in dataset\n list_classes = (np.unique(da, return_counts=False)).tolist()\n\n # create empty dataframe & dictionary\n ratio_table = pd.DataFrame(data=None, columns=list_classes)\n date_line = {}\n\n # count all pixels, should be consistent\n total_pix = int(np.sum(da.isel(time=1)))\n\n # iterate through each year in dataset\n for i in range(0, len(da.time)):\n date = str(da.time[i].data)[0:10]\n\n # for each year iterate though each present class number\n # and count pixels\n for n in list_classes:\n number_of_pixles = int(np.sum(da.isel(time=i) == n))\n percentage = number_of_pixles / total_pix * 100\n date_line[n] = percentage\n\n # add each year's counts to dataframe\n ratio_table.loc[date] = date_line\n\n return ratio_table\n\n def rgb_to_hex(r, g, b):\n hex = \"#%x%x%x\" % (r, g, b)\n if len(hex) < 7:\n hex = \"#0\" + hex[1:]\n return hex\n\n measurement = get_layer_name(measurement, da)\n\n # Add gif to end of filename\n file_name = file_name + \".gif\"\n\n # Create colour map and normalisation for specified lc measurement\n try:\n layer_cmap, layer_norm, cb_labels, cb_ticks = lc_colourmap(\n measurement, colour_bar=True)\n except AssertionError:\n\n raise KeyError(f'Could not automatically determine colour scheme from '\n 
f'DataArray name {measurement}. Please specify which '\n 'DEA Landcover measurement is being plotted by providing '\n 'the name using the \"measurement\" variable For example '\n '(measurement = \"full_classification\")')\n \n # Prepare variables needed\n # Get info on dataset dimensions\n height, width = da.geobox.shape\n scale = width_pixels / width\n left, bottom, right, top = da.geobox.extent.boundingbox\n extent = [left, right, bottom, top]\n\n outline = [patheffects.withStroke(linewidth=2.5, foreground=\"black\")]\n annotation_defaults = {\n \"xy\": (1, 1),\n \"xycoords\": \"axes fraction\",\n \"xytext\": (-5, -5),\n \"textcoords\": \"offset points\",\n \"horizontalalignment\": \"right\",\n \"verticalalignment\": \"top\",\n \"fontsize\": font_size,\n \"color\": \"white\",\n \"path_effects\": outline,\n }\n\n # Get information needed to display the year in the top corner\n times_list = da.time.dt.strftime(\"%Y\").values\n text_list = [False] * len(times_list)\n annotation_list = [\"\\n\".join([str(i) for i in (a, b) if i])\n for a, b in zip(times_list, text_list)]\n\n if stacked_plot == True:\n \n\n\n # Create table for stacked plot\n stacked_plot_table = calc_class_ratio(da)\n\n # Build colour list of hex vals for stacked plot\n hex_colour_list = []\n colour_def = lc_colours[measurement]\n\n # Custom error message to help if user puts incorrect measurement name\n for val in list(stacked_plot_table):\n try:\n r, g, b = colour_def[val][0:3]\n except KeyError:\n raise KeyError(\n \"class number not found in colour definition. \"\n \"Ensure measurement name provided matches the dataset being used\")\n hex_val = rgb_to_hex(r, g, b)\n hex_colour_list.append(hex_val)\n\n # Define & set up figure\n fig, (ax1, ax2) = plt.subplots(1, 2, dpi=dpi, constrained_layout=True)\n fig.set_size_inches(width * scale * 2, height * scale, forward=True)\n fig.set_constrained_layout_pads(\n w_pad=0.2, h_pad=0.2, hspace=0, wspace=0)\n\n # This function is called at regular intervals with changing i\n # values for each frame\n def _update_frames(i, ax1, ax2, extent, annotation_text,\n annotation_defaults, cmap, norm):\n # Clear previous frame to optimise render speed and plot imagery\n ax1.clear()\n ax2.clear()\n\n ax1.imshow(da[i, ...], cmap=cmap, norm=norm,\n extent=extent, interpolation=\"nearest\")\n if(not label_ax):\n ax1.set_axis_off()\n\n clipped_table = stacked_plot_table.iloc[: int(i + 1)]\n data = clipped_table.to_dict(orient=\"list\")\n date = clipped_table.index\n\n ax2.stackplot(date, data.values(), colors=hex_colour_list)\n ax2.tick_params(axis=\"x\", labelrotation=-45)\n ax2.margins(x=0, y=0)\n\n # Add annotation text\n ax1.annotate(annotation_text[i], **annotation_defaults)\n ax2.annotate(annotation_text[i], **annotation_defaults)\n\n # anim_fargs contains all the values we send to our\n # _update_frames function.\n # Note the layer_cmap and layer_norm which were calculated\n # earlier being passed through\n anim_fargs = (\n ax1,\n ax2, # axis to plot into\n [left, right, bottom, top], # imshow extent\n annotation_list,\n annotation_defaults,\n layer_cmap,\n layer_norm,\n )\n\n else: # stacked_plot = False\n\n # if plotting level 4 with colourbar\n\n if measurement == 'level4' and colour_bar == True:\n\n # specific setting to fit level 4 colour bar beside the plot\n # we will plot the animation in the left hand plot\n # and put the colour bar on the right hand side\n\n # Define & set up figure, two subplots so colour bar fits :)\n fig, (ax1, ax2) = plt.subplots(1, 2, dpi=dpi,\n 
constrained_layout=True, gridspec_kw={'width_ratios': [3, 1]})\n fig.set_size_inches(width * scale * 2,\n height * scale, forward=True)\n fig.set_constrained_layout_pads(\n w_pad=0.2, h_pad=0.2, hspace=0, wspace=0)\n\n # make colour bar\n # provide left hand canvas to colour bar fuction which is where the image will go\n # colourbar will plot on right side beside it\n\n make_colorbar(fig, ax1, measurement, animation=True)\n\n # turn off lines for second plot so it's not ontop of colourbar\n ax2.set_axis_off()\n\n # plotting any other measurement with or with-out colour bar or level 4 without\n else:\n\n # Define & set up figure\n fig, ax1 = plt.subplots(1, 1, dpi=dpi)\n fig.set_size_inches(width * scale, height * scale, forward=True)\n if(not label_ax):\n fig.subplots_adjust(left=0, bottom=0, right=1,\n top=1, wspace=None, hspace=None)\n # Add colourbar here\n if colour_bar:\n make_colorbar(fig, ax1, measurement)\n\n\n # This function is called at regular intervals with changing i\n # values for each frame\n def _update_frames(i, ax1, extent, annotation_text,\n annotation_defaults, cmap, norm):\n # Clear previous frame to optimise render speed and plot imagery\n ax1.clear()\n ax1.imshow(da[i, ...], cmap=cmap, norm=norm,\n extent=extent, interpolation=\"nearest\")\n if(not label_ax):\n ax1.set_axis_off()\n\n # Add annotation text\n ax1.annotate(annotation_text[i], **annotation_defaults)\n\n # anim_fargs contains all the values we send to our\n # _update_frames function.\n # Note the layer_cmap and layer_norm which were calculated\n # earlier being passed through\n anim_fargs = (\n ax1,\n [left, right, bottom, top], # imshow extent\n annotation_list,\n annotation_defaults,\n layer_cmap,\n layer_norm,\n )\n\n # Animate\n anim = FuncAnimation(\n fig=fig,\n func=_update_frames,\n fargs=anim_fargs,\n frames=len(da.time),\n interval=animation_interval,\n repeat=False,\n )\n\n anim.save(file_name, writer=\"pillow\", dpi=dpi)\n plt.close()\n return Image(filename=file_name)", "def fill_in_data(color,frames,fs=25):\n color = color\n colormat = color.as_matrix()\n frameDiff = np.diff(colormat.T[2])\n locations = np.where(frameDiff!=1)[0]\n\n #Calculate number of frames skipped\n #sample = []\n #sample = colormat.T\n sample = sample[:2].T\n #frames = range(100,len(colormat.T[2])+100)\n #frames = np.linspace(frames[0],frames[-1],frames[-1]-frames[0]+1)\n #frames = frames[:len(frames)-1]\n \n #if locations is empty, try looking for a row of nans\n if np.all(locations):\n for i in range(len(sample)):\n if np.all(sample[i] == 0):\n sample[i]=[np.nan, np.nan]\n missing = list(np.where(np.isnan(sample.T[0])))\n\n else:\n numfill = []\n missing = []\n for i in locations:\n numfill.append(frames[i+1]-frames[i])#-1)\n #pdb.set_trace()\n missing.append(np.linspace(i+1,i+1+numfill[-1],numfill[-1]))\n\n missing = np.concatenate(missing)\n\n missing = missing[:len(missing)-1]\n missing = missing.astype(int)\n\n pdb.set_trace()\n\n for j in reversed(missing):\n sample = np.insert(sample,j,(np.nan,np.nan),axis = 0)\n #frames = np.insert(frames,j,j,axis=0)\n\n color_x,color_y,x_filt=KFilt(sample,fs)\n color_mat = np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1]))\n return color_mat,frames,x_filt", "def get_gaps_curve(raw_data):\n peaks = []\n valleys = []\n gaps = []\n # process the first window; i.e., the first PAGESIZE rows of data\n for j in range(1, Parser.PAGESIZE):\n if raw_data[j] > raw_data[j - 1] and raw_data[j] > raw_data[j + 1]:\n bisect.insort_left(peaks, raw_data[j], 
bisect.bisect_left(peaks, raw_data[j]))\n elif raw_data[j] < raw_data[j - 1] and raw_data[j] < raw_data[j + 1]:\n bisect.insort_left(valleys, raw_data[j], bisect.bisect_left(valleys, raw_data[j]))\n\n gaps.append(Parser.__find_gaps(peaks, valleys))\n\n # slide from start to end\n for j in range(Parser.PAGESIZE, len(raw_data)):\n s = j - Parser.PAGESIZE + 1\n if raw_data[s] > raw_data[s - 1] and raw_data[s] > raw_data[s + 1]:\n del peaks[bisect.bisect_left(peaks, raw_data[s])]\n elif raw_data[s] < raw_data[s - 1] and raw_data[s] < raw_data[s + 1]:\n del valleys[bisect.bisect_left(valleys, raw_data[s])]\n\n e = j - 1\n if raw_data[e] > raw_data[e - 1] and raw_data[e] > raw_data[e + 1]:\n bisect.insort_left(peaks, raw_data[e], bisect.bisect_left(peaks, raw_data[e]))\n elif raw_data[e] < raw_data[e - 1] and raw_data[e] < raw_data[e + 1]:\n bisect.insort_left(valleys, raw_data[e], bisect.bisect_left(valleys, raw_data[e]))\n gaps.append(Parser.__find_gaps(peaks, valleys))\n\n return gaps", "def backward(self, grad, index):\n pass", "def __min_birth_max_death(persistence, band=0.0):\n # Look for minimum birth date and maximum death date for plot optimisation\n max_death = 0\n min_birth = persistence[0][1][0]\n for interval in reversed(persistence):\n if float(interval[1][1]) != float(\"inf\"):\n if float(interval[1][1]) > max_death:\n max_death = float(interval[1][1])\n if float(interval[1][0]) > max_death:\n max_death = float(interval[1][0])\n if float(interval[1][0]) < min_birth:\n min_birth = float(interval[1][0])\n if band > 0.0:\n max_death += band\n return min_birth, max_death", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate 
threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def conv_backward(dZ, A_prev, W, b, padding=\"same\", stride=(1, 1)):\n (m, h_prev, w_prev, c_prev) = A_prev.shape\n (m, h_new, w_new, c_new) = dZ.shape\n (kh, kw, c_prev, c_new) = W.shape\n sh, sw = stride\n if padding == 'same':\n ph = int(np.ceil((((h_prev - 1) * sh + kh - h_prev) / 2)))\n pw = int(np.ceil((((w_prev - 1) * sw + kw - w_prev) / 2)))\n if padding == 'valid':\n pw = 0\n ph = 0\n dA_prev = np.zeros(A_prev.shape)\n dW = np.zeros(W.shape)\n db = np.sum(dZ, axis=(0, 1, 2), keepdims=True)\n A_prev_pad = np.pad(A_prev, pad_width=((0, 0), (ph, ph), (pw, pw),\n (0, 0)), mode='constant')\n dA_prev_pad = np.pad(dA_prev, pad_width=((0, 0), (ph, ph), (pw, pw),\n (0, 0)), mode='constant')\n for i in range(m):\n a_prev_pad = A_prev_pad[i]\n da_prev_pad = dA_prev_pad[i]\n for h in range(h_new):\n for w in range(w_new):\n for c in range(c_new):\n v_beg = h * sh\n v_end = v_beg + kh\n h_start = w * sw\n h_end = h_start + kw\n a_slice = a_prev_pad[v_beg:v_end, h_start:h_end]\n da_prev_pad[v_beg:v_end,\n h_start:h_end] += \\\n W[:, :, :, c] * dZ[i, h, w, c]\n dW[:, :, :, c] += a_slice * dZ[i, h, w, c]\n\n if padding == 'same':\n dA_prev[i, :, :, :] += da_prev_pad[ph:-ph, pw:-pw, :]\n if padding == 'valid':\n dA_prev[i, :, :, :] += da_prev_pad\n\n return dA_prev, dW, db", "def interpolate(df):\n for x in df.columns:\n if x == \"date\":\n continue\n df[x] = df[x].interpolate(method='linear', axis=0).ffill().bfill()\n return df", "def get_last_filling(stock_ticker: str,\n filling_year: Optional[int] = None):\n company_cik = sec_finance_functions.get_company_data_by_ticker(stock_ticker).company_cik\n all_company_fillings = sec_finance_functions.get_all_company_filings_by_cik(company_cik)\n all_company_fillings_by_year = sec_finance_functions.get_all_fillings_of_year(all_company_fillings, filling_year)\n return all_company_fillings_by_year", "def _fill_undefined_gaps(phases):\n\n undefined, = np.where(phases == 
CurvePhases.Undetermined.value)\n last_index = phases.size - 1\n\n # If the curve is just two measurements this makes little sense\n if last_index < 2:\n return\n\n for loc in undefined:\n\n if loc == 0:\n if phases[1] != CurvePhases.Undetermined.value:\n phases[loc] = phases[loc + 1]\n elif loc == last_index:\n if phases[loc - 1] != CurvePhases.Undetermined.value:\n phases[loc] = phases[loc - 1]\n elif phases[loc - 1] == phases[loc + 1] and phases[loc + 1] != CurvePhases.Undetermined.value:\n phases[loc] = phases[loc + 1]", "def cull_missing(df, colname, missingdays):\n df2 = df[[\"binyear\", colname]]\n nancounts = df2.groupby(\"binyear\").agg(lambda x: x.isnull().sum())\n # cull anything with more than 3 days NaN\n df2 = nancounts[nancounts[colname] > missingdays]\n years = []\n if not df2.empty:\n years = list(df2.index.values)\n resdf = df[~df[\"binyear\"].isin(years)]\n minyear = resdf[\"binyear\"].min()\n # Prevent scary cullyears listing\n return resdf, list(filter(lambda x: x > minyear, years))", "async def refresh_bbands(self, pair: str):\n\n if not config['enable_bbands']:\n return\n\n bband_window = config['ma_windows'][config['bband_ma']]\n source = self.adjusted_close_values[pair][-(config['chart_age'] + bband_window):]\n bband_high = []\n bband_low = []\n ma_index = 0\n\n for index in range(bband_window, len(source)):\n bband_stdev = np.std(np.array(source[index - bband_window:index])) * config['bband_mult']\n bband_high.append(self.close_value_mas[pair][bband_window][ma_index] + bband_stdev)\n bband_low.append(self.close_value_mas[pair][bband_window][ma_index] - bband_stdev)\n ma_index += 1\n\n self.bollinger_bands[pair]['H'] = bband_high\n self.bollinger_bands[pair]['L'] = bband_low\n\n self.log.debug('{} Refreshed Bollinger bands.', pair, verbosity=1)", "def reverts_per_yr_graphic(data, labels):\n\n y_pos = [3*x for x in list(range(len(data)))]\n plt.bar(y_pos, data)\n plt.xticks(y_pos,labels, rotation = \"vertical\")\n plt.ylabel('Reverts Per Year')\n plt.title('Reverts Per Year of Contentious vs. 
Non-contentious Wikipeida Articles')\n plt.savefig(\"Reverts_per_Year\")\n plt.show()", "def backward(self, *output_grads):\n raise NotImplementedError", "def yearly_avg(dacycle,avg):\n\n if avg not in ['transcom','transcom_extended','olson','olson_extended','country','flux1x1']:\n raise IOError,'Choice of averaging invalid'\n\n analysisdir = dacycle['dir.analysis']\n monthdir = os.path.join(analysisdir , 'data_%s_monthly'%avg )\n yeardir = os.path.join(analysisdir,'data_%s_yearly'%avg)\n\n if not os.path.exists(yeardir):\n print \"Creating new output directory \" + yeardir\n os.makedirs(yeardir)\n\n files = os.listdir(monthdir) # get monthly files\n files = [f for f in files if '-' in f and f.endswith('.nc')]\n\n if not files:\n print \"No full year finished yet, skipping yearly average...\"\n return\n\n fileinfo = {}\n for filename in files:\n date=datetime.datetime.strptime(filename.split('.')[-2],'%Y-%m')\n fileinfo[filename] = date\n\n years = set([d.year for d in fileinfo.values()])\n\n sd = datetime.datetime(min(years),1,1)\n ed = datetime.datetime(max(years)+1,1,1)\n\n while sd < ed: \n\n nd = sd + relativedelta(years=+1)\n \n avg_files = [os.path.join(monthdir,k) for k,v in fileinfo.iteritems() if v < nd and v >= sd]\n \n if not len(avg_files) == 12 : \n print \"Year %04d not finished yet, skipping yearly average...\"%sd.year\n else:\n targetfile = os.path.join(yeardir,'%s_fluxes.%s.nc'%(avg,sd.strftime('%Y')))\n \n if not os.path.exists(targetfile):\n print \"Year %04d is complete, I have 12 months for the next file\"%sd.year\n command = ['ncra','-O']+ avg_files + [targetfile]\n status = subprocess.check_call(command)\n\n sd = nd", "def ran_remove_shaded(year_list,airline_list,processed_direc,graph_direc):\n \n IAPL_df_all = pd.DataFrame(columns = ['Year','Airline','IAPL'])\n CS_df_all = pd.DataFrame(columns = ['Year','Airline','Cluster_Size'])\n AC_df_all = pd.DataFrame(columns = ['Year','Airline','AC']) \n for airline in airline_list:\n script_dir = os.path.dirname(os.getcwd())\n CS_path = \"%s%s_CSR.csv\" %(processed_direc,airline)\n CS_file = os.path.join(script_dir,CS_path)\n CS_df = pd.read_csv(CS_file)\n\n IAPL_path = \"%s%s_IAPLR.csv\" %(processed_direc,airline)\n IAPL_file = os.path.join(script_dir,IAPL_path)\n IAPL_df = pd.read_csv(IAPL_file)\n\n AC_path = \"%s%s_ACR.csv\" %(processed_direc,airline)\n AC_file = os.path.join(script_dir,AC_path)\n AC_df = pd.read_csv(AC_file)\n\n CS_df_airline = pd.DataFrame(columns = ['Year','Airline','Cluster_Size'])\n CS_year_df = pd.DataFrame()\n\n IAPL_df_airline = pd.DataFrame(columns = ['Year','Airline','IAPL'])\n IAPL_year_df = pd.DataFrame()\n\n AC_df_airline = pd.DataFrame(columns = ['Year','Airline','AC'])\n AC_year_df = pd.DataFrame()\n\n col = 0\n for year in year_list:\n CS_year_df['Cluster_Size'] = CS_df.iloc[:,col]\n CS_quant_calc = CS_year_df.quantile([0.25,0.5,0.75])\n CS_quant_calc['Year'] = year\n CS_df_airline = pd.concat([CS_df_airline,CS_quant_calc],ignore_index=True)\n\n IAPL_year_df['IAPL'] = IAPL_df.iloc[:,col]\n IAPL_quant_calc = IAPL_year_df.quantile([0.25,0.5,0.75])\n IAPL_quant_calc['Year'] = year\n IAPL_df_airline = pd.concat([IAPL_df_airline,IAPL_quant_calc],ignore_index=True)\n\n AC_year_df['AC'] = AC_df.iloc[:,col]\n AC_quant_calc = AC_year_df.quantile([0.5,0.5,0.5])\n AC_quant_calc['Year'] = year\n AC_df_airline = pd.concat([AC_df_airline,AC_quant_calc],ignore_index=True)\n\n col = col + 1\n CS_df_airline['Airline'] = airline\n CS_df_all = pd.concat([CS_df_all,CS_df_airline],ignore_index = True)\n\n 
IAPL_df_airline['Airline'] = airline\n IAPL_df_all = pd.concat([IAPL_df_all,IAPL_df_airline],ignore_index = True)\n\n AC_df_airline['Airline'] = airline\n AC_df_all = pd.concat([AC_df_all,AC_df_airline],ignore_index = True)\n\n\n plt.figure(1,figsize=(2.8,2.0),dpi=300)\n ax1 = sns.lineplot(data=CS_df_all, x = 'Year', y = 'Cluster_Size', hue='Airline', style='Airline', marker = 'o')\n ax1.xaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.xlabel('Year')\n plt.ylabel('Cluster Size')\n plt.legend(airline_list,fontsize=10,labelspacing=0.15)\n plt.tight_layout()\n plt.savefig('%sShaded_CS.pdf'%(graph_direc,))\n\n plt.figure(2,figsize=(2.8,2.0),dpi=300)\n ax2 = sns.lineplot(data=IAPL_df_all, x = 'Year', y = 'IAPL', hue='Airline', style='Airline', marker = 'o')\n ax2.xaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.xlabel('Year')\n plt.ylabel('IAPL')\n plt.legend(airline_list,fontsize=10,labelspacing=0.15)\n plt.tight_layout()\n plt.savefig('%sShaded_IAPL.pdf'%(graph_direc,))\n\n plt.figure(3,figsize=(2.8,2.0),dpi=300)\n ax3 = sns.lineplot(data=AC_df_all, x = 'Year', y = 'AC', hue='Airline', style='Airline', marker = 'o')\n ax3.xaxis.set_major_locator(ticker.MultipleLocator(1))\n plt.xlabel('Year')\n plt.ylabel('Algebraic Connectivity')\n plt.legend(airline_list,fontsize=10,labelspacing=0.15)\n plt.tight_layout()\n plt.savefig('%sShaded_AC.pdf'%(graph_direc,))\n\n plt.show()", "def backfill_gaps(self, report):\n\n # sort timestamps into sequential bins (to reduce # of polls)\n if len(report['gaps']) != 0:\n bins = [\n list(g) for k, g in groupby(\n sorted(report['gaps']),\n key=lambda n, c=count(0, 60): n - next(c))]\n\n # if any bins > max_bin_size, split them into smaller bins.\n # takes the old list\n bins = self.split_oversize_bins(bins, report['max_bin_size'])\n\n delay = 1 # wait time before attmepting to re-poll after error\n stagger = 2 # delay co-efficient\n timeout = 10 # number of times to repoll before exception raised.\n\n # poll exchange REST endpoint for replacement bars\n bars_to_store = []\n for i in bins:\n try:\n bars = report['exchange'].get_bars_in_period(\n report['symbol'], i[0], len(i))\n for bar in bars:\n bars_to_store.append(bar)\n stagger = 2 # reset stagger to base after successful poll\n time.sleep(stagger + 1)\n except Exception as e:\n # retry polling with an exponential delay after each error\n for i in range(timeout):\n try:\n time.sleep(delay + 1)\n bars = report['exchange'].get_bars_in_period(\n report['symbol'], i[0], len(i))\n for bar in bars:\n bars_to_store.append(bar)\n stagger = 2\n break\n except Exception as e:\n delay *= stagger\n if i == timeout - 1:\n raise Exception(\"Polling timeout.\")\n\n # Sanity check, check that the retreived bars match gaps\n timestamps = [i['timestamp'] for i in bars_to_store]\n timestamps = sorted(timestamps)\n bars = sorted(report['gaps'])\n if timestamps == bars:\n query = {\"symbol\": report['symbol']}\n doc_count_before = (\n self.db_collections[report[\n 'exchange'].get_name()].count_documents(query))\n for bar in bars_to_store:\n try:\n self.db_collections[\n report['exchange'].get_name()].insert_one(bar)\n except pymongo.errors.DuplicateKeyError:\n # Skip duplicates that exist in DB.\n continue\n doc_count_after = (\n self.db_collections[report[\n 'exchange'].get_name()].count_documents(query))\n doc_count = doc_count_after - doc_count_before\n self.logger.debug(\n \"Saved \" + str(doc_count) + \" missing \" +\n report['symbol'] + \" bars.\")\n return True\n else:\n raise Exception(\n \"Fetched bars do 
not match missing timestamps.\")\n else:\n # Return false if there is no missing data.\n return False", "def winter_gif(self):\n # Create the directory.\n os.mkdir('./medal_figures_winter')\n start = self.start_year\n end = self.end_year\n duration = self.duration\n # Specify the years.\n years = [i for i in self.years_winter if (i >= start) and (i <= end)]\n # Setup the colormap.\n cmap = sns.cubehelix_palette(n_colors=6, start=2.5, rot=0.1, hue=2, dark=0.3, light=1, as_cmap=True)\n # Important variable and keywords to initialize cartopy.\n shapename = 'admin_0_countries'\n countries_shp = shpreader.natural_earth(resolution='110m', category='cultural', name=shapename)\n filenames = []\n # Loop in the specific years.\n for i in years:\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.Mercator())\n ax.set_extent([-169.95, 169.95, -65, 80], crs=ccrs.PlateCarree())\n ax.add_feature(cfeature.BORDERS)\n ax.coastlines(resolution='110m')\n # Add some titles for specific years.\n if i == 1924:\n fig.suptitle('The First Winter Olympics.', y=0.9, fontsize=14, fontweight='bold')\n if i == 1994:\n fig.suptitle('The International Olympic Committee voted to separate the Summer and Winter Games.',\n y=0.9, fontsize=12, fontweight='bold')\n if i == 2018:\n fig.suptitle('Suspension of the Russian Olympic Committee due to Olympic Doping Controversy.',\n y=0.9, fontsize=12, fontweight='bold')\n iso_lib = list(self.conv['ISO'])\n if i != 2018:\n city = self.df_winter.loc[self.df_winter['Year'] == i]['City'].iloc[0]\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, city))\n df_tmp = self.df_winter.loc[self.df_winter['Year'] == i]\n d = dict(df_tmp.groupby(df_tmp['Country']).size())\n else:\n ax.title.set_text('Total Number of Medals of Winter Olympics Year: %d City: %s' % (i, 'Pyeongchang'))\n m = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(')+1:j.find(')')]\n m.append(n)\n k = self.df_2018_winter['Total'].tolist()\n d = dict(zip(m, k))\n d.pop('30 NOCs', None)\n max_medal = float(max(d.values()))\n for country in shpreader.Reader(countries_shp).records():\n iso = country.attributes['ADM0_A3']\n medal_num = 0\n if iso in iso_lib:\n ioc = self.conv.loc[self.conv['ISO'] == iso,'IOC'].iloc[0]\n if not pd.isna(ioc):\n if ioc in d.keys():\n medal_num = d[ioc]\n if all([iso == 'RUS', i>=1956, i<=1988]):\n medal_num = d['URS']\n if all([iso=='DEU', i>=1968, i<=1988]):\n medal_num = d['FRG'] + d['GDR']\n if all([iso=='DEU', i>=1956, i<=1964]):\n medal_num = d['EUA']\n if i==1952 and iso=='DEU':\n medal_num = d['FRG']\n if i==1992 and iso=='RUS':\n medal_num = d['EUN']\n if i==2018 and iso=='RUS':\n medal_num = d['OAR']\n ax.add_geometries(country.geometry, ccrs.PlateCarree(),\n facecolor=cmap(medal_num / max_medal, 1))\n sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, max_medal))\n sm._A = []\n plt.colorbar(sm, ax=ax, orientation=\"horizontal\", fraction=0.046, pad=0.04)\n fname = './medal_figures_winter/year_%d.png' % i\n filenames.append(fname)\n plt.savefig(fname=fname, format='png')\n plt.close(fig)\n images = []\n # Create the gif.\n for filename in filenames:\n images.append(imageio.imread(filename))\n imageio.mimsave('./medal_figures_winter/movie.gif', images, duration=duration)\n return", "def resample(self, dataframes, freq='5s'):\n\n for df in dataframes:\n yield df.resample(freq, fill_method='bfill')", "def load_images(filename, bands, Args):\n image = np.zeros([Args.num, Args.out_size,\n 
Args.out_size, len(bands)])\n for i, band in enumerate(bands):\n print (\"Getting pstamps for band\", band)\n full_image = fits.open(filename.replace(\"band\", band))[0].data\n image[:, :, :, i] = get_stamps(full_image, Args)\n return image", "def _enumerate_years(self, preprocessed_data, disjoint):\n pass", "def initialize_layers(self, years):\n min_year = min(years)\n max_year = max(years)\n ordered_years = list(range(min_year, max_year + 1))\n self.layers = [Layer(y) for y in ordered_years]", "def collapse(self):\n try:\n wavelengths = pylab.linspace(self.start, self.end,\n self.image.shape[not self.waveaxis])\n except TypeError:\n print 'The starting and ending wavelengths must be specified.'\n background = pylab.zeros(len(wavelengths))\n backgroundlines = 0\n data = pylab.zeros(len(wavelengths))\n datalines = 0\n for region in self.regions:\n if region['group'] is 0:\n backgroundlines += region['max'] - region['min']\n background += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n else:\n datalines += region['max'] - region['min']\n data += self.image[region['min']:region['max'] + 1, :]\\\n .sum(axis=self.waveaxis)\n background = [sum/backgroundlines for sum in background]\n data = [sum/datalines for sum in data]\n corrected = pylab.array(data) - pylab.array(background)\n output = Spectrum(list(wavelengths), list(corrected))\n return output", "def save_barplot_yearly_sns(self, zerovalues=True):\n if zerovalues:\n df = self.df\n else:\n df = self.df\n df = df.loc[df['Global Horiz'] != 0]\n\n sns.set()\n for i in range(1, 13):\n df_month_by_year = self.filter_df_month(i)\n plt.title('{}'.format(calendar.month_name[i]))\n\n sns.barplot(x=df_month_by_year.index,\n y='Global Horiz',\n data=df_month_by_year,\n color='blue',\n capsize=.2)\n\n plt.xticks(rotation=90)\n plt.savefig('{}_{}.png'.format(\n i, calendar.month_name[i]), format='png')\n\n plt.clf()", "def remove_dbledge(img):\n\t(ny,nx) = np.shape(img)\n\tfor i in range(nx):\n\t\tidx = np.array(np.nonzero(img[:,i]))\n\t\tif np.size(idx) == 0:\n\t\t\tcontinue\n\t\tidx = idx[0][-1]\n\t\timg[idx-1::-1,i] = 0\n\treturn img", "def fix_date_year(df, col_1, col_2):\n\n for idx, date in enumerate(df[col_1]):\n year1 = date.year\n year2 = df.loc[idx, col_2]\n if np.abs(year1-year2)>95:\n year1 -=100\n df.loc[idx, col_1]=df.loc[idx, col_1].replace(year=year1)\n return df", "def multi_impute_maxes(frame,daily_limit = 15,annual_limit = 1,\n max_nan_per_year = 60):\n \n n_columns_original = frame.shape[1]\n \n # We first do a backward/forward fill to cover gaps up \n # to 2*daily_limit days.\n frame_bfill = frame.fillna(method='bfill',limit = daily_limit)\n frame_ffill = frame_bfill.fillna(method='ffill',limit = daily_limit)\n\n # We then compute a semi-processed maxima dataframe. \n frame_max = frame_ffill.groupby(pd.TimeGrouper('A')).max()\n\n # Furthermore, we want to NaN out any years for which there are \n # too many unobserved days.\n nans_per_year = np.isnan(frame).groupby(pd.TimeGrouper('A')).sum()\n\n # If there are too many NaNs in that year, we drop that year.\n max_nan_per_year = 60\n is_year_allowed = nans_per_year < max_nan_per_year # Boolean array\n frame_max[~is_year_allowed] = np.nan\n\n # Next, we back/forward fill 'annual_limit' years. 
\n # Any stations which still have missing data \n # remaining are dropped from further analysis.\n max_bfill = frame_max.fillna(method='bfill',limit = annual_limit)\n max_ffill = max_bfill.fillna(method='ffill',limit = annual_limit)\n max_dropped = max_ffill.dropna(axis=1)\n \n n_dropped = max_dropped.shape[1] - n_columns_original\n print 'Out of {0} columns, {1} were dropped.'.format(n_columns_original,\n n_dropped)\n \n return max_dropped", "def reallocate(banks):\n distributions = dict()\n cycles = 0\n\n while tuple(banks) not in distributions:\n distributions[tuple(banks)] = cycles\n redistribute(banks, banks.index(max(banks)))\n cycles += 1\n\n cycles_in_loop = cycles - distributions[tuple(banks)]\n return cycles, cycles_in_loop", "def end_of_year_returns(model_roi, return_type, start, end):\n\n # converts index of datetimes to a list of strings\n dates = model_roi.index.astype('str').tolist()\n\n # list comprehension of a string of dates between the\n # start and end dates\n years = [str(x) for x in range(start, end + 1)]\n\n # generates an empty list of lists for each year\n end_year_dates = [[] for _ in range(len(years))]\n\n # iterates over every date in the roi series\n for date in dates:\n\n # iterates over every year in the years list\n for year in years:\n\n # iterates over every year in each date\n if year in date:\n\n # converts each date string to a datime object\n datetime_object = datetime.strptime(date, '%Y-%m-%d')\n\n # appends each date to its corresponding year in the years list\n (end_year_dates[years.index(year)]\n .append(datetime.strftime(datetime_object, '%Y-%m-%d')))\n\n # finds the last date in each year\n end_year_dates = [max(x) for x in end_year_dates]\n\n # gets the rounded end of year returns\n returns = [round(model_roi[date], 1) for date in end_year_dates]\n\n # shifts the returns list by 1 and appends 0 to the beginning of the list\n return_rates = [0] + returns[:len(returns)-1]\n \"\"\"Example: [a, b, c] -> [0, a, b]\"\"\"\n\n # converts returns list to an array\n returns_arr = np.array(returns)\n\n # converts the return_rates list to an array\n return_rates_arr = np.array(return_rates)\n\n # calculates the rounded rate of returns\n return_rates = [round(x, 1) for x in list(returns_arr - return_rates_arr)]\n \"\"\"Example: [a, b, c] - [0, a, b] = [a, b-a, c-b]\"\"\"\n\n # dictionary with the years as keys and returns as values\n returns = dict(zip(years, returns))\n\n # dictionary with the years as keys and return rates as values\n return_rates = dict(zip(years, return_rates))\n\n if return_type == 'returns':\n return returns\n\n if return_type == 'return_rates':\n return return_rates", "def _resampler(df_year, year):\n # Aggregates data using mean for each time interval and gets a\n # sample count for each new data point.\n df_15 = df_year.resample('15T').apply(['mean', 'count'])\n df_30 = df_year.resample('30T').apply(['mean', 'count'])\n df_1h = df_year.resample('1H').apply(['mean', 'count'])\n df_1d = df_year.resample('D').apply(['mean', 'count'])\n\n # Removes top level title that is not needed.\n df_15.columns = df_15.columns.droplevel(0)\n df_30.columns = df_30.columns.droplevel(0)\n df_1h.columns = df_1h.columns.droplevel(0)\n df_1d.columns = df_1d.columns.droplevel(0)\n\n # Creating new date range to include all time intervals within the year.\n idx_15 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:45:00', freq='15T')\n idx_30 = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:30:00', 
freq='30T')\n idx_1h = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='1H')\n idx_1d = pd.date_range(str(year) + '-01-01 00:00:00',\n str(year) + '-12-31 23:00:00', freq='D')\n\n # Reindexing so data that starts in, for example August, will now\n # have the months prior to August filled with nans.\n df_15_reindex = df_15.reindex(idx_15, fill_value=np.nan)\n df_15_reindex[['count']] = df_15_reindex[['count']].fillna(0).astype(int)\n # Adding all columns to match example excel.\n df_15_reindex = df_15_reindex.rename(columns={'mean': 'H(ft)'})\n df_15_reindex = df_15_reindex.rename(columns={'count': 'SampleCount'})\n\n # Adding meters column.\n df_15_reindex['H(m)'] = df_15_reindex['H(ft)'] / 3.28\n # Rounds meters column so significant digits match\n # original height column.\n df_15_reindex['H(m)'] = df_15_reindex['H(m)'].round(2)\n df_15_reindex['H(ft)'] = df_15_reindex['H(ft)'].round(2)\n df_15_reindex['DateTime2'] = df_15_reindex.index\n df_15_reindex['Date'] = df_15_reindex.index\n df_15_reindex['Date2'] = df_15_reindex.index\n df_15_reindex['Date_Python_generated'] = df_15_reindex['Date'].dt.date\n df_15_reindex['Time1'] = df_15_reindex['Date'].dt.time\n df_15_reindex['Time2'] = df_15_reindex['Date'].dt.time\n df_15_reindex['H(m)_final'] = df_15_reindex['H(m)']\n df_15_reindex = df_15_reindex.reset_index(drop=True)\n # Adding original datetime and height data to dataframe. To do this\n # pd.concat is used because the column lengths are different.\n df_15_reindex = pd.concat([\n df_15_reindex, df_year.reset_index(drop=True)], axis=1)\n df_15_reindex['dateTime'] = pd.to_datetime(df_15_reindex['dateTime'])\n # Reordering columns to match example excel.\n df_15_reindex = df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n # Filling nans with empty cells in columns similar to example excel.\n df_15_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_15_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but 30 minutes interval.\n df_30_reindex = df_30.reindex(idx_30, fill_value=np.nan)\n df_30_reindex[['count']] = df_30_reindex[['count']].fillna(0).astype(int)\n df_30_reindex = df_30_reindex.rename(columns={'mean': 'H(ft)'})\n df_30_reindex = df_30_reindex.rename(columns={'count': 'SampleCount'})\n df_30_reindex['H(m)'] = df_30_reindex['H(ft)'] / 3.28\n df_30_reindex['H(m)'] = df_30_reindex['H(m)'].round(2)\n df_30_reindex['H(ft)'] = df_30_reindex['H(ft)'].round(2)\n df_30_reindex['DateTime2'] = df_30_reindex.index\n df_30_reindex['Date'] = df_30_reindex.index\n df_30_reindex['Date2'] = df_30_reindex.index\n df_30_reindex['Date_Python_generated'] = df_30_reindex['Date'].dt.date\n df_30_reindex['Time1'] = df_30_reindex['Date'].dt.time\n df_30_reindex['Time2'] = df_30_reindex['Date'].dt.time\n df_30_reindex['H(m)_final'] = df_30_reindex['H(m)']\n df_30_reindex = df_30_reindex.reset_index(drop=True)\n df_30_reindex = pd.concat([\n df_30_reindex, df_year.reset_index(drop=True)], axis=1)\n df_30_reindex['dateTime'] = pd.to_datetime(df_30_reindex['dateTime'])\n df_30_reindex = df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_30_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_30_reindex[['dateTime', 'X_00065_00000', 
'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but hourly interval.\n df_1h_reindex = df_1h.reindex(idx_1h, fill_value=np.nan)\n df_1h_reindex[['count']] = df_1h_reindex[['count']].fillna(0).astype(int)\n df_1h_reindex = df_1h_reindex.rename(columns={'mean': 'H(ft)'})\n df_1h_reindex = df_1h_reindex.rename(columns={'count': 'SampleCount'})\n df_1h_reindex['H(m)'] = df_1h_reindex['H(ft)'] / 3.28\n df_1h_reindex['H(m)'] = df_1h_reindex['H(m)'].round(2)\n df_1h_reindex['H(ft)'] = df_1h_reindex['H(ft)'].round(2)\n df_1h_reindex['DateTime2'] = df_1h_reindex.index\n df_1h_reindex['Date'] = df_1h_reindex.index\n df_1h_reindex['Date2'] = df_1h_reindex.index\n df_1h_reindex['Date_Python_generated'] = df_1h_reindex['Date'].dt.date\n df_1h_reindex['Time1'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['Time2'] = df_1h_reindex['Date'].dt.time\n df_1h_reindex['H(m)_final'] = df_1h_reindex['H(m)']\n df_1h_reindex = df_1h_reindex.reset_index(drop=True)\n df_1h_reindex = pd.concat([\n df_1h_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1h_reindex['dateTime'] = pd.to_datetime(df_1h_reindex['dateTime'])\n df_1h_reindex = df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1h_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1h_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n\n # Similar to 15 minute interval code but daily interval.\n df_1d_reindex = df_1d.reindex(idx_1d, fill_value=np.nan)\n df_1d_reindex[['count']] = df_1d_reindex[['count']].fillna(0).astype(int)\n df_1d_reindex = df_1d_reindex.rename(columns={'mean': 'H(ft)'})\n df_1d_reindex = df_1d_reindex.rename(columns={'count': 'SampleCount'})\n df_1d_reindex['H(m)'] = df_1d_reindex['H(ft)'] / 3.28\n df_1d_reindex['H(m)'] = df_1d_reindex['H(m)'].round(2)\n df_1d_reindex['H(ft)'] = df_1d_reindex['H(ft)'].round(2)\n df_1d_reindex['DateTime2'] = df_1d_reindex.index\n df_1d_reindex['Date'] = df_1d_reindex.index\n df_1d_reindex['Date2'] = df_1d_reindex.index\n df_1d_reindex['Date_Python_generated'] = df_1d_reindex['Date'].dt.date\n df_1d_reindex['Time1'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['Time2'] = df_1d_reindex['Date'].dt.time\n df_1d_reindex['H(m)_final'] = df_1d_reindex['H(m)']\n df_1d_reindex = df_1d_reindex.reset_index(drop=True)\n df_1d_reindex = pd.concat([\n df_1d_reindex, df_year.reset_index(drop=True)], axis=1)\n df_1d_reindex['dateTime'] = pd.to_datetime(df_1d_reindex['dateTime'])\n df_1d_reindex = df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'Date_Python_generated', 'Time1', 'Time2',\n 'DateTime2', 'Date', 'H(ft)', 'H(m)', 'SampleCount', 'Date2',\n 'H(m)_final']]\n df_1d_reindex[[\n 'dateTime', 'X_00065_00000', 'H(m)_final'\n ]] = df_1d_reindex[['dateTime', 'X_00065_00000', 'H(m)_final']].fillna('')\n return df_15_reindex, df_30_reindex, df_1h_reindex, df_1d_reindex", "def drop_years(dataframe, start, end):\n tmp = dataframe\n tmp = tmp[(start <= tmp['year'].astype(int)) & (tmp['year'].astype(int) <= end)]\n\n return tmp", "def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1", "def run_global(start_year, end_year, depth_from, depth_to, animate=True):\n# years, times, rootgrps = retrieve(1950,2018)\n# rootgrps_1950 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1950\\EN.4.2.1.f.analysis.g10.195001.nc\", \"r+\", format=\"NETCDF4\")]\n# 
rootgrps_1951 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1951\\EN.4.2.1.f.analysis.g10.195101.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1952 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1952\\EN.4.2.1.f.analysis.g10.195201.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_1953 = [nt.Dataset(\"EN.4.2.1.analyses.g10.1953\\EN.4.2.1.f.analysis.g10.195301.nc\", \"r+\", format=\"NETCDF4\")]\n#\n#\n# rootgrps_2015 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2015\\EN.4.2.1.f.analysis.g10.201501.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2016 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2016\\EN.4.2.1.f.analysis.g10.201601.nc\", \"r+\", format=\"NETCDF4\")]\n# rootgrps_2017 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2017\\EN.4.2.1.f.analysis.g10.201701.nc\", \"r+\", format=\"NETCDF4\")]\n rootgrps_2018 = [nt.Dataset(\"EN.4.2.1.analyses.g10.2018\\EN.4.2.1.f.analysis.g10.201801.nc\", \"r+\", format=\"NETCDF4\")]\n\n# HC_1950 = calculate_HC_global(rootgrps_1950, 0, 2000)\n# print('1950', time.time()-start)\n# HC_1951 = calculate_HC_global(rootgrps_1951, 0, 2000)\n# print('1951', time.time()-start)\n# HC_1952 = calculate_HC_global(rootgrps_1952, 0, 2000)\n# print('1952', time.time()-start)\n# HC_1953 = calculate_HC_global(rootgrps_1953, 0, 2000)\n# print('1953', time.time()-start) \n#\n# HC_2015 = calculate_HC_global(rootgrps_2015, 0, 2000)\n# print('2015', time.time()-start)\n# HC_2016 = calculate_HC_global(rootgrps_2016, 0, 2000)\n# print('2016', time.time()-start)\n# HC_2017 = calculate_HC_global(rootgrps_2017, 0, 2000)\n# print('2017', time.time()-start)\n HC_2018 = calculate_HC_global(rootgrps_2018, 0, 2000)\n# print('2018', time.time()-start)\n# HC_1950_mean = (HC_1950+HC_1951+HC_1952+HC_1953)/4\n# HC_2018_mean = (HC_2015+HC_2016+HC_2017+HC_2018)/4\n\n# dHC = (HC_2018_mean-HC_1950_mean)/(65*365*24*3600)\n if animate == True:\n plot(rootgrps_2018, HC_2018)\n return HC_2018", "def _backward(self):\n if self.units[0].value > 0:\n self.units[0].gradient += 1 * self.utop.gradient\n else:\n self.units[0].gradient += 0 * self.utop.gradient", "def make_financial_periods(date: Date, lookback: PositiveInteger) -> FinancialPeriods:\n ## Get years iterable:\n years = (date.year - i for i in range(1, lookback + 1) if i <= date.year)\n\n ## Build ranges and return:\n return OrderedDict(\n (\n (\"DTD\", DateRange.dtd(date)),\n (\"MTD\", DateRange.mtd(date)),\n (\"YTD\", DateRange.ytd(date)),\n *((f\"{y}\", DateRange.year(PositiveInteger(y))) for y in years),\n )\n )", "def degen_translate(formatted_bands, en_tolerance=0.01, padding=False):\n tmp = np.array(formatted_bands)\n size = np.shape(tmp)\n\n # degen_bands = np.zeros(size) # assumption 1: missing data are assumed null, labeled by 0\n degen_bands = np.zeros(size) + 1 # assumption 2: missing data are assumed non-degenerate, labeled by 1\n\n # Need further test\n for i in range(size[1]):\n each_column = []\n count = 1\n for j in range(size[0]-1):\n if tmp[j][i] == 0:\n count = 0\n break\n else:\n if np.absolute(tmp[j+1][i]-tmp[j][i]) <= en_tolerance:\n count += 1\n else:\n for k in range(count):\n each_column.append(count)\n count = 1\n\n if count == 0:\n pass\n else:\n for k in range(count):\n each_column.append(count)\n degen_bands[:, i] = np.array(each_column)\n\n return degen_bands", "def make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n 
##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names", "def time_bucket_gapfill(self, field: str, interval: str, start: datetime, end: datetime, datapoints: int=240):\n return self.values(bucket=TimeBucketGapFill(field, interval, start, end, datapoints))", "def imdb_crawl_by_years(years, verbose):\n for year in years:\n imdb_crawl_by_year(year, verbose)", "def ramp_down(self):\n value = self.current_event[\"ramp_down\"][\"value\"]\n self.current_value.append(self.current_value[-1] - value)", "def append_flipped_images(self, imdb):\r\n print('append flipped images to imdb', len(imdb))\r\n for i in range(len(imdb)):\r\n imdb_ = imdb[i]\r\n m_bbox = imdb_['bbox_target'].copy()\r\n m_bbox[0], m_bbox[2] = -m_bbox[2], -m_bbox[0]\r\n\r\n landmark_ = imdb_['landmark_target'].copy()\r\n landmark_ = landmark_.reshape((5, 2))\r\n landmark_ = np.asarray([(1 - x, y) for (x, y) in landmark_])\r\n landmark_[[0, 1]] = landmark_[[1, 0]]\r\n landmark_[[3, 4]] = landmark_[[4, 3]]\r\n\r\n item = {'image': imdb_['image'],\r\n 'label': imdb_['label'],\r\n 'bbox_target': m_bbox,\r\n 'landmark_target': landmark_.reshape((10)),\r\n 'flipped': True}\r\n\r\n imdb.append(item)\r\n self.image_set_index *= 2\r\n return imdb", "def update_grad_data():\n t_file = 'hcapgrd1_full_data_*.fits*'\n out_dir = deposit_dir + '/Grad_save/'\n tdir = out_dir + 'Gradcap/'\n#\n#--- read grad group name\n#\n gfile = house_keeping + 'grad_list'\n grad_list = mcf.read_data_file(gfile)\n\n [tstart, tstop, year] = ecf.find_data_collecting_period(tdir, t_file)\n\n get_data(tstart, tstop, year, grad_list, out_dir)", "def _shift(self, arr: np.ndarray, num: int = 1, fill_value: int = 0) -> np.ndarray:\n result = np.empty_like(arr)\n if num > 0:\n result[:num] = fill_value\n result[num:] = 
arr[:-num]\n elif num < 0:\n result[num:] = fill_value\n result[:num] = arr[-num:]\n else:\n result[:] = arr\n return result", "def backward(self, top, propagate_down, bottom):\n for ib in range(2):\n if not propagate_down[ib]:\n continue\n ndim = bottom[0].data.shape\n count = ndim[0] * ndim[2] * ndim[3]\n if not self.count:\n bottom[ib].diff[ ... ] = np.zeros_like( bottom[0].data )\n continue\n if top[0].data < 1.\n bottom[ib].diff[ ... ] = np.abs( bottom[0].data - bottom[1].data )\n bottom[ib].diff[ ... ] *= ( 1 - 1.0*self.iter/self.maxiter )\n else:\n bottom[ib].diff[ ... ] = np.ones_like( bottom[ib].data )\n inop = bottom[0].data < bottom[1].data\n bottom[ib].diff[ inop ] *= -1\n \n # ingore false label and repair\n ignore = bottom[1].data <= 0.\n count -= np.sum(ignore)\n bottom[ib].diff[ignore] = 0.\n #normlist\n bottom[ib].diff[...] /= count", "def melt_naics_crosswalk():\n # load the mastercroswalk and subset by sectorsourcename,\n # save values to list\n cw_load = load_crosswalk('sector_timeseries')\n\n # create melt table of possible 2007 and 2017 naics that can\n # be mapped to 2012\n cw_melt = cw_load.melt(\n id_vars='NAICS_2012_Code', var_name='NAICS_year', value_name='NAICS')\n # drop the naics year because not relevant for replacement purposes\n cw_replacement = cw_melt.dropna(how='any')\n cw_replacement = cw_replacement[\n ['NAICS_2012_Code', 'NAICS']].drop_duplicates()\n # drop rows where contents are equal\n cw_replacement = cw_replacement[\n cw_replacement['NAICS_2012_Code'] != cw_replacement['NAICS']]\n # drop rows where length > 6\n cw_replacement = cw_replacement[cw_replacement['NAICS_2012_Code'].apply(\n lambda x: len(x) < 7)].reset_index(drop=True)\n # order by naics 2012\n cw_replacement = cw_replacement.sort_values(\n ['NAICS', 'NAICS_2012_Code']).reset_index(drop=True)\n\n # create allocation ratios by determining number of\n # NAICS 2012 to other naics when not a 1:1 ratio\n cw_replacement_2 = cw_replacement.assign(\n naics_count=cw_replacement.groupby(\n ['NAICS'])['NAICS_2012_Code'].transform('count'))\n cw_replacement_2 = cw_replacement_2.assign(\n allocation_ratio=1/cw_replacement_2['naics_count'])\n\n return cw_replacement_2", "def rebin(self, dispersion):\n\n if isinstance(dispersion, float):\n dispersion = np.arange(\n self.dispersion.value[0], self.dispersion.value[-1],\n dispersion)\n\n old_bins = find_bin_edges(self.dispersion.value)\n new_bins = find_bin_edges(dispersion)\n\n widths = np.diff(old_bins)\n\n old_length = len(self.dispersion)\n new_length = len(dispersion)\n\n i = 0 # index of old array\n j = 0 # index of new array\n\n # Variables used for rebinning:\n df = 0.0\n de2 = 0.0\n nbins = 0.0\n\n flux = np.zeros_like(dispersion)\n error = np.zeros_like(dispersion)\n\n # Sanity check:\n if old_bins[-1] < new_bins[0] or new_bins[-1] < old_bins[0]:\n raise ValueError('Dispersion scales do not overlap!')\n\n # Find the first contributing old pixel to the rebinned spectrum:\n if old_bins[i + 1] < new_bins[0]:\n\n # Old dispersion scale extends lower than the new one. Find the\n # first old bin that overlaps with the new scale:\n while old_bins[i + 1] < new_bins[0]:\n i += 1\n\n i -= 1\n\n elif old_bins[0] > new_bins[j + 1]:\n\n # New dispersion scale extends lower than the old one. 
Find the\n # first new bin that overlaps with the old scale:\n while old_bins[0] > new_bins[j + 1]:\n flux = np.nan\n error = np.nan\n j += 1\n\n j -= 1\n\n l0 = old_bins[i] # lower edge of contributing old bin\n\n while True:\n\n h0 = old_bins[i + 1] # upper edge of contributing old bin\n h1 = new_bins[j + 1] # upper edge of jth new bin\n\n if h0 < h1:\n # Count up the decimal number of old bins that contribute to\n # the new one and start adding up fractional flux values:\n if self.flux.uncertainty.value[i] > 0:\n bin_fraction = (h0 - l0) / widths[i]\n nbins += bin_fraction\n\n # We don't let `Data` handle the error propagation here\n # because a sum of squares will not give us what we\n # want, i.e. 0.25**2 + 0.75**2 != 0.5**2 + 0.5**2 != 1**2\n df += self.flux.value[i] * bin_fraction\n de2 += self.flux.uncertainty.value[i] ** 2 * bin_fraction\n\n l0 = h0\n i += 1\n\n if i == old_length:\n break\n\n else:\n # We have all but one of the old bins that contribute to the\n # new one, so now just add the remaining fraction of the new\n # bin to the decimal bin count and add the remaining\n # fractional flux value to the sum:\n if self.flux.uncertainty.value[i] > 0:\n bin_fraction = (h1 - l0) / widths[i]\n nbins += bin_fraction\n df += self.flux.value[i] * bin_fraction\n de2 += self.flux.uncertainty.value[i] ** 2 * bin_fraction\n\n if nbins > 0:\n # Divide by the decimal bin count to conserve flux density:\n flux[j] = df / nbins\n error[j] = sqrt(de2) / nbins\n\n else:\n flux[j] = 0.0\n error[j] = 0.0\n\n df = 0.0\n de2 = 0.0\n nbins = 0.0\n\n l0 = h1\n j += 1\n\n if j == new_length:\n break\n\n if hasattr(self.dispersion, 'unit'):\n dispersion = Quantity(dispersion, self.dispersion.unit)\n\n if hasattr(self.flux, 'unit'):\n flux = Data(flux, error, self.flux.unit)\n\n # Linearly interpolate the continuum onto the new dispersion scale:\n if self.continuum is not None:\n continuum = np.interp(dispersion, self.dispersion, self.continuum)\n else:\n continuum = None\n\n return self.__class__(dispersion, flux, continuum=continuum)", "def main(years=(2000, 2019)):\n year_list = range(years[0], years[1] + 1)\n dfs = []\n for year in year_list:\n dfs.append(get_df(year))\n print(f\"Done: {len(dfs)} dataframes written\")", "def put_on_even_grounds(beds: List[BedGraphFile]) -> List[BedGraphFile]:\n starts = [bed.data.iloc[0, 1] for bed in beds]\n unified_start = max(starts)\n ends = [bed.data.iloc[-1, 1] for bed in beds]\n unified_end = min(ends)\n new_dfs = [_trim_start_end(bed.data, unified_start, unified_end) for bed in beds]\n for bed, new_df in zip(beds, new_dfs):\n bed.data = new_df\n return beds", "def get_dataframes_for_each_year(main_dataframe, years):\n list_of_dataframes = []\n for year in years:\n dataframe_by_year = main_dataframe.loc[ (main_dataframe['year'] == year) ].T\n # Getting rid of the first two rows \n dataframe_by_year = dataframe_by_year.iloc[2:]\n list_of_dataframes.append(dataframe_by_year)\n return list_of_dataframes", "def request_band_extract(file_prefix, points_layer, region, years, filter_bounds=False):\n roi = ee.FeatureCollection(region)\n plots = ee.FeatureCollection(points_layer)\n for yr in years:\n stack = stack_bands(yr, roi)\n\n if filter_bounds:\n plots = plots.filterBounds(roi)\n\n filtered = plots.filter(ee.Filter.eq('YEAR', yr))\n\n plot_sample_regions = stack.sampleRegions(\n collection=filtered,\n properties=['POINT_TYPE', 'YEAR'],\n scale=30,\n tileScale=16)\n\n task = ee.batch.Export.table.toCloudStorage(\n plot_sample_regions,\n 
description='{}_{}'.format(file_prefix, yr),\n bucket='wudr',\n fileNamePrefix='{}_{}'.format(file_prefix, yr),\n fileFormat='CSV')\n\n task.start()\n print(yr)\n exit()", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def recreateImgsFromLapPyr(imgPyramid): \n layerNum = len(imgPyramid)\n curSrc=imgPyramid[-1].copy()\n for l in np.arange(layerNum-2, -1, -1):\n imgUp = cv2.resize(curSrc, (imgPyramid[l].shape[1], imgPyramid[l].shape[0]))\n imgBlurUp = cv2.GaussianBlur(imgUp, ksize=(0, 0), sigmaX=3)\n curSrc = imgBlurUp + imgPyramid[l]\n \n return(curSrc)" ]
[ "0.7032449", "0.5949247", "0.5812707", "0.5253641", "0.51127684", "0.49725264", "0.47500893", "0.46962532", "0.46487218", "0.45113215", "0.44795865", "0.44701588", "0.4448179", "0.4420199", "0.4388033", "0.43850613", "0.43616962", "0.4360365", "0.43425742", "0.43269235", "0.4294934", "0.4291195", "0.42749843", "0.4262502", "0.42407838", "0.42378584", "0.42289925", "0.42165184", "0.42092714", "0.41809776", "0.41746294", "0.417114", "0.41653028", "0.41512117", "0.41458142", "0.41364214", "0.4114708", "0.41015872", "0.40831485", "0.4081029", "0.40796572", "0.4073436", "0.4068427", "0.40683666", "0.40683138", "0.406647", "0.4033785", "0.40328833", "0.40166", "0.40153542", "0.40094098", "0.39652714", "0.39605057", "0.39595434", "0.39567348", "0.39466563", "0.3940834", "0.39401984", "0.39389387", "0.39275894", "0.39193353", "0.39188412", "0.39106444", "0.39102578", "0.39084625", "0.39042643", "0.39008197", "0.38943857", "0.38908386", "0.38883474", "0.38883373", "0.3885622", "0.38848528", "0.3876568", "0.38731506", "0.3872406", "0.38700974", "0.38676277", "0.38660273", "0.3858633", "0.38585854", "0.38582587", "0.38530537", "0.38529924", "0.38463145", "0.38444757", "0.38440958", "0.38427937", "0.3839871", "0.38314328", "0.3828807", "0.3827174", "0.38263676", "0.38237777", "0.38185456", "0.38179767", "0.38160473", "0.38154075", "0.38138244", "0.38083568" ]
0.7573101
0
Function to apply forward gap filling and backward gap filling to an image. The image bands do not need to be in order, but the bandNames argument must be in chronological order. This function calls applyForwardNoDataFilter then applyBackwardNoDataFilter
def applyGapFilter(image, bandNames):
    filtered = applyForwardNoDataFilter(image, bandNames)
    filtered = applyBackwardNoDataFilter(filtered, bandNames)
    return filtered
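A minimal usage sketch of the gap filter above, assuming the Earth Engine Python API is installed and authenticated; the asset ID and yearly band names below are hypothetical placeholders, and applyForwardNoDataFilter / applyBackwardNoDataFilter are assumed to be defined as in the accompanying snippets.

import ee

ee.Initialize()

# Hypothetical annual land-cover classification: one band per year, named in chronological order.
classification = ee.Image('users/example/annual_classification')  # hypothetical asset ID
band_names = ['classification_1985', 'classification_1986', 'classification_1987', 'classification_1988']

# Fill per-pixel gaps forward in time first, then backward for any remaining leading gaps.
filled = applyGapFilter(classification, band_names)
print(filled.bandNames().getInfo())  # same chronological band names, now gap-filled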
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyForwardNoDataFilter(image, bandNames):\n #Get a list of band names from year(1) through the last year\n bandNamesEE = ee.List(bandNames[1:])\n \n #Define forwards filter\n #In first iteration, bandName=bandNames[1] and previousImage is image.select(bandNames[0]), or the classifications for the first year\n #currentImage = image.select(bandNames[1]), the image for the second year\n #previousImage = image.select(bandNames[0]), the first year\n #Find where the second year has missing data, replace those values with the values of the first year\n #Append previousImage to currentImage, so now currentImage is a two band image, with the first band being the second year with the gap fill\n #and the second band is the first years classification\n #The iteration continues, now with followingImage.select[0] being the second year with the gap fill applied, and bandName is the third year\n def forwardNoDataFilter(bandName, previousImage):\n currentImage = image.select(ee.String(bandName))\n previousImage = ee.Image(previousImage)\n currentImage = currentImage.unmask(previousImage.select([0]))\n return currentImage.addBands(previousImage)\n \n #Iterate through all the years, starting with the first year's classification\n filtered = bandNamesEE.iterate(forwardNoDataFilter,ee.Image(image.select(bandNames[0])))\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def applyBackwardNoDataFilter(image, bandNames):\n #Get a list of band names to iterate over, from year(-2) through year(0)\n bandNamesEE = ee.List(bandNames[:-1]).reverse()\n \n #Define backwards filter\n #In first iteration, bandName=bandNames[-2] and followingImage is image.select(bandNames[-1]), or the classifications for the final year\n #currentImage = image.select(bandNames[-2]), the second to last year\n #followingImage = image.select(bandNames[-1]), the final year\n #Find where the second to last year has missing data, replace those values with the values of the following year\n #Append followingImage to currentImage, so now currentImage is a two band image, with the first band being the second to last year with the gap fill\n #and the second band is the final years classification\n #The iteration continues, now with followingImage.select[0] being the second to last year with the gap fill applied, and bandName is the third to last year\n def backwardNoDataFilter(bandName, followingImage):\n currentImage = image.select(ee.String(bandName))\n followingImage = ee.Image(followingImage)\n currentImage = currentImage.unmask(followingImage.select([0]))\n return currentImage.addBands(followingImage)\n \n #Apply backwards filter, starting with the final year and iterating through to year(0) \n filtered = bandNamesEE.iterate(backwardNoDataFilter,ee.Image(image.select(bandNames[-1])))\n #Re-order bands to be in chronological order\n filtered = ee.Image(filtered)\n return filtered.select(bandNames)", "def fill_in_data(color,frames,fs=25):\n color = color\n colormat = color.as_matrix()\n frameDiff = np.diff(colormat.T[2])\n locations = np.where(frameDiff!=1)[0]\n\n #Calculate number of frames skipped\n #sample = []\n #sample = colormat.T\n sample = sample[:2].T\n #frames = range(100,len(colormat.T[2])+100)\n #frames = np.linspace(frames[0],frames[-1],frames[-1]-frames[0]+1)\n #frames = frames[:len(frames)-1]\n \n #if locations is empty, try looking for a row of nans\n if np.all(locations):\n for i in range(len(sample)):\n if np.all(sample[i] == 0):\n sample[i]=[np.nan, np.nan]\n missing = 
list(np.where(np.isnan(sample.T[0])))\n\n else:\n numfill = []\n missing = []\n for i in locations:\n numfill.append(frames[i+1]-frames[i])#-1)\n #pdb.set_trace()\n missing.append(np.linspace(i+1,i+1+numfill[-1],numfill[-1]))\n\n missing = np.concatenate(missing)\n\n missing = missing[:len(missing)-1]\n missing = missing.astype(int)\n\n pdb.set_trace()\n\n for j in reversed(missing):\n sample = np.insert(sample,j,(np.nan,np.nan),axis = 0)\n #frames = np.insert(frames,j,j,axis=0)\n\n color_x,color_y,x_filt=KFilt(sample,fs)\n color_mat = np.column_stack((color_x[:,0],color_y[:,0],color_x[:,1],color_y[:,1]))\n return color_mat,frames,x_filt", "def mask4(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).eq(value)) \n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1)\n return img_out", "def applyMask3last(imagem, value, bandNames):\n mask = imagem.select(bandNames[-3]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[-2]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[-1]).neq(value))\n change_img = imagem.select(bandNames[-1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0:-1])\n img_out = img_out.addBands(imagem.select(bandNames[-1]).blend(change_img))\n return img_out", "def stack_bands(yr, roi):\n\n water_year_start = '{}-10-01'.format(yr - 1)\n\n winter_s, winter_e = '{}-01-01'.format(yr), '{}-03-01'.format(yr),\n spring_s, spring_e = '{}-03-01'.format(yr), '{}-05-01'.format(yr),\n late_spring_s, late_spring_e = '{}-05-01'.format(yr), '{}-07-01'.format(yr)\n summer_s, summer_e = '{}-07-01'.format(yr), '{}-09-01'.format(yr)\n fall_s, fall_e = '{}-09-01'.format(yr), '{}-12-31'.format(yr)\n\n periods = [('cy', winter_s, fall_e),\n ('1', spring_s, spring_e),\n ('2', late_spring_s, late_spring_e),\n ('3', summer_s, summer_e),\n ('4', fall_s, fall_e)]\n\n first = True\n for name, start, end in periods:\n bands = landsat_composites(yr, start, end, roi, name)\n if first:\n input_bands = bands\n proj = bands.select('B2_cy').projection().getInfo()\n first = False\n else:\n input_bands = input_bands.addBands(bands)\n\n for s, e, n in [(spring_s, spring_e, 'espr'),\n (late_spring_s, late_spring_e, 'lspr'),\n (summer_s, summer_e, 'smr'),\n (fall_s, fall_e, 'fl'),\n (water_year_start, spring_e, 'wy_espr'),\n (water_year_start, late_spring_e, 'wy_espr'),\n (water_year_start, summer_e, 'wy_smr'),\n (water_year_start, fall_e, 'wy')]:\n gridmet = ee.ImageCollection(\"IDAHO_EPSCOR/GRIDMET\").filterBounds(\n roi).filterDate(s, e).select('pr', 'eto', 'tmmn', 'tmmx')\n temp_reducer = ee.Reducer.mean()\n t_names = ['tmax'.format(n), 'tmin'.format(n)]\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_{}'.format(n), 'pet_total_{}'.format(n)).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n wd_estimate = precip_sum.select('precip_total_{}'.format(n)).subtract(precip_sum.select(\n 'pet_total_{}'.format(n))).rename('wd_est_{}'.format(n))\n input_bands = 
input_bands.addBands([temp_perc, precip_sum, wd_estimate])\n\n temp_reducer = ee.Reducer.percentile([10, 50, 90])\n t_names = ['tmmn_p10_cy', 'tmmn_p50_cy', 'tmmn_p90_cy', 'tmmx_p10_cy', 'tmmx_p50_cy', 'tmmx_p90_cy']\n temp_perc = gridmet.select('tmmn', 'tmmx').reduce(temp_reducer).rename(t_names).resample(\n 'bilinear').reproject(crs=proj['crs'], scale=30)\n\n precip_reducer = ee.Reducer.sum()\n precip_sum = gridmet.select('pr', 'eto').reduce(precip_reducer).rename(\n 'precip_total_cy', 'pet_total_cy').resample('bilinear').reproject(crs=proj['crs'], scale=30)\n wd_estimate = precip_sum.select('precip_total_cy').subtract(precip_sum.select(\n 'pet_total_cy')).rename('wd_est_cy')\n\n coords = ee.Image.pixelLonLat().rename(['Lon_GCS', 'LAT_GCS']).resample('bilinear').reproject(crs=proj['crs'],\n scale=30)\n ned = ee.Image('USGS/NED')\n terrain = ee.Terrain.products(ned).select('elevation', 'slope', 'aspect').reduceResolution(\n ee.Reducer.mean()).reproject(crs=proj['crs'], scale=30)\n\n world_climate = get_world_climate(proj=proj)\n elev = terrain.select('elevation')\n tpi_1250 = elev.subtract(elev.focal_mean(1250, 'circle', 'meters')).add(0.5).rename('tpi_1250')\n tpi_250 = elev.subtract(elev.focal_mean(250, 'circle', 'meters')).add(0.5).rename('tpi_250')\n tpi_150 = elev.subtract(elev.focal_mean(150, 'circle', 'meters')).add(0.5).rename('tpi_150')\n static_input_bands = coords.addBands([temp_perc, wd_estimate, terrain, tpi_1250, tpi_250, tpi_150, world_climate])\n\n nlcd = ee.Image('USGS/NLCD/NLCD2011').select('landcover').reproject(crs=proj['crs'], scale=30).rename('nlcd')\n\n cdl_cult = ee.Image('USDA/NASS/CDL/2017').select('cultivated'). \\\n remap([1, 2], [0, 1]).reproject(crs=proj['crs'], scale=30).rename('cdlclt')\n\n cdl_crop = ee.Image('USDA/NASS/CDL/2017').select('cropland').reproject(crs=proj['crs'],\n scale=30).rename('cdlcrp')\n\n gsw = ee.Image('JRC/GSW1_0/GlobalSurfaceWater')\n occ_pos = gsw.select('occurrence').gt(0)\n water = occ_pos.unmask(0).rename('gsw')\n\n static_input_bands = static_input_bands.addBands([nlcd, cdl_cult, cdl_crop, water])\n\n input_bands = input_bands.addBands(static_input_bands).clip(roi)\n\n # standardize names to match EE javascript output\n standard_names = []\n temp_ct = 1\n prec_ct = 1\n names = input_bands.bandNames().getInfo()\n for name in names:\n if 'tavg' in name and 'tavg' in standard_names:\n standard_names.append('tavg_{}'.format(temp_ct))\n temp_ct += 1\n elif 'prec' in name and 'prec' in standard_names:\n standard_names.append('prec_{}'.format(prec_ct))\n prec_ct += 1\n else:\n standard_names.append(name)\n\n input_bands = input_bands.rename(standard_names)\n return input_bands", "def bandpasscorrect(data):\n ret=[x for x in data]\n n=len(ret)\n ret[0]=1.083*ret[0]-0.083*ret[1]\n ret[n-1]=1.083*ret[n-1]-0.083*ret[n-2]\n for k in range(1,n-1):\n ret[k]=1.166*ret[k]-0.083*ret[k-1]-0.083*ret[k+1]\n return ret", "def band_filter(self, bands) -> 'ImageCollection':\n\n process_id = 'filter_bands'\n args = {\n 'imagery': self.graph,\n 'bands': bands\n }\n return self.graph_add_process(process_id, args)", "def applyMask3first(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).neq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).eq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[0]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[0]).blend(change_img)\n img_out = img_out.addBands(imagem.select(bandNames[1:]))\n return img_out", "def 
applyWindow4years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-2):\n img_out = img_out.addBands(mask4(imagem, value,bandNames[(i-1):(i+3)]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def fmask(bandname=\"fmask\"):\n\n def fmask(image):\n imgFmask = image.select(bandname)\n shadow = imgFmask.eq(3)\n snow = imgFmask.eq(4)\n cloud = imgFmask.eq(5)\n\n mask = shadow.Or(snow).Or(cloud)\n\n imgMask = image.updateMask(mask.Not())\n return imgMask\n return fmask", "def band(self, name, bands, new_name=None, label=None, text_key=None):\n if not self._is_numeric(name):\n msg = \"Can only band numeric typed data! {} is {}.\"\n msg = msg.format(name, self._get_type(name))\n raise TypeError(msg)\n if not text_key: text_key = self.text_key\n if not new_name: new_name = '{}_banded'.format(name)\n if not label: label = self.text(name, False, text_key)\n franges = []\n for idx, band in enumerate(bands, start=1):\n lab = None\n if isinstance(band, dict):\n lab = list(band.keys())[0]\n band = list(band.values())[0]\n if isinstance(band, tuple):\n if band[0] < 0:\n raise ValueError('Cannot band with lower bound < 0.')\n elif band[1] < 0:\n raise ValueError('Cannot band with upper bound < 0.')\n r = '{}-{}'.format(band[0], band[1])\n franges.append([idx, lab or r, {name: frange(r)}])\n else:\n r = str(band)\n franges.append([idx, lab or r, {name: [band]}])\n\n self.derive(new_name, 'single', label, franges,\n text_key=text_key)\n\n return None", "def dwt(image_array, quantization_Array):\n # Create the high pass and low pass filters\n # both filters are non-causal\n # symmetric\n # [-2, -1, 0, 1, 2]\n LPF = [-0.125, 0.25, 0.75, 0.25, -0.125]\n LPF_center = 2\n\n # [ -2,-1, 0]\n HPF = [-0.5, 1, -0.5]\n HPF_center = 2\n\n nrow, ncol = image_array.shape\n\n # create an array that will contain the 4 different subbands of the image\n LL = np.zeros((nrow, ncol))\n LH = np.zeros((nrow, ncol))\n HL = np.zeros((nrow, ncol))\n HH = np.zeros((nrow, ncol))\n filtered_image = [LL, LH, HL, HH]\n\n # filtering the rows using a low pass and high pass filters\n LowPass_rows = np.zeros((nrow, ncol))\n HighPass_rows = np.zeros((nrow, ncol))\n for i in range(0, nrow):\n LowPass_rows[i, :] = lfilter(LPF, image_array[i, :], LPF_center)\n HighPass_rows[i, :] = lfilter(HPF, image_array[i, :], HPF_center)\n\n # down sample rows.\n # which means we will have half the number of columns\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][:, ::2]\n\n # apply filters accross columns\n for i in range(0, ncol):\n LL[:, i] = lfilter(LPF, LowPass_rows[:, i], LPF_center)\n LH[:, i] = lfilter(HPF, LowPass_rows[:, i], HPF_center)\n HL[:, i] = lfilter(LPF, HighPass_rows[:, i], LPF_center)\n HH[:, i] = lfilter(HPF, HighPass_rows[:, i], HPF_center)\n\n # down sample columns and quantize\n for i in range(0, len(filtered_image)):\n filtered_image[i] = filtered_image[i][::2, :]\n filtered_image[i] = np.round(\n filtered_image[i]/quantization_Array[i]).astype(int)\n\n return filtered_image", "def mask_gradient(self, override=False):\n self.MaskPrefix = 'g' + self.MaskPrefix #append prefix 'g' for gradient\n print('applying gradient filter to remove edge effects and isolated unwrapping errors')\n # If a signal mask exists, use it to prevent np.gradient() from scrapping important data\n indSignal = np.zeros(self.Set.Size)\n if override:\n #manually created boolean array, 
1=pixel containing known signal\n indSignal = np.load(override)\n\n for ig in self.Set:\n igram = self.load_ma(ig)\n Fx, Fy = np.gradient(phase) #locate pixels adjacent to NaNs\n Fx[indSignal] = 1\n Fy[indSignal] = 1\n igram[np.isnan(Fx)] = ma.masked\n igram[np.isnan(Fx)] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n print('Done')", "def get_bands(self, data_array_norm, baseline_array_norm, f):\n\n fmax = 50\n fidx = f < fmax\n fnum = f[fidx].size\n\n band_tot = np.zeros((fnum, fnum, data_array_norm.shape[0], data_array_norm.shape[2], data_array_norm.shape[3]))\n band_tot_bl = np.zeros((fnum, fnum, baseline_array_norm.shape[0], baseline_array_norm.shape[2], baseline_array_norm.shape[3]))\n for i in range(fnum):\n for j in range(fnum):\n if j > i:\n idx = (f >= f[i]) & (f < f[j])\n band_tot[i, j, :, :] = np.sum(data_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n band_tot_bl[i, j, :, :] = np.sum(baseline_array_norm[:, idx, :, :], axis=1) / (f[j] - f[i])\n\n\n band_tot_bl1 = np.mean(band_tot_bl, axis=3) # average across time bins\n band_tot_bl2 = np.repeat(band_tot_bl1[:, :, :, None, :], band_tot_bl.shape[3], axis=3) # repeat same value across time\n return band_tot, band_tot_bl2, f[fidx]", "def preprocess_images(input_image, soften=None, fill_holes=None):\n ratio = get_scaling_ratio(input_image)\n if soften == None:\n soften = max(soften_amt_deafult * ratio, 1)\n if fill_holes == None:\n fill_holes = round(fill_holes_deafult * ratio)\n fill_holes = max(fill_holes, 1)\n\n # ensure that all points which are transparent have RGB values of 255 (will become white when\n # converted to non-transparent grayscale.)\n input_image = img_as_float32(input_image)\n if len(input_image.shape) == 3 and input_image.shape[2] == 4:\n input_image = rgba2rgb(input_image)\n gray_img = img_as_ubyte(rgb2gray(input_image))\n\n # get the otsu threshold after running a flood fill on the corners, so that those huge clumps of\n # dark pixels don't mess up the statistics too much (we only care about text!)\n thresh = threshold_otsu(\n fill_corners(gray_img, fill_value=255, thresh=5, tol=1, fill_below_thresh=True)\n )\n\n # n.b. here we are setting black pixels from the original image to have a value of 1 (effectively inverting\n # what you would get from a normal binarization, because the math gets easier this way)\n img_bin = img_as_ubyte(gray_img < thresh)\n \n # need to add clipping because of a weird case where the range of the\n # blurred imagewill be from -1 to 1.0000000004\n blurred = np.clip(gaussian(gray_img, soften), -1, 1)\n img_blur_bin = img_as_ubyte(img_as_ubyte(blurred) < thresh)\n\n # now, fill corners of binarized images with black (value 0)\n img_bin = fill_corners(\n img_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n img_blur_bin = fill_corners(\n img_blur_bin, fill_value=0, thresh=1, tol=1, fill_below_thresh=False\n )\n\n # run smoothing on the blurred-binarized image so we get blobs of text in neat lines\n kernel = np.ones((fill_holes, fill_holes), np.uint8)\n img_cleaned = binary_opening(binary_closing(img_blur_bin, kernel), kernel)\n\n # find rotation angle of cleaned, smoothed image. 
use that to correct the rotation of the unsmoothed image\n angle = find_rotation_angle(img_cleaned)\n img_cleaned_rot = rotate(img_cleaned, angle, order=0, mode=\"edge\") > 0\n img_bin_rot = rotate(img_bin, angle, order=0, mode=\"edge\") > 0\n\n return img_bin_rot, img_cleaned_rot, angle", "def bqa_fmask_func(qa):\n # Extracting cloud masks from BQA using np.right_shift() and np.bitwise_and()\n # Cloud (med & high confidence), then snow, then shadow, then fill\n # Low confidence clouds tend to be the FMask buffer\n fill_mask = np.bitwise_and(np.right_shift(qa, 0), 1) >= 1\n cloud_mask = np.bitwise_and(np.right_shift(qa, 4), 1) >= 1 # cloud bit\n cloud_mask &= np.bitwise_and(np.right_shift(qa, 5), 3) >= 2 # cloud conf.\n cloud_mask |= np.bitwise_and(np.right_shift(qa, 11), 3) >= 3 # cirrus\n shadow_mask = np.bitwise_and(np.right_shift(qa, 7), 3) >= 3\n snow_mask = np.bitwise_and(np.right_shift(qa, 9), 3) >= 3\n\n fmask = (fill_mask != True).astype(np.uint8)\n fmask[shadow_mask] = 2\n fmask[snow_mask] = 3\n fmask[cloud_mask] = 4\n\n return fmask", "def iterate_grey_level(prev_mask, new_g_disc, converter, \n num_grey_levels=256, upward=True):\n gl_delta = 1./num_grey_levels\n grey_level = new_g_disc/(num_grey_levels - 1)\n \n # Create desired spectrum.\n desired = desired_PSD_nd(\n new_g_disc*gl_delta, prev_mask.shape[0], prev_mask.ndim)\n desired_radial = converter.radially_average(desired)\n \n # Find error:\n corrected_sig = correct_signal(prev_mask, desired_radial, converter)\n error = np.abs(corrected_sig - prev_mask)\n \n # Make corrections:\n num_replacements = int(np.multiply.reduce(prev_mask.shape)*gl_delta)\n \n ## Identify worst zeros. This is different than BIPPSMA, because we \n ## have to check each replacement's neighbourhood to avoid clusters.\n replace_value = 0 if upward else 1\n replace_to = 1 - replace_value\n \n void = prev_mask == replace_value\n void_error = np.where(void, error, 0)\n void_error_order = np.argsort(-void_error, None)# descending.\n \n ## Replace:\n new_sig = prev_mask.copy()\n error_coords = np.unravel_index(void_error_order[:void.sum()], prev_mask.shape)\n \n # We need to make sure replacements don't cluster, by observing the local\n # means. We do that for the entire array - in NumPy. 
It's cheaper than\n # doing it individually per point in pure Python.\n half_window = 4\n window_size = (2*half_window + 1)\n window = np.full((window_size,)*prev_mask.ndim, 1/window_size**prev_mask.ndim)\n local_mean = ndi.convolve(prev_mask, window, mode='wrap')\n \n for coords in zip(*error_coords):\n if upward:\n crowded = local_mean[coords] > grey_level\n else:\n crowded = local_mean[coords] < grey_level\n \n if crowded:\n continue\n \n assert(new_sig[coords] == replace_value)\n new_sig[coords] = replace_to\n num_replacements -= 1\n if num_replacements == 0:\n break\n \n # Profit:\n return new_sig", "def mask5(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[3]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[4]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img1 = imagem.select(bandNames[2]).mask(mask.eq(1)).where(mask.eq(1), value) \n change_img2 = imagem.select(bandNames[3]).mask(mask.eq(1)).where(mask.eq(1), value) \n img_out = imagem.select(bandNames[1]).blend(change_img).blend(change_img1).blend(change_img2)\n return img_out", "def processframe(pilimage):\n # TODO: Idea on of overfilling\n # [[0,0,0],\n # [1,1,1],\n # [0,0,0]]\n # Keep this as template. aka pattern. use scipy measure and that s pattern to match all connecting\n # this gets all the fills. the rest is thrown into the pile of sets.\n # we assume index 0 as discarded (Can't really do much with black images.)\n numpyarrayfrompil = numpy.array(pilimage)\n # First we pass to regionprops\n props = createfillers(numpyarrayfrompil)\n # pass all the data we need now to the mapprops2color\n # returns a string which can be cerealised.\n return mapprops2color(props, numpyarrayfrompil, pilimage)", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=5, axis=0): \n omega = 0.5 * fs\n low = lowcut / omega\n high = highcut / omega\n b, a = signal.butter(order, [low, high], btype='band')\n y = signal.lfilter(b, a, data, axis=0)\n return y", "def find_bandgap(bandsdata, number_electrons=None, fermi_energy=None):\n\n def nint(num):\n \"\"\"\n Stable rounding function\n \"\"\"\n if num > 0:\n return int(num + 0.5)\n else:\n return int(num - 0.5)\n\n if fermi_energy and number_electrons:\n raise EitherNumberOfElectronsOrFermiEnergyError()\n\n assert bandsdata.units == \"eV\"\n stored_bands = bandsdata.get_bands()\n\n if len(stored_bands.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n bands = np.concatenate(list(stored_bands), axis=1)\n else:\n bands = stored_bands\n\n # analysis on occupations:\n if fermi_energy is None:\n num_kpoints = len(bands)\n\n if number_electrons is None:\n try:\n _, stored_occupations = bandsdata.get_bands(also_occupations=True)\n except KeyError as exc:\n raise FermiEnergyOrOccupationsNotPresentError() from exc\n\n # put the occupations in the same order of bands, also in case of multiple bands\n if len(stored_occupations.shape) == 3:\n # I write the algorithm for the generic case of having both the\n # spin up and spin down array\n\n # put all spins on one band per kpoint\n occupations = np.concatenate(list(stored_occupations), axis=1)\n else:\n occupations = stored_occupations\n\n # now sort the bands by energy\n # Note: I am sort of assuming that I 
have an electronic ground state\n\n # sort the bands by energy, and reorder the occupations accordingly\n # since after joining the two spins, I might have unsorted stuff\n bands, occupations = (\n np.array(y)\n for y in zip(\n *[\n zip(*j)\n for j in [\n sorted(\n zip(i[0].tolist(), i[1].tolist()),\n key=lambda x: x[0],\n )\n for i in zip(bands, occupations)\n ]\n ]\n )\n )\n number_electrons = int(\n round(sum([sum(i) for i in occupations]) / num_kpoints)\n )\n\n homo_indexes = [\n np.where(np.array([nint(_) for _ in x]) > 0)[0][-1]\n for x in occupations\n ]\n if (\n len(set(homo_indexes)) > 1\n ): # there must be intersections of valence and conduction bands\n return False, None, None, None\n else:\n homo = [_[0][_[1]] for _ in zip(bands, homo_indexes)]\n try:\n lumo = [_[0][_[1] + 1] for _ in zip(bands, homo_indexes)]\n except IndexError as exc:\n raise NeedMoreBandsError() from exc\n\n else:\n bands = np.sort(bands)\n number_electrons = int(number_electrons)\n\n # find the zero-temperature occupation per band (1 for spin-polarized\n # calculation, 2 otherwise)\n number_electrons_per_band = 4 - len(stored_bands.shape) # 1 or 2\n # gather the energies of the homo band, for every kpoint\n homo = [\n i[number_electrons / number_electrons_per_band - 1] for i in bands\n ] # take the nth level\n try:\n # gather the energies of the lumo band, for every kpoint\n lumo = [\n i[number_electrons / number_electrons_per_band] for i in bands\n ] # take the n+1th level\n except IndexError as exc:\n raise NeedMoreBandsError() from exc\n\n if number_electrons % 2 == 1 and len(stored_bands.shape) == 2:\n # if #electrons is odd and we have a non spin polarized calculation\n # it must be a metal and I don't need further checks\n return False, None, None, None\n\n # if the nth band crosses the (n+1)th, it is an insulator\n gap = min(lumo) - max(homo)\n if gap == 0.0:\n return False, 0.0, None, None\n elif gap < 0.0:\n return False, gap, None, None\n else:\n return True, gap, max(homo), min(lumo)\n\n # analysis on the fermi energy\n else:\n # reorganize the bands, rather than per kpoint, per energy level\n\n # I need the bands sorted by energy\n bands.sort()\n\n levels = bands.transpose()\n max_mins = [(max(i), min(i)) for i in levels]\n\n if fermi_energy > bands.max():\n raise FermiEnergyAndBandsEnergiesError(where=\"above\")\n if fermi_energy < bands.min():\n raise FermiEnergyAndBandsEnergiesError(where=\"below\")\n\n # one band is crossed by the fermi energy\n if any(i[1] < fermi_energy and fermi_energy < i[0] for i in max_mins):\n return False, 0.0, None, None\n\n # case of semimetals, fermi energy at the crossing of two bands\n # this will only work if the dirac point is computed!\n elif any(i[0] == fermi_energy for i in max_mins) and any(\n i[1] == fermi_energy for i in max_mins\n ):\n return False, 0.0, None, None\n # insulating case\n else:\n # Take the max of the band maxima below the fermi energy.\n homo = max([i[0] for i in max_mins if i[0] < fermi_energy])\n # Take the min of the band minima above the fermi energy.x\n lumo = min([i[1] for i in max_mins if i[1] > fermi_energy])\n\n gap = lumo - homo\n if gap <= 0.0:\n raise WrongCodeError()\n return True, gap, homo, lumo", "def fill_forward(df):\n df = df.fillna(method='ffill')\n df = df.fillna(method='bfill').fillna(0)\n return df", "def apply_bandpass_filter_timeseries(self, folder_name, indices, start_stop_freq, stop_stop_freq):\n (x_index, y_index) = indices\n photo_list = self.get_photo_list(folder_name)\n\n ts = 
self.get_pixel_timeseries(folder_name, (x_index, y_index))\n self.plot_fft_pixel_timeseries(folder_name, ts, str(x_index) + '_' + str(y_index) + 'pre_butterworth')\n n = len(ts)\n frequency = self.get_sampling_frequency(folder_name)\n d = 1.0 / frequency # 'sample spacing'\n fig, ax = plt.subplots()\n sample_freqs = np.fft.rfftfreq(n, d)\n fourier = np.fft.rfft(ts)\n print(sample_freqs)\n nyquist = frequency / 2.0\n\n start_stop_band = start_stop_freq / nyquist\n stop_stop_band = stop_stop_freq / nyquist\n\n print(start_stop_band)\n print(stop_stop_band)\n\n sos = sgnl.butter(2, Wn=[start_stop_band, stop_stop_band], btype='bandstop', output='sos')\n filtered = sgnl.sosfilt(sos, ts)\n self.plot_fft_pixel_timeseries(folder_name, filtered, str(x_index) + '_' + str(y_index) + 'post_butterworth')\n fig, ax = plt.subplots()\n indices = self.get_indices_from_filenames(folder_name)\n index_dates = dates.date2num(indices)\n ax.plot_date(index_dates, ts, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index))\n ax.plot_date(index_dates, filtered, xdate=True, linestyle='solid', marker='None',\n label=str(x_index) + ' , ' + str(y_index) + ' filtered')\n\n ax.legend()\n ax.grid(b=True, which='major', color='#666666', linestyle='-')\n\n # Show the minor grid lines with very faint and almost transparent grey lines\n ax.minorticks_on()\n ax.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\n fig.set_figwidth(40)\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.png')\n fig.savefig(self.parent_folder + 'analysis/timeseries_filtered_' + str(x_index) + '_' + str(y_index) + '.svg')\n fig.clf()", "def calbands( band = 0, tmo = 30 ) :\n optimizeThresholds(band,tmo)\n flattenPhases(band,tmo)\n calibrateSpectra(band=band,tmo=tmo)", "def mask3(imagem, value, bandNames):\n mask = imagem.select(bandNames[0]).eq(value) \\\n .bitwiseAnd(imagem.select(bandNames[1]).neq(value)) \\\n .bitwiseAnd(imagem.select(bandNames[2]).eq(value))\n change_img = imagem.select(bandNames[1]).mask(mask.eq(1)).where(mask.eq(1), value)\n img_out = imagem.select(bandNames[1]).blend(change_img)\n return img_out", "def _build_multiband_mask(data, tractor, filt2pixscale, fill_value=0.0,\n threshmask=0.01, r50mask=0.05, maxshift=10,\n relmaxshift=0.1,\n sigmamask=3.0, neighborfactor=1.0, verbose=False):\n import numpy.ma as ma\n from copy import copy\n from skimage.transform import resize\n from legacyhalos.mge import find_galaxy\n from legacyhalos.misc import srcs2image, ellipse_mask\n\n import matplotlib.pyplot as plt\n from astropy.visualization import simple_norm\n\n bands, refband = data['bands'], data['refband']\n #residual_mask = data['residual_mask']\n\n #nbox = 5\n #box = np.arange(nbox)-nbox // 2\n #box = np.meshgrid(np.arange(nbox), np.arange(nbox))[0]-nbox//2\n\n xobj, yobj = np.ogrid[0:data['refband_height'], 0:data['refband_width']]\n\n # If the row-index of the central galaxy is not provided, use the source\n # nearest to the center of the field.\n if 'galaxy_indx' in data.keys():\n galaxy_indx = np.atleast_1d(data['galaxy_indx'])\n else:\n galaxy_indx = np.array([np.argmin((tractor.bx - data['refband_height']/2)**2 +\n (tractor.by - data['refband_width']/2)**2)])\n data['galaxy_indx'] = np.atleast_1d(galaxy_indx)\n data['galaxy_id'] = ''\n\n #print('Import hack!')\n #norm = simple_norm(img, 'log', min_percent=0.05, clip=True)\n #import matplotlib.pyplot as plt ; from astropy.visualization import simple_norm\n\n ## Get 
the PSF sources.\n #psfindx = np.where(tractor.type == 'PSF')[0]\n #if len(psfindx) > 0:\n # psfsrcs = tractor.copy()\n # psfsrcs.cut(psfindx)\n #else:\n # psfsrcs = None\n\n def tractor2mge(indx, factor=1.0):\n # Convert a Tractor catalog entry to an MGE object.\n class MGEgalaxy(object):\n pass\n\n default_majoraxis = tractor.diam_init[indx] * 60 / 2 / filt2pixscale[refband] # [pixels]\n default_pa = tractor.pa_init[indx]\n default_ba = tractor.ba_init[indx]\n #default_theta = (270 - default_pa) % 180\n #default_eps = 1 - tractor.ba_init[indx]\n\n #if tractor.sga_id[indx] > -1:\n if tractor.type[indx] == 'PSF' or tractor.shape_r[indx] < 2:\n pa = tractor.pa_init[indx]\n ba = tractor.ba_init[indx]\n # take away the extra factor of 2 we put in in read_sample()\n r50 = tractor.diam_init[indx] * 60 / 2 / 2\n if r50 < 5:\n r50 = 5.0 # minimum size, arcsec\n majoraxis = factor * r50 / filt2pixscale[refband] # [pixels]\n #majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n else:\n ee = np.hypot(tractor.shape_e1[indx], tractor.shape_e2[indx])\n ba = (1 - ee) / (1 + ee)\n pa = 180 - (-np.rad2deg(np.arctan2(tractor.shape_e2[indx], tractor.shape_e1[indx]) / 2))\n pa = pa % 180\n\n # can be zero (or very small) if fit as a PSF or REX\n if tractor.shape_r[indx] > 1:\n majoraxis = factor * tractor.shape_r[indx] / filt2pixscale[refband] # [pixels]\n else:\n majoraxis = factor * tractor.diam_init[indx] * 60 / 2 / 2 / filt2pixscale[refband] # [pixels]\n\n mgegalaxy = MGEgalaxy()\n \n mgegalaxy.xmed = tractor.by[indx]\n mgegalaxy.ymed = tractor.bx[indx]\n mgegalaxy.xpeak = tractor.by[indx]\n mgegalaxy.ypeak = tractor.bx[indx]\n\n # never use the Tractor geometry (only the centroid)\n # https://portal.nersc.gov/project/cosmo/temp/ioannis/virgofilaments-html/215/NGC5584/NGC5584.html\n if True:\n mgegalaxy.eps = 1-ba\n mgegalaxy.pa = pa\n mgegalaxy.theta = (270 - pa) % 180\n mgegalaxy.majoraxis = majoraxis\n else:\n mgegalaxy.eps = 1 - default_ba\n mgegalaxy.pa = default_pa\n mgegalaxy.theta = (270 - default_pa) % 180\n mgegalaxy.majoraxis = default_majoraxis\n\n # always restore all pixels within the nominal / initial size of the galaxy\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis,\n # default_majoraxis * (1-default_eps), \n # np.radians(default_theta-90), xobj, yobj)\n #objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n # default_majoraxis, default_majoraxis, 0.0, xobj, yobj)\n\n objmask = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n mgegalaxy.majoraxis,\n mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n # central 10% pixels can override the starmask\n objmask_center = ellipse_mask(mgegalaxy.xmed, mgegalaxy.ymed, # object pixels are True\n 0.1*mgegalaxy.majoraxis,\n 0.1*mgegalaxy.majoraxis * (1-mgegalaxy.eps), \n np.radians(mgegalaxy.theta-90), xobj, yobj)\n\n return mgegalaxy, objmask, objmask_center\n\n # Now, loop through each 'galaxy_indx' from bright to faint.\n data['mge'] = []\n for ii, central in enumerate(galaxy_indx):\n print('Determing the geometry for galaxy {}/{}.'.format(\n ii+1, len(galaxy_indx)))\n\n # [1] Determine the non-parametric geometry of the galaxy of interest\n # in the reference band. First, subtract all models except the galaxy\n # and galaxies \"near\" it. 
Also restore the original pixels of the\n # central in case there was a poor deblend.\n largeshift = False\n mge, centralmask, centralmask2 = tractor2mge(central, factor=1.0)\n #plt.clf() ; plt.imshow(centralmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask.png') ; pdb.set_trace()\n\n iclose = np.where([centralmask[np.int(by), np.int(bx)]\n for by, bx in zip(tractor.by, tractor.bx)])[0]\n \n srcs = tractor.copy()\n srcs.cut(np.delete(np.arange(len(tractor)), iclose))\n model = srcs2image(srcs, data['{}_wcs'.format(refband.lower())],\n band=refband.lower(),\n pixelized_psf=data['{}_psf'.format(refband.lower())])\n\n img = data[refband].data - model\n img[centralmask] = data[refband].data[centralmask]\n\n mask = np.logical_or(ma.getmask(data[refband]), data['residual_mask'])\n #mask = np.logical_or(data[refband].mask, data['residual_mask'])\n\n # restore the central pixels but not the masked stellar pixels\n centralmask[np.logical_and(data['starmask'], np.logical_not(centralmask2))] = False\n mask[centralmask] = False\n\n img = ma.masked_array(img, mask)\n ma.set_fill_value(img, fill_value)\n #if ii == 1:\n # pdb.set_trace()\n\n mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=False)#, plot=True) ; plt.savefig('cosmo-www/tmp/junk-mge.png')\n #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('junk-mask.png')\n ##plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Did the galaxy position move? If so, revert back to the Tractor geometry.\n if np.abs(mgegalaxy.xmed-mge.xmed) > maxshift or np.abs(mgegalaxy.ymed-mge.ymed) > maxshift:\n print('Large centroid shift (x,y)=({:.3f},{:.3f})-->({:.3f},{:.3f})'.format(\n mgegalaxy.xmed, mgegalaxy.ymed, mge.xmed, mge.ymed))\n print(' Reverting to the default geometry and the Tractor centroid.')\n largeshift = True\n mgegalaxy = copy(mge)\n\n radec_med = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ymed+1, mgegalaxy.xmed+1).vals\n radec_peak = data['{}_wcs'.format(refband.lower())].pixelToPosition(\n mgegalaxy.ypeak+1, mgegalaxy.xpeak+1).vals\n mge = {\n 'largeshift': largeshift,\n 'ra': tractor.ra[central], 'dec': tractor.dec[central],\n 'bx': tractor.bx[central], 'by': tractor.by[central],\n #'mw_transmission_g': tractor.mw_transmission_g[central],\n #'mw_transmission_r': tractor.mw_transmission_r[central],\n #'mw_transmission_z': tractor.mw_transmission_z[central],\n 'ra_moment': radec_med[0], 'dec_moment': radec_med[1],\n #'ra_peak': radec_med[0], 'dec_peak': radec_med[1]\n }\n for key in ('eps', 'majoraxis', 'pa', 'theta', 'xmed', 'ymed', 'xpeak', 'ypeak'):\n mge[key] = np.float32(getattr(mgegalaxy, key))\n if key == 'pa': # put into range [0-180]\n mge[key] = mge[key] % np.float32(180)\n data['mge'].append(mge)\n\n #if False:\n # #plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # plt.clf() ; mgegalaxy = find_galaxy(img, nblob=1, binning=1, quiet=True, plot=True)\n # plt.savefig('/mnt/legacyhalos-data/debug.png')\n\n # [2] Create the satellite mask in all the bandpasses. 
Use srcs here,\n # which has had the satellites nearest to the central galaxy trimmed\n # out.\n print('Building the satellite mask.')\n satmask = np.zeros(data[refband].shape, bool)\n for filt in bands:\n # do not let GALEX and WISE contribute to the satellite mask\n if data[filt].shape != satmask.shape:\n continue\n \n cenflux = getattr(tractor, 'flux_{}'.format(filt.lower()))[central]\n satflux = getattr(srcs, 'flux_{}'.format(filt.lower()))\n if cenflux <= 0.0:\n #raise ValueError('Central galaxy flux is negative!')\n print('Central galaxy flux is negative! Proceed with caution...')\n #pdb.set_trace()\n \n satindx = np.where(np.logical_or(\n (srcs.type != 'PSF') * (srcs.shape_r > r50mask) *\n (satflux > 0.0) * ((satflux / cenflux) > threshmask),\n srcs.ref_cat == 'R1'))[0]\n #satindx = np.where(srcs.ref_cat == 'R1')[0]\n #if np.isin(central, satindx):\n # satindx = satindx[np.logical_not(np.isin(satindx, central))]\n if len(satindx) == 0:\n #raise ValueError('All satellites have been dropped!')\n #print('Warning! All satellites have been dropped from band {}!'.format(filt))\n print('Note: no satellites to mask in band {}.'.format(filt))\n else:\n satsrcs = srcs.copy()\n #satsrcs = tractor.copy()\n satsrcs.cut(satindx)\n satimg = srcs2image(satsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n thissatmask = satimg > sigmamask*data['{}_sigma'.format(filt.lower())]\n #if filt == 'FUV':\n # plt.clf() ; plt.imshow(thissatmask, origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # #plt.clf() ; plt.imshow(data[filt], origin='lower') ; plt.savefig('junk-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n if satmask.shape != satimg.shape:\n thissatmask = resize(thissatmask*1.0, satmask.shape, mode='reflect') > 0\n\n satmask = np.logical_or(satmask, thissatmask)\n #if True:\n # import matplotlib.pyplot as plt\n # plt.clf() ; plt.imshow(np.log10(satimg), origin='lower') ; plt.savefig('debug.png')\n # plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('debug.png')\n ## #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n # pdb.set_trace()\n\n #print(filt, np.sum(satmask), np.sum(thissatmask))\n\n #plt.clf() ; plt.imshow(satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask.png')\n \n # [3] Build the final image (in each filter) for ellipse-fitting. First,\n # subtract out the PSF sources. Then update the mask (but ignore the\n # residual mask). 
Finally convert to surface brightness.\n #for filt in ['W1']:\n for filt in bands:\n thismask = ma.getmask(data[filt])\n if satmask.shape != thismask.shape:\n _satmask = (resize(satmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n _centralmask = (resize(centralmask*1.0, thismask.shape, mode='reflect') > 0) == 1.0\n mask = np.logical_or(thismask, _satmask)\n mask[_centralmask] = False\n else:\n mask = np.logical_or(thismask, satmask)\n mask[centralmask] = False\n #if filt == 'r':\n # #plt.imshow(_satmask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-satmask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt))\n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt))\n # pdb.set_trace()\n\n varkey = '{}_var'.format(filt.lower())\n imagekey = '{}_masked'.format(filt.lower())\n psfimgkey = '{}_psfimg'.format(filt.lower())\n thispixscale = filt2pixscale[filt]\n if imagekey not in data.keys():\n data[imagekey], data[varkey], data[psfimgkey] = [], [], []\n\n img = ma.getdata(data[filt]).copy()\n \n # Get the PSF sources.\n psfindx = np.where((tractor.type == 'PSF') * (getattr(tractor, 'flux_{}'.format(filt.lower())) / cenflux > threshmask))[0]\n if len(psfindx) > 0 and filt.upper() != 'W3' and filt.upper() != 'W4': \n #if len(psfindx) > 0 and filt.upper() != 'NUV' and filt.upper() != 'FUV' and filt.upper() != 'W3' and filt.upper() != 'W4':\n psfsrcs = tractor.copy()\n psfsrcs.cut(psfindx)\n else:\n psfsrcs = None\n \n if psfsrcs:\n psfimg = srcs2image(psfsrcs, data['{}_wcs'.format(filt.lower())],\n band=filt.lower(),\n pixelized_psf=data['{}_psf'.format(filt.lower())])\n if False:\n #import fitsio ; fitsio.write('junk-psf-{}.fits'.format(filt.lower()), data['{}_psf'.format(filt.lower())].img, clobber=True)\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n im = ax1.imshow(np.log10(img), origin='lower') ; fig.colorbar(im, ax=ax1)\n im = ax2.imshow(np.log10(psfimg), origin='lower') ; fig.colorbar(im, ax=ax2)\n im = ax3.imshow(np.log10(data['{}_psf'.format(filt.lower())].img), origin='lower') ; fig.colorbar(im, ax=ax3)\n im = ax4.imshow(img-psfimg, origin='lower') ; fig.colorbar(im, ax=ax4)\n plt.savefig('desi-users/ioannis/tmp/qa-psf-{}.png'.format(filt.lower()))\n if filt == 'r':# or filt == 'r':\n pdb.set_trace()\n img -= psfimg\n else:\n psfimg = np.zeros((2, 2), 'f4')\n\n data[psfimgkey].append(psfimg)\n \n img = ma.masked_array((img / thispixscale**2).astype('f4'), mask) # [nanomaggies/arcsec**2]\n var = data['{}_var_'.format(filt.lower())] / thispixscale**4 # [nanomaggies**2/arcsec**4]\n\n # Fill with zeros, for fun--\n ma.set_fill_value(img, fill_value)\n #if ii == 0 and filt == 'r': #filt == 'W1' or \n # plt.clf() ; plt.imshow(img, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-img-{}.png'.format(filt.lower()))\n # plt.clf() ; plt.imshow(mask, origin='lower') ; plt.savefig('desi-users/ioannis/tmp/junk-mask-{}.png'.format(filt.lower()))\n ##### plt.clf() ; plt.imshow(thismask, origin='lower') ; plt.savefig('junk-thismask-{}.png'.format(filt.lower()))\n # pdb.set_trace()\n \n data[imagekey].append(img)\n data[varkey].append(var)\n\n #test = data['r_masked'][0]\n #plt.clf() ; plt.imshow(np.log(test.clip(test[mgegalaxy.xpeak, mgegalaxy.ypeak]/1e4)), origin='lower') ; plt.savefig('/mnt/legacyhalos-data/debug.png')\n #pdb.set_trace()\n\n # Cleanup?\n for filt in bands:\n del data[filt]\n del 
data['{}_var_'.format(filt.lower())]\n\n return data", "def DrawBands(self, count):\n value = self.little[0]\n mobile_average = float(sum([float(self.little[i])\n for i in range(len(self.little))])) / float(self.period)\n standard_derivation = sqrt(sum([pow(self.little[i] - mobile_average, 2)\n for i in range(len(self.little))]) / self.period)\n upper_band = mobile_average + (standard_derivation * self.sd_coef)\n lower_band = mobile_average - (standard_derivation * self.sd_coef)\n self.upper.insert(0, upper_band)\n self.lower.insert(0, lower_band)\n if len(self.upper) >= self.period:\n self.upper.pop()\n if len(self.lower) >= self.period:\n self.lower.pop()\n if count >= self.period:\n for i in range(len(self.little) - 1):\n self.canvas.create_line((i * self.incr / 1.725) + self.incr * 4,\n self.height - self.incr * 4 + (self.little[i] - 1) * 5000 - 200,\n (i * self.incr / 1.725) + self.incr * 4 + self.incr / 1.725,\n self.height - self.incr * 4 + (self.little[i + 1] - 1) * 5000 - 200,\n fill = \"#FFFF00\", width = 2)\n for i in range(len(self.upper) - 1):\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.upper[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.upper[i + 1] - 1) * 5000 - 200,\n fill = \"#FF6600\", width = 3)\n self.canvas.create_line((i * self.incr / 1.635) + self.incr * 4,\n self.height - self.incr * 4 + (self.lower[i] - 1) * 5000 - 200,\n (i * self.incr / 1.635) + self.incr * 4 + self.incr / 1.635,\n self.height - self.incr * 4 + (self.lower[i + 1] - 1) * 5000 - 200,\n fill = \"#FF0000\", width = 3)", "def ledaps(image):\n cmask = image.select('QA')\n\n valid_data_mask = tools.compute_bits(cmask, 1, 1, 'valid_data')\n cloud_mask = tools.compute_bits(cmask, 2, 2, 'cloud')\n snow_mask = tools.compute_bits(cmask, 4, 4, 'snow')\n\n good_pix = cloud_mask.eq(0).And(valid_data_mask.eq(0)).And(snow_mask.eq(0))\n result = image.updateMask(good_pix)\n\n return result", "def applyWindow5years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-3):\n img_out = img_out.addBands(mask5(imagem, value,bandNames[(i-1):(i+4)]))\n img_out = img_out.addBands(imagem.select(bandNames[-3]))\n img_out = img_out.addBands(imagem.select(bandNames[-2]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def modify_bands(\n xraster: xr.core.dataarray.DataArray, input_bands: List[str],\n output_bands: List[str], drop_bands: List[str] = []):\n # Do not modify if image has the same number of output bands\n if xraster['band'].shape[0] == len(output_bands):\n return xraster\n\n # Drop any bands from input that should not be on output\n for ind_id in list(set(input_bands) - set(output_bands)):\n drop_bands.append(input_bands.index(ind_id)+1)\n return xraster.drop(dim=\"band\", labels=drop_bands, drop=True)", "def estimate_bandpass(data):\n \n est = filter(data, params.st_bp_window_f, axis=0)\n est = filter(est, params.st_bp_window_t, axis=1)\n \n return est", "def replace_missingvalues_bandmean(X):\n if X.ndim != 4:\n raise ValueError('Input not valid, no [pic, row, column, band] data format')\n\n zeros = np.where(X[:,:,:] == 0)\n\n bandmean = {}\n\n for i in sorted(np.unique(zeros[3])):\n bandmean.update({i:np.mean(X[:,:,:,i])})\n\n for i in range(0,len(zeros[0])):\n pic, row, column, band = zeros[0][i],zeros[1][i],zeros[2][i],zeros[3][i]\n mean = bandmean.get(band)\n X[pic,row,column,band] = 
int(mean)\n\n return X", "def bandpass_filter(self, pmin=0.5, pmax=100, cadence=None, edge=2000,\n zero_fill=False):\n if cadence is None:\n try:\n cadence = self.cadence\n except AttributeError:\n pass\n\n x, y, yerr = bandpass_filter(self._x_full,\n self._y_full,\n self._yerr_full, zero_fill=zero_fill,\n pmin=pmin, pmax=pmax)\n x = x[edge:-edge]\n y = y[edge:-edge]\n yerr = yerr[edge:-edge]\n\n self._x = x.copy()\n self._y = y.copy()\n self._yerr = yerr.copy()\n\n self._x_full = x.copy()\n self._y_full = y.copy()\n self._yerr_full = yerr.copy()\n\n self._x_list = None\n self._y_list = None\n self._yerr_list = None\n\n if self.sub is not None:\n self.subsample(self.sub)", "def forward(self,i,direction):\n \"\"\"the direction argument is used to dertermine the direcrtion of the forward function, designed for the equilibrium of the two classes of the datasets\"\"\"\n if(direction):\n self.mask_A = self.netG_Amask[self.orders[i]](self.real_A)\n self.A = self.netG_A[self.orders[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Bmask[self.orders[i]](self.fake_B)\n self.B = self.netG_B[self.orders[i]](self.fake_B)\n self.rec_A = self.B.mul(self.mask_B)+(1-self.mask_B).mul(self.fake_B) # G_B(G_A(A))\n else:\n self.mask_A = self.netG_Bmask[self.orders_rev[i]](self.real_A)\n self.A = self.netG_B[self.orders_rev[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Amask[self.orders_rev[i]](self.fake_B)\n self.B = self.netG_A[self.orders_rev[i]](self.fake_B)\n self.rec_A = self.B.mul(\n self.mask_B)+(self.mask_B).mul(1-self.fake_B) # G_B(G_A(A))", "def applyWindow3years(imagem, value, bandNames):\n img_out = imagem.select(bandNames[0])\n for i in np.arange(1, len(bandNames)-1):\n img_out = img_out.addBands(mask3(imagem, value,bandNames[(i-1):(i+2)]))\n img_out = img_out.addBands(imagem.select(bandNames[-1]))\n return img_out", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def stack_layers(inDir, outPath, bands=None):\r\n # ONLY SUPPORTS inDir W/ LANDSAT BANDS ENDING /w '_B#.TIF'\r\n # band = ds.GetRasterBand(1)\r\n # band.GetStatistics(True,True) - returns min,max,mean,std\r\n # band.ReadAsArray()\r\n try:\r\n fns = []\r\n if bands is None:\r\n # process all bands in the directory.\r\n bandtypes = ('*_B*.TIF','*band*.tif')\r\n for bandtype in bandtypes:\r\n fns.extend(glob.glob(inDir + bandtype))\r\n else:\r\n # process the specified bands.\r\n blist = '[' + ','.join(str(i) for i in bands) + ']'\r\n bandtypes = ('*B' + blist + '.TIF', '*band' + blist + '.tif')\r\n for bandtype in bandtypes:\r\n fns.extend(glob.glob(inDir + bandtype))\r\n\r\n # Read the first raster & get its band.\r\n fns.sort()\r\n fn = fns.pop(0)\r\n\r\n ras = gdal.Open(fn)\r\n\r\n band = ras.GetRasterBand(1)\r\n\r\n # rows & cols\r\n cols = ras.RasterXSize\r\n rows = ras.RasterYSize\r\n\r\n # raster info\r\n geo = ras.GetGeoTransform()\r\n originX = geo[0]\r\n originY = geo[3]\r\n pixelWidth = geo[1]\r\n pixelHeight = geo[5]\r\n\r\n # Create the output raster\r\n driver = gdal.GetDriverByName('GTiff')\r\n outRas = driver.Create(outPath, cols, rows, len(fns) + 1, band.DataType)\r\n outRas.SetGeoTransform(geo)\r\n #outRas.SetGeoTransform((originX, pixelWidth, 0, originY, 0, 
pixelHeight)) # not sure what the zeros are\r\n\r\n # Get the spatial ref info\r\n outRasterSRS = osr.SpatialReference()\r\n outRasterSRS.ImportFromWkt(ras.GetProjectionRef())\r\n\r\n # Write the bands to the new file.\r\n outRas.GetRasterBand(1).WriteArray(band.ReadAsArray())\r\n\r\n # Loop thru any remaining files, adding them to the output.\r\n for i in range(0, len(fns)):\r\n ras = gdal.Open(fns[i])\r\n band = ras.GetRasterBand(1)\r\n outRas.GetRasterBand(i + 2).WriteArray(band.ReadAsArray())\r\n\r\n # Add the spatial ref info at the end.\r\n outRas.SetProjection(outRasterSRS.ExportToWkt())\r\n # write and close the output file.\r\n outRas.FlushCache()\r\n outRas = None\r\n except RuntimeError:\r\n print 'ERROR PROCESSING ' + fn\r\n traceback.print_exc()\r\n return", "def correct_band(self, arr, band):\n if self.lut[band] is None:\n # No interpolation, so return NaNs\n new_arr = arr.copy()\n new_arr[:] = np.nan\n return new_arr\n else:\n return self.lut[band](arr)", "def reshape_bfill(x, y, xnew, left_values=\"first\", right_values=0):\r\n fill_value = [left_values, right_values]\r\n if left_values == \"first\":\r\n fill_value[0] = y[0]\r\n fill_value = tuple(fill_value)\r\n foo = scipy.interpolate.interp1d(\r\n x, y,\r\n axis=0,\r\n copy=False,\r\n kind=\"next\",\r\n bounds_error=False,\r\n fill_value=fill_value,\r\n assume_sorted=True,\r\n )\r\n return foo(xnew)", "def fake_date_fill(df, back_method: str = 'slice'):\n df_index = df.index.to_series().copy()\n df2 = df.sort_index(ascending=False).copy()\n df2 = df2.apply(lambda x: pd.Series(x.dropna().values))\n df2 = df2.sort_index(ascending=False)\n df2.index = df_index.tail(len(df2.index))\n df2 = df2.dropna(how='all', axis=0)\n if df2.empty:\n df2 = df.fillna(0)\n\n if back_method == 'bfill':\n df2 = fill_forward(df2)\n return df\n elif back_method == 'slice':\n thresh = int(df.shape[1] * 0.5)\n thresh = thresh if thresh > 1 else 1\n df3 = df2.dropna(thresh=thresh, axis=0)\n if df3.empty or df3.shape[0] < 8:\n df3 = fill_forward(df2)\n else:\n df3 = fill_forward(df3)\n return df3\n elif back_method == 'keepna':\n return df2\n else:\n print('back_method not recognized in fake_date_fill')\n return df2", "def forward(self, f, b, mask):\n raw_int_fs = list(f.size())\n raw_int_bs = list(b.size())\n kernel = 2 * self.rate\n raw_w = extract_image_patches(b, ksizes=[kernel, kernel], strides=[self.rate * self.stride, self.rate * self.stride], rates=[1, 1], padding='same')\n raw_w = raw_w.view(raw_int_bs[0], raw_int_bs[1], kernel, kernel, -1)\n raw_w = raw_w.permute(0, 4, 1, 2, 3)\n raw_w_groups = torch.split(raw_w, 1, dim=0)\n f = F.interpolate(f, scale_factor=1.0 / self.rate, mode='nearest')\n b = F.interpolate(b, scale_factor=1.0 / self.rate, mode='nearest')\n int_fs = list(f.size())\n int_bs = list(b.size())\n f_groups = torch.split(f, 1, dim=0)\n w = extract_image_patches(b, ksizes=[self.ksize, self.ksize], strides=[self.stride, self.stride], rates=[1, 1], padding='same')\n w = w.view(int_bs[0], int_bs[1], self.ksize, self.ksize, -1)\n w = w.permute(0, 4, 1, 2, 3)\n w_groups = torch.split(w, 1, dim=0)\n mask = F.interpolate(mask, scale_factor=1.0 / self.rate, mode='nearest')\n int_ms = list(mask.size())\n m = extract_image_patches(mask, ksizes=[self.ksize, self.ksize], strides=[self.stride, self.stride], rates=[1, 1], padding='same')\n m = m.view(int_ms[0], int_ms[1], self.ksize, self.ksize, -1)\n m = m.permute(0, 4, 1, 2, 3)\n mm = reduce_mean(m, axis=[3, 4]).unsqueeze(-1)\n y = []\n for i, (xi, wi, raw_wi) in enumerate(zip(f_groups, 
w_groups, raw_w_groups)):\n \"\"\"\n O => output channel as a conv filter\n I => input channel as a conv filter\n xi : separated tensor along batch dimension of front;\n wi : separated patch tensor along batch dimension of back;\n raw_wi : separated tensor along batch dimension of back;\n \"\"\"\n wi = wi[0]\n max_wi = torch.sqrt(reduce_sum(torch.pow(wi, 2) + 0.0001, axis=[1, 2, 3], keepdim=True))\n wi_normed = wi / max_wi\n xi = same_padding(xi, [self.ksize, self.ksize], [1, 1], [1, 1])\n yi = F.conv2d(xi, wi_normed, stride=1)\n if self.fuse:\n yi = yi.view(1, 1, int_bs[2] * int_bs[3], int_fs[2] * int_fs[3])\n yi = same_padding(yi, [self.fuse_k, self.fuse_k], [1, 1], [1, 1])\n yi = F.conv2d(yi, self.fuse_weight, stride=1)\n yi = yi.contiguous().view(1, int_bs[2], int_bs[3], int_fs[2], int_fs[3])\n yi = yi.permute(0, 2, 1, 4, 3)\n yi = yi.contiguous().view(1, 1, int_bs[2] * int_bs[3], int_fs[2] * int_fs[3])\n yi = same_padding(yi, [self.fuse_k, self.fuse_k], [1, 1], [1, 1])\n yi = F.conv2d(yi, self.fuse_weight, stride=1)\n yi = yi.contiguous().view(1, int_bs[3], int_bs[2], int_fs[3], int_fs[2])\n yi = yi.permute(0, 2, 1, 4, 3).contiguous()\n yi = yi.view(1, int_bs[2] * int_bs[3], int_fs[2], int_fs[3])\n yi = yi * mm[i:i + 1]\n yi = F.softmax(yi * self.softmax_scale, dim=1)\n yi = yi * mm[i:i + 1]\n wi_center = raw_wi[0]\n yi = F.conv_transpose2d(yi, wi_center, stride=self.rate, padding=1) / 4.0\n y.append(yi)\n y = torch.cat(y, dim=0)\n y.contiguous().view(raw_int_fs)\n return y", "def design_filter(interpolation, decimation, fractional_bw):\n\n if fractional_bw >= 0.5 or fractional_bw <= 0:\n raise ValueError('Invalid fractional bandwidth, must be in (0, 0.5)')\n\n if decimation < 1 or interpolation < 1:\n raise ValueError('Invalid interpolation or decimation rate. 
Must be a non-zero positive integer.')\n\n beta = 7.0\n halfband = 0.5\n rate = float(interpolation)/float(decimation)\n if(rate >= 1.0):\n trans_width = halfband - fractional_bw\n mid_transition_band = halfband - trans_width/2.0\n else:\n trans_width = rate*(halfband - fractional_bw)\n mid_transition_band = rate*halfband - trans_width/2.0\n\n taps = filter.firdes.low_pass(interpolation, # gain\n interpolation, # Fs\n mid_transition_band, # trans mid point\n trans_width, # transition width\n filter.firdes.WIN_KAISER,\n beta) # beta\n\n return taps", "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def bandpass_filter(files, lowpass_freq=0.1, highpass_freq=0.01, tr=2):\n import os\n\n import nibabel as nb\n import numpy as np\n from nipype.utils.filemanip import (\n filename_to_list,\n list_to_filename,\n split_filename\n )\n\n fs = 1./tr\n\n out_files = []\n for filename in filename_to_list(files):\n path, name, ext = split_filename(filename)\n out_file = os.path.join(os.getcwd(), name + '_bandpassed' + ext)\n\n img = nb.load(filename)\n timepoints = img.shape[-1]\n F = np.zeros((timepoints))\n\n lowidx = int(timepoints / 2) + 1\n if lowpass_freq > 0:\n lowidx = np.round(float(lowpass_freq) / fs * timepoints)\n\n highidx = 0\n if highpass_freq > 0:\n highidx = np.round(float(highpass_freq) / fs * timepoints)\n F[int(highidx):int(lowidx)] = 1\n F = ((F + F[::-1]) > 0).astype(int)\n data = img.get_data()\n if np.all(F == 1):\n filtered_data = data\n else:\n filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))\n img_out = nb.Nifti1Image(filtered_data, img.affine, img.header)\n img_out.to_filename(out_file)\n out_files.append(out_file)\n\n return 
list_to_filename(out_files)", "def flood_fill_edges(img, stride):\n\n black = 0\n white = 255\n (rows, cols) = img.shape\n msk = np.zeros((rows+2, cols+2, 1), np.uint8)\n\n # Left and right edges\n i = 0\n while i < rows:\n if img[i, 0] == white:\n cv2.floodFill(img, msk, (0, i), black)\n if img[i, cols-1] == white:\n cv2.floodFill(img, msk, (cols-1, i), black)\n i += stride\n\n # Top and bottom edges\n i = 0\n while i < cols:\n if img[0, i] == white:\n cv2.floodFill(img, msk, (i, 0), black)\n if img[rows-1, i] == white:\n cv2.floodFill(img, msk, (i, rows-1), black)\n i += stride", "def create_band_maps(self):\n band_maps = []\n source_band_index = 1\n target_band_index = self.starting_target_band\n for band in self.image['bands']:\n band_maps.append({\n 'source': source_band_index,\n 'target': target_band_index\n })\n source_band_index += 1\n target_band_index += 1\n return band_maps", "def apply_gap_filter(fastalines, allowed_gap_frac=1-eps, verbose=False):\n return apply_lane_mask_and_gap_filter(fastalines, None, \\\n allowed_gap_frac=allowed_gap_frac, verbose=False)", "def sharpen_bands(self):\n for label in self.labels:\n self.sharp_bands[label] = self.bands[label] - self.gauss_bands[\n label]", "def get_aligned_feature_4frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...])]\n for i in range(n, 1, -1):\n x_i = x[:, i - 1, ...]\n flow1 = flows_backward[0][:, i - 2, ...]\n if i == n:\n x_ii = torch.zeros_like(x[:, n - 2, ...])\n flow2 = torch.zeros_like(flows_backward[1][:, n - 3, ...])\n else:\n x_ii = x[:, i, ...]\n flow2 = flows_backward[1][:, i - 2, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_backward.insert(0, self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i - 2, ...], [flow1, flow2]))\n x_forward = [torch.zeros_like(x[:, 0, ...])]\n for i in range(-1, n - 2):\n x_i = x[:, i + 1, ...]\n flow1 = flows_forward[0][:, i + 1, ...]\n if i == -1:\n x_ii = torch.zeros_like(x[:, 1, ...])\n flow2 = torch.zeros_like(flows_forward[1][:, 0, ...])\n else:\n x_ii = x[:, i, ...]\n flow2 = flows_forward[1][:, i, ...]\n x_i_warped = flow_warp(x_i, flow1.permute(0, 2, 3, 1), 'bilinear')\n x_ii_warped = flow_warp(x_ii, flow2.permute(0, 2, 3, 1), 'bilinear')\n x_forward.append(self.pa_deform(torch.cat([x_i, x_ii], 1), [x_i_warped, x_ii_warped], x[:, i + 2, ...], [flow1, flow2]))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]", "def _gap_filter(self):\n res = self.cfg.resolution\n xedges = np.linspace(self.lrx[0]-res/2., self.lrx[-1]+res/2.0, len(self.lrx)+1)\n yedges = np.linspace(self.lry[0]-res/2., self.lry[-1]+res/2.0, len(self.lry)+1)\n\n # Calculates point density of als shots per DEM grid cell\n self.rzhist, xe, ye = np.histogram2d(self.x[self.nonan].flatten(),\n self.y[self.nonan].flatten(),\n bins=[xedges, yedges])\n self.rzhist = self.rzhist.transpose()\n data_mask = self.rzhist > 0.0\n\n filter_algorithm = self.cfg.gap_filter[\"algorithm\"]\n if filter_algorithm == \"maximum_filter\":\n data_mask = maximum_filter(data_mask, **self.cfg.gap_filter[\"keyw\"])\n else:\n raise NotImplementedError(\"Filter algorithm: %s\" % filter_algorithm)\n\n self.dem_mask = ~data_mask", "def blending_example1():\n pic_desert = read_image(relpath(\"./externals/pic_desert.jpg\"), 2)\n pic_pool = read_image(relpath(\"./externals/pic_swim.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_desert.jpg\"), 
1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n print(pic_desert.shape[2])\n [R1, G1, B1] = np.dsplit(pic_desert, pic_desert.shape[2])\n [R2, G2, B2] = np.dsplit(pic_pool, pic_pool.shape[2])\n R1 = np.reshape(R1, (512,1024))\n R2 = np.reshape(R2, (512,1024))\n G1 = np.reshape(G1, (512,1024))\n G2 = np.reshape(G2, (512,1024))\n B1 = np.reshape(B1, (512,1024))\n B2 = np.reshape(B2, (512,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_desert)\n ax2.imshow(pic_pool)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_desert, pic_pool, mask, new_pic", "def test_filter_bg(images):\n print('STARTING BACKGROUND FILTERING TEST')\n\n for i, image in enumerate(images):\n bg_filtered_image = filter_bg(image)\n\n contours, _ = cv2.findContours(bg_filtered_image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n bg_filtered_image = cv2.drawContours(cv2.cvtColor(bg_filtered_image, cv2.COLOR_GRAY2BGR), contours, 0, [255,0,0], 1)\n\n original_and_filtered = hstack_images(image, bg_filtered_image, False)\n cv2.imshow('original & background filtered image ({})'.format(i), original_and_filtered)\n cv2.waitKey(0)\n\n cv2.destroyAllWindows()\n print('FINISHED BACKGROUND FILTERING TEST\\n')", "def bandpassFilter (self, lowerFreq, upperFreq):\n self.bandpassLimits = (lowerFreq, upperFreq)\n # stuff to do", "def butter_filter_unwrapped(image, n, f0, pad = False):\n if pad == True:\n res = [2**kji for kji in range(15)]\n N_int_x = image.shape[1]\n N_int_y = image.shape[0]\n nx_pad = np.where(res > np.tile(N_int_x, len(res)))\n nx_pad = res[nx_pad[0][0]]\n ny_pad = np.where(res > np.tile(N_int_y, len(res)))\n ny_pad = res[ny_pad[0][0]]\n dif_x = (nx_pad - N_int_x)/2\n dif_y = (ny_pad - N_int_y)/2\n orig_shape = image.shape\n image = np.lib.pad(image, ((dif_y, dif_y), (dif_x, dif_x)), 'reflect')\n \n [ny, nx] = image.shape\n dx = 2.0/nx\n dy = 2.0/ny\n dfx = 0.5\n dfy = 0.5\n fx = np.arange(-0.5/dx, 0.5/dx, dfx)\n fy = np.arange(-0.5/dy, 0.5/dy, dfy)\n FX, FY = np.meshgrid(fx, fy)\n shift = np.exp(-2*np.pi*1j*(FX+FY))\n butt_filt = 1/(1 + ( np.sqrt(FX**2 + FY**2)/f0)**(2*n))\n ft_img = shift * np.fft.fftshift(np.fft.fft2(image))\n \n buttered = np.real(np.fft.ifftshift(np.fft.ifft2(butt_filt * ft_img))/ shift)\n if pad == True:\n assert(buttered[dif_y: ny_pad - dif_y, dif_x:nx_pad - dif_x].shape == orig_shape)\n return buttered[dif_y: ny_pad - dif_y, dif_x:nx_pad - dif_x]\n else:\n assert(buttered.shape == image.shape)\n return buttered", "def imgBC(img, mask=None, scale=1.0, numBins=64, returnBias=False):\n spacing = np.array(img.GetSpacing())/scale\n img_ds = imgResample(img, spacing=spacing)\n\n # Calculate bias\n if mask is None:\n mask_ds = sitk.Image(img_ds.GetSize(), sitk.sitkUInt8)+1\n mask_ds.CopyInformation(img_ds)\n else:\n mask_ds = imgResample(mask, spacing=spacing, useNearest=True)\n mask_ds = mask_ds > 0\n\n splineOrder = 2\n img_ds_bc = 
sitk.N4BiasFieldCorrection(sitk.Cast(img_ds, sitk.sitkFloat32), mask_ds, numberOfHistogramBins=numBins, splineOrder=splineOrder, numberOfControlPoints=[splineOrder+1]*4)\n #bias_ds = img_ds_bc - sitk.Cast(img_ds,img_ds_bc.GetPixelID())\n\n bias_ds = imgFinite(img_ds_bc / img_ds)\n bias_ds = sitk.Mask(bias_ds, mask_ds) + sitk.Cast(1-mask_ds, sitk.sitkFloat32) # Fill background with 1s\n\n # Upsample bias \n bias = imgResample(bias_ds, spacing=img.GetSpacing(), size=img.GetSize())\n bias = sitk.Cast(bias, img.GetPixelID())\n\n # Apply bias to original image and threshold to eliminate negitive values\n try:\n upper = np.iinfo(sitkToNpDataTypes[img.GetPixelID()]).max\n except:\n upper = np.finfo(sitkToNpDataTypes[img.GetPixelID()]).max *0.99\n\n #img_bc = sitk.Threshold(img + sitk.Cast(bias, img.GetPixelID()),\n # lower=0,\n # upper=upper)\n\n img_bc = sitk.Threshold(img * bias, lower=0, upper=upper)\n\n if returnBias:\n return (img_bc, bias)\n else:\n return img_bc", "def filter_bands(self, imagery, bands=None, names=None, wavelengths=None) -> 'ImageCollection':\n\n graph = {\n 'process_id': 'filter_bands',\n 'imagery': imagery.graph,\n }\n\n if bands:\n graph['bands'] = bands\n if names:\n graph['names'] = names\n if wavelengths:\n graph['wavelengths'] = wavelengths\n\n imagery.graph = graph\n return imagery", "def apply_gap_filter(fastalines, allowed_gap_frac=1 - finfo(float).eps,\r\n verbose=False):\r\n return apply_lane_mask_and_gap_filter(fastalines, None,\r\n allowed_gap_frac=allowed_gap_frac, verbose=False)", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=1):\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = filtfilt(b, a, data)\n return y", "def format_data(self, bands, branches):\n self.new_dict = self.__gen_dict\n order = [] # list to store high-symmetry point\n band_index = {} # dict to store band index info corresponding to its high-symmetry point e.g. 
\"X\": 18\n formatted_bands = []\n zero_matrix = np.zeros(np.shape(bands))\n \"\"\"\n zero_matrix is for: if one configuration does not have some high-symmetry points listed in __generic_dict\n then fill zeros in those columns \n \"\"\"\n\n for i in range(len(branches)):\n order.append(branches[i][\"name\"])\n spilt = re.split('-', order[i])\n\n band_index[spilt[0]] = branches[i]['start_index']\n band_index[spilt[1]] = branches[i]['end_index']\n\n # print('>>>>>>>>>>>>>>>>>>', band_index)\n # iterate all keys in band_index, and if exists, give value to new_dict, if not, pass\n for hs_point in band_index:\n if hs_point in self.new_dict:\n self.new_dict[hs_point] = band_index[hs_point]\n # print('>>>>>>>>>>>>>>>>>', BandsData.__gen_dict)\n\n # iterate all keys in new_dict, export bands (not arranged in bands dimension)\n for hs_point in self.new_dict:\n hs_value = self.new_dict[hs_point]\n if self.new_dict[hs_point] is None:\n # fill zeros in bands\n formatted_bands.append(zero_matrix[:, 0])\n else:\n formatted_bands.append(bands[:, hs_value])\n\n # transpose of formatted_bands\n formatted_bands = np.transpose(formatted_bands)\n\n return formatted_bands, self.new_dict", "def white_balance(image, perc):\n new_channel = []\n for channel in cv2.split(image):\n mi, ma = (np.percentile(channel, perc), np.percentile(channel, 100.0 - perc))\n channel = np.uint8(np.clip((channel - mi) * 255.0 / (ma - mi), 0, 255))\n new_channel.append(channel)\n\n imWB = np.dstack(new_channel)\n\n return imWB", "def conRFMixAndMaskToBeagle(indfile_name, rephasedhaps_pref, em_iters, win_size, chroms):\n\t### First get individual information\n\twindow_id = 0\n\tem_iter = em_iters\n\tindfile = open(indfile_name, \"r\")\t\n\tinds = []\n\tfor line in indfile:\n\t\tsplits = line.strip(\"\\r\\n\").split()\n\t\tinds.append(splits[1] + \"_A\")\n\t\tinds.append(splits[1] + \"_B\")\n\n\tallloci = []\n\toutfilename = rephasedhaps_pref + \"_w\" + str(win_size) + \".beagle\"\n\toutfile = open(outfilename, \"w\")\n\toutfile.write(\"I\\tid\\t\" + \"\\t\".join(inds) + \"\\n\")\n\t## Write genotype data out to file\n\n\tvitout = open(rephasedhaps_pref + \".vit\", \"w\")\n\twinout = open(rephasedhaps_pref + \".windows\", \"w\")\n\tfbkout = rephasedhaps_pref + \".fbk\"\n\tif os.path.exists(fbkout):\n\t\tos.remove(fbkout)\n\tvitlist = []\n\tfor chrom in chroms:\n\t\tprint chrom\n\t\tshapeitfilename = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.allelesRephased\" + str(em_iters) + \".txt\"\n\t\tshapeitfile = open(shapeitfilename, \"rb\")\n\t\tfbkin_name = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".ForwardBackward.txt\"\n\t\tos.system('cat ' + fbkin_name + \" >> \" + fbkout) # Concatenate files together\n\t\tmarkerin = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.amaps\"\n\t\tmarkerfile = open(markerin, \"r\")\n\t\tloci=[]\n\t\talleles = {}\n\t\tfor mline in markerfile:\n\t\t\tmsplit = mline.strip().split()\n\t\t\tloci.append(msplit[1])\n\t\t\talleles[msplit[1]] = [msplit[3], msplit[4] ]\n\n\t\tallloci.extend(loci)\n\t\tfor j,line in enumerate(shapeitfile):\n\t\t\tsline = line.strip(\"\\r\\n\")\n\t\t\tzero, ones = alleles[loci[j]]\n\t\t\tfixed = [ recodeAllele(k, zero, ones) for k in sline ]\n\t\t\toutfile.write(\"M\\t\" + loci[j] + \"\\t\" + \"\\t\".join(fixed) + \"\\n\")\n\t\tvitfile = open(rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".Viterbi.txt\", \"r\")\n\t\tvitlist.extend([x.strip().split() for x in 
vitfile])\n\t\tshapeitfile.close()\n\t\tvitfile.close()\n\t\t\n\t# This will transpose the whole Viterbi file\n\t# Yikes this may take a lot of memory\n\tfor i,x in enumerate(zip(*vitlist)):\n\t\tvitout.write(inds[i] + \"\\t\")\n\t\tfor y in x:\n\t\t\tvitout.write(y+\"\\t\")\n\t\tvitout.write(\"\\n\")\n\t\t### This doesn't quite work yet so make sure to fix it next time\n\tfor l in allloci:\n\t\twinout.write(\"window\" + str(window_id) + \"\\t\" + l + \"\\n\")\n\t\twindow_id += 1\n\treturn([outfile.name, vitout.name, winout.name, fbkout])", "def find_bias_groups(self, make_plot=False, show_plot=False, \n save_plot=True, min_gap=.5):\n self.log('This is specific for the Keck K2 umux FPU.')\n self.log('Working on band 2 first')\n tes_freq = {}\n for bg in np.arange(4):\n # The frequency of TESs in MHz\n tes_freq[bg] = self.find_tes(2, bg, make_plot=make_plot) + \\\n self.get_band_center_mhz(2)\n \n good_tes = {}\n\n # Anything we see in BG 0 is noise.\n bad_res = tes_freq[0]\n\n for bg in np.arange(1,4):\n good_tes[bg] = np.array([])\n \n # Find resonators too close to known bad resonators\n for r in tes_freq[bg]:\n if np.min(np.abs(bad_res - r)) > min_gap:\n good_tes[bg] = np.append(good_tes[bg], r)\n \n ca_freq, ca_sb, ca_ch, ca_bg = self.get_master_assignment(2)\n\n for bg in np.arange(1,4):\n for tes in good_tes[bg]:\n nearest = np.min(np.abs(ca_freq - tes))\n print(nearest)\n if nearest < min_gap:\n idx = np.where(np.abs(ca_freq-tes) == nearest)\n ca_bg[idx] = bg\n \n self.write_master_assignment(2, ca_freq, ca_sb, ca_ch,\n groups=ca_bg)\n\n self.log('Working on band 3')\n for bg in np.arange(4,8):\n # The frequency of TESs in MHz\n tes_freq[bg] = self.find_tes(3, bg, make_plot=make_plot) + \\\n self.get_band_center_mhz(3)\n\n # Anything we see in BG 6 is noise.\n bad_res = tes_freq[6]\n\n for bg in np.array([4,5,7]):\n good_tes[bg] = np.array([])\n \n # Find resonators too close to known bad resonators\n for r in tes_freq[bg]:\n if np.min(np.abs(bad_res - r)) > min_gap:\n good_tes[bg] = np.append(good_tes[bg], r)\n \n ca_freq, ca_sb, ca_ch, ca_bg = self.get_master_assignment(3)\n\n for bg in np.array([4,5,7]):\n for tes in good_tes[bg]:\n nearest = np.min(np.abs(ca_freq - tes))\n if nearest < min_gap:\n idx = np.where(np.abs(ca_freq-tes) == nearest)\n ca_bg[idx] = bg\n \n self.write_master_assignment(3, ca_freq, ca_sb, ca_ch,\n groups=ca_bg)\n\n #for k in good_tes.keys():\n # self.log('{} TESs in BG {}'.format(len(good_tes[k], k)))\n\n return good_tes", "def fill_gaps(image, closing_radius=0, min_hole_size=0, median_radius=0.6):\n closing_structure = _disk(closing_radius)\n median_structure = _disk(median_radius)\n\n out = morphology.binary_closing(image, closing_structure)\n out = morphology.remove_small_holes(out, min_size=min_hole_size)\n out = filters.median(out, selem=median_structure)\n\n return(out)", "def bias_vs_wba_plots(pbproject=HS_PROJECT, img_format='png', transpose=True, legend=False):\n\n def detect_discontinuities(y, threshold):\n return np.where(np.abs(np.diff(y)) >= threshold)[0]\n\n def mark_discontinuities(x, y, threshold=0.01, use_x=False):\n discontinuities = detect_discontinuities(y if not use_x else x, threshold) + 1\n x = np.insert(x, discontinuities, np.nan)\n y = np.insert(y, discontinuities, np.nan)\n return x, y\n\n mpl_params()\n\n print('Reading the data...')\n df = perturbation_data_to_records(pbproject=pbproject)\n\n by_freq = df[['genotype', 'freq', 'flyid', 'wba', 'wba_t']].groupby(('freq',))\n\n dest_dir = ensure_dir(op.join(pbproject.plots_dir, 
'bias_vs_wba'))\n for freq, freq_data in by_freq:\n min_t = min([wba_t.min() for wba_t in freq_data['wba_t'] if len(wba_t) > 0])\n max_t = max([wba_t.max() for wba_t in freq_data['wba_t'] if len(wba_t) > 0])\n min_wba = min([wba.min() for wba in freq_data['wba'] if len(wba) > 0])\n max_wba = max([wba.max() for wba in freq_data['wba'] if len(wba) > 0])\n for flyid, flydata in freq_data.groupby(('flyid',)):\n print 'Flyid: %s; freq=%g' % (flyid, freq)\n if len(flydata) > 1:\n raise Exception('The flyid %s is not unique!' % flyid)\n # Fly wba trajectory\n flydata = flydata.iloc[0]\n genotype = flydata.genotype\n x, y = np.array(flydata['wba_t']), np.array(flydata['wba'])\n x, y = mark_discontinuities(x, y, use_x=True, threshold=0.02)\n # Ideal perturbation signal\n amplitude = (max_wba - min_wba) / 2.\n phase = 0\n mean_val = 0 # mean/median\n pert_t = np.linspace(min_t, max_t, 1000)\n pert = perturbation_signal(pert_t, amplitude, phase, mean_val, freq)\n # Superimpose both, plot\n plt.figure()\n if not transpose:\n plt.plot(x, y, color=genotype_color(flydata['genotype']), label='Fly R-L (rad)')\n plt.plot(pert_t, pert, color='g', label='Perturbation')\n plt.axhline(linewidth=4, color='g')\n plt.xlabel('time (s)')\n plt.ylabel('Strength of turn')\n if legend:\n plt.legend()\n plt.xlim((min_t - (max_t - min_t) / 10, max_t + (max_t - min_t) / 10))\n else:\n plt.plot(y, x, color=genotype_color(flydata['genotype']), label='Fly R-L (rad)')\n plt.plot(pert, pert_t, color='g', label='Perturbation')\n plt.axvline(linewidth=4, color='g')\n plt.ylabel('time (s)')\n plt.xlabel('Strength of turn')\n if legend:\n plt.legend()\n plt.ylim((min_t - (max_t - min_t) / 10, max_t + (max_t - min_t) / 10))\n plt.title('fly:%s(%s); freq=%.1f rad/s' %\n (flyid, genotype.replace('_', '+'), freq))\n fn = '%s-%.1f-%s.%s' % (genotype, freq, flyid, img_format)\n plt.savefig(op.join(dest_dir, fn))\n plt.close()", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def apply_pixel(self, bands:List, bandfunction) -> 'ImageCollection':\n pickled_lambda = cloudpickle.dumps(bandfunction)\n\n process_id = 'apply_pixel'\n args = {\n 'imagery':self.graph,\n 'bands':bands,\n 'function': str(base64.b64encode(pickled_lambda), \"UTF-8\")\n }\n\n return self.graph_add_process(process_id, args)", "def apply_mask(self, mask_band=None, mask_val=None):\n pass", "def blending_example2():\n pic_earth = read_image(relpath(\"./externals/pic_earth.jpg\"), 2)\n pic_asteroid = read_image(relpath(\"./externals/pic_asteroid.jpg\"), 2)\n mask = read_image(relpath(\"./externals/mask_asteroid.jpg\"), 1)\n # making the mask binary (normalizing 2 original values)\n mask = strech_helper(mask).astype(np.bool)\n [R1, G1, B1] = np.dsplit(pic_earth, pic_earth.shape[2])\n [R2, G2, B2] = np.dsplit(pic_asteroid, pic_asteroid.shape[2])\n R1 = np.reshape(R1, (1024,1024))\n R2 = np.reshape(R2, (1024,1024))\n G1 = np.reshape(G1, (1024,1024))\n G2 = np.reshape(G2, (1024,1024))\n B1 = np.reshape(B1, (1024,1024))\n B2 = np.reshape(B2, (1024,1024))\n\n blend1 = pyramid_blending(R2, R1, mask, 3, 3, 3)\n blend2 = pyramid_blending(G2, G1, mask, 3, 3, 3)\n blend3 = pyramid_blending(B2, B1, mask, 3, 3, 3)\n\n blend1 = np.reshape(blend1, (blend1.shape[0], blend1.shape[1], 1))\n blend2 = 
np.reshape(blend2, (blend2.shape[0], blend3.shape[1], 1))\n blend3 = np.reshape(blend3, (blend3.shape[0], blend3.shape[1], 1))\n\n new_pic = np.concatenate((blend1, blend2, blend3), axis=2)\n # plotting the images\n fig = plt.figure()\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n ax1.imshow(pic_earth)\n ax2.imshow(pic_asteroid)\n ax3.imshow(mask, cmap='gray')\n ax4.imshow(new_pic)\n plt.show()\n\n return pic_earth, pic_asteroid, mask, new_pic", "def n4_bias_correction(mri, mask_image=None, shrink_factor=(4, 4, 4)):\n from tinycat.label import gen_mask\n import SimpleITK as sitk\n\n mri_data = mri.get_data()\n mri_image = sitk.GetImageFromArray(mri_data)\n mri_image = sitk.Cast(mri_image, sitk.sitkFloat32)\n\n if mask_image is None:\n mask_image = sitk.OtsuThreshold(mri_image, 1)\n else:\n mask_image = sitk.GetImageFromArray(mask_image)\n\n # Shrink image to minimize computation cost\n mri_image_sh = sitk.Shrink(mri_image, shrink_factor)\n mask_image_sh = sitk.Shrink(mask_image, shrink_factor)\n corrector = sitk.N4BiasFieldCorrectionImageFilter()\n\n # Default parameters for slicer 3D\n corrector.SetSplineOrder = 3\n corrector.SetConvergenceThreshold = 0.0001\n corrector.SetMaximumNumberOfIterations = [50, 50, 50]\n corrector.SetWienerFilterNoise = 0\n corrector.SetNumberOfHistogramBins = 0\n corrector.SetBiasFieldFullWidthAtHalfMaximum = 0.15\n\n # Calculate bias-field filter\n n4_output = corrector.Execute(mri_image_sh, mask_image_sh)\n n4_filter = sitk.Subtract(n4_output, mri_image_sh)\n\n # Apply bias-field filter to masked original data\n n4_array = ndimage.interpolation.zoom(\n sitk.GetArrayFromImage(n4_filter), zoom=shrink_factor, order=3\n )\n mri_data = sitk.GetArrayFromImage(mri_image)\n semi_mask = mri_data >= mri_data.mean()\n mask = gen_mask(semi_mask)\n mri_data[mask] = mri_data[mask] - n4_array[mask]\n\n return cat.Nifti1Image(mri_data, mri.affine, mri.header)", "def backward_post_hook(self):\n if not (self.trace_nan or self.trace_inf):\n return None\n\n # Perform F.isnan and then F.sum to check the output of incoming function contains the nan value.\n def callback(f):\n # For the first time to check this function.\n self._add_key(f, self.key_to_stat_bwd)\n\n # apply callback to check the outputs of this function has nan values or not.\n nan = []\n if self.trace_nan:\n nan = [F.sum(F.isnan(i.grad)) for i in f.inputs]\n\n inf = []\n if self.trace_inf:\n inf = [F.sum(F.isinf(i.grad)) for i in f.inputs]\n\n self.key_to_stat_bwd[f].update({\n \"inf\": inf,\n \"nan\": nan,\n # rank might be changed between each iteration.\n \"rank\": f.rank,\n })\n\n return callback", "def butter_bandpass_filter(data, lowcut, highcut, fs, order=2):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n \n # butter() and lfilter() are from scipy.signal\n \n b, a = butter(order, [low, high], btype='band')\n y = lfilter(b, a, data)\n return y", "def bfill(\n self: FrameLike,\n axis: Optional[Axis] = None,\n inplace: bool_type = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n return self.fillna(method=\"bfill\", axis=axis, inplace=inplace, limit=limit)", "def backfill(arr, arr1):\n \n arr = np.where(arr < 0.01, np.NaN, arr)\n # FIXME:\n # RuntimeWarning: invalid value encountered in less\n # arr = np.where(arr < 0.01, np.NaN, arr)\n\n x = np.isnan(arr1)\n arr1[x] = arr[x]\n return arr1", "def dask_gd2_nanfill(xx, yy, z_array, algorithm='cubic', **kwargs):\n n_jobs = kwargs.pop(\"n_jobs\", 4)\n chunk_size = 
kwargs.get(\"chunk_size\", int(xx.size / (n_jobs - 1)))\n # make dask arrays\n dask_xyz = da.from_array((xx, yy, z_array), chunks=(3, chunk_size, \"auto\"), name=\"dask_all\")\n dask_xx = dask_xyz[0,:,:]\n dask_yy = dask_xyz[1,:,:]\n dask_zz = dask_xyz[2,:,:]\n\n # select only valid values\n dask_valid_x1 = dask_xx[~da.isnan(dask_zz)]\n dask_valid_y1 = dask_yy[~da.isnan(dask_zz)]\n dask_valid_z1 = dask_zz[~da.isnan(dask_zz)]\n\n # interpolate for missing values\n return dask_interpolate(dask_valid_x1, dask_valid_y1, dask_valid_z1, dask_xx, dask_yy, algorithm=algorithm, **kwargs)", "def time_bucket_gapfill(self, field: str, interval: str, start: datetime, end: datetime, datapoints: int=240):\n return self.values(bucket=TimeBucketGapFill(field, interval, start, end, datapoints))", "def apply_filling(data, filling_list):\n method_name = filling_list[0][MethodKeys.METHOD]\n method_params = filling_list[0][MethodKeys.PARAMETERS]\n return NonSeqFillingMethods.get_method(method_name)(**method_params)(\n data)", "def get_aligned_image_2frames(self, x, flows_backward, flows_forward):\n n = x.size(1)\n x_backward = [torch.zeros_like(x[:, -1, ...]).repeat(1, 4, 1, 1)]\n for i in range(n - 1, 0, -1):\n x_i = x[:, i, ...]\n flow = flows_backward[:, i - 1, ...]\n x_backward.insert(0, flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))\n x_forward = [torch.zeros_like(x[:, 0, ...]).repeat(1, 4, 1, 1)]\n for i in range(0, n - 1):\n x_i = x[:, i, ...]\n flow = flows_forward[:, i, ...]\n x_forward.append(flow_warp(x_i, flow.permute(0, 2, 3, 1), 'nearest4'))\n return [torch.stack(x_backward, 1), torch.stack(x_forward, 1)]", "def butter_bandpass_filter(\n data: numpy.ndarray,\n lowcut: float,\n highcut: float,\n samplerate: float,\n order: int = 2,\n):\n nyq = 0.5 * samplerate\n lowf = lowcut / nyq\n highf = highcut / nyq\n # generic names for coefficients in filters\n # pylint: disable=invalid-name\n a, b = butter(order, [lowf, highf], btype=\"band\")\n if len(data) < BUTTER_MIN_LENGTH:\n return None\n return filtfilt(a, b, data)", "def bandpass_filterbank(bands, fs=1.0, order=8, output=\"sos\"):\n\n filters = []\n nyquist = fs / 2.0\n\n for band in bands:\n # remove bands above nyquist frequency\n if band[0] >= nyquist:\n raise ValueError(\"Bands should be below Nyquist frequency\")\n\n # Truncate the highest band to Nyquist frequency\n norm_band = np.minimum(0.99, np.array(band) / nyquist)\n\n # Compute coefficients\n coeffs = butter(order / 2, norm_band, \"bandpass\", output=output)\n filters.append(coeffs)\n\n return filters", "def __band_filter(data: dict, lowFreq: Union[int, float], highFreq: Union[int, float], timestep: int=0,\n samplingFreq: int=240, order: int=5, eegSensor: int=0, filterType: str='bandpass',\n lengthOfTestSeconds: Union[int, float]=32, example: int=0) -> dict:\n #Test\n # Filter.__band_filter_test(data=data, low=lowFreq, high=highFreq, samplingFreq=samplingFreq, order=order,\n # eegSensor=eegSensor, filterType=filterType, lengthOfTestSeconds=lengthOfTestSeconds)\n #Code\n nyq = 0.5 * samplingFreq\n low = lowFreq / nyq\n high = highFreq / nyq\n b, a = signal.butter(order, [low, high], btype=filterType)\n y = signal.lfilter(b, a, data['Signal'])\n ##Graph - This belongs somewhere else probably.\n # t = np.linspace(0, len(data), len(data), endpoint=False)\n # plt.plot(t, y, label='Sensor #' + str(eegSensor) + ' (' + str(lowFreq) + '-' + str(highFreq) + ') Hz')\n # plt.grid(True)\n # plt.axis('tight')\n # plt.xticks(range(10), range(lengthOfTestSeconds)) ##32 seconds per test?\n # 
plt.xlabel(\"Time in Seconds\")\n # plt.legend(loc='upper left')\n # plt.show()\n output = {}\n timestep = []\n for index, eegChannel in enumerate(y[0]):#the extra [0] is becuase signal.lfilter() puts it in a 1D array. Grrr\n timestep.append(eegChannel)\n output['Signal'] = timestep\n Visualization.channelGraph(y[0][0])\n return output #output is 2D 64xTimeSamples", "def _make_filters(self):\n\n \"\"\"\n filter_bank = bandpass_filterbank(\n self.bands, fs=self.fs, order=order, output=output\n )\n\n return [lambda sig: sosfiltfilt(bpf, sig) for bpf in filter_bank]\n \"\"\"\n\n # This seems to work only for Octave bands out of the box\n centers = self.centers\n n = len(self.centers)\n\n new_bands = [[centers[0] / 2, centers[1]]]\n for i in range(1, n - 1):\n new_bands.append([centers[i - 1], centers[i + 1]])\n new_bands.append([centers[-2], self.fs / 2])\n\n n_freq = self.n_fft // 2 + 1\n freq_resp = np.zeros((n_freq, n))\n freq = np.arange(n_freq) / self.n_fft * self.fs\n\n for b, (band, center) in enumerate(zip(new_bands, centers)):\n lo = np.logical_and(band[0] <= freq, freq < center)\n freq_resp[lo, b] = 0.5 * (1 + np.cos(2 * np.pi * freq[lo] / center))\n\n if b != n - 1:\n hi = np.logical_and(center <= freq, freq < band[1])\n freq_resp[hi, b] = 0.5 * (1 - np.cos(2 * np.pi * freq[hi] / band[1]))\n else:\n hi = center <= freq\n freq_resp[hi, b] = 1.0\n\n filters = np.fft.fftshift(\n np.fft.irfft(freq_resp, n=self.n_fft, axis=0),\n axes=[0],\n )\n\n # remove the first sample to make them odd-length symmetric filters\n self.filters = filters[1:, :]", "def single_channel_stacking(tifs):\n template_ID=int(len(tifs)/2)\n \n template_raster=gdal_array.LoadFile(tifs[template_ID-1])\n avg_raster=np.zeros_like(template_raster)\n avg_raster=avg_raster+1\n new_raster=np.copy(template_raster)\n # ones=np.full(template_raster.shape, 1)\n for i, tif in enumerate(tifs, start=1):\n if i==template_ID: \n continue\n \n tif_raster=gdal_array.LoadFile(tif)\n # tif_raster=cut_transformed_array_borders(tif_raster)\n result=ird.similarity(template_raster,tif_raster , numiter=1, order=1)\n img_transformed= ird.transform_img(tif_raster, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=2)\n \n img_transformed=cut_transformed_array_borders(img_transformed)\n \n # ones_transformed=ird.transform_img(ones, scale=result['scale'], angle=result['angle'], tvec=result['tvec'], mode='constant', bgval=0, order=1)\n ones_transformed=np.zeros_like(template_raster)\n ones_transformed[np.where(img_transformed>0)]=1\n print(ones_transformed)\n \n print(np.mean(ones_transformed), np.max(ones_transformed), np.min(ones_transformed))\n print(ones_transformed[np.where(ones_transformed>0)])\n print(np.min(ones_transformed[np.where(ones_transformed>0)]))\n print(np.max(ones_transformed[np.where(ones_transformed>0)]))\n\n plt.imshow(ones_transformed)\n plt.show()\n plt.close()\n \n # ones_transformed=cut_transformed_array_borders(ones_transformed)\n \n avg_raster=avg_raster+ones_transformed\n # ird.imshow(template_raster, tif_raster, img_transformed)\n \n new_raster=new_raster+img_transformed\n \n # new_raster=new_raster+template_raster \n # new_raster=new_raster/len(tifs)\n\n gtz=np.where(avg_raster>0)\n \n\n \n\n \n \n plt.imshow(new_raster)\n plt.show()\n plt.close()\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_not_abvertaghe_stacked_.tiff\")\n new_raster[gtz]=new_raster[gtz]/avg_raster[gtz] \n gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")\n 
plt.imshow(new_raster)\n plt.savefig(\"test.tif\", dpi=800)\n plt.show()\n plt.close()\n\n def discrete_cmap(N, base_cmap=None):\n \"\"\"Create an N-bin discrete colormap from the specified input map\"\"\"\n \n # Note that if base_cmap is a string or None, you can simply do\n # return plt.cm.get_cmap(base_cmap, N)\n # The following works for string, None, or a colormap instance:\n \n base = plt.cm.get_cmap(base_cmap)\n color_list = base(np.linspace(0, 1, N))\n cmap_name = base.name + str(N)\n return base.from_list(cmap_name, color_list, N)\n\n cmap=discrete_cmap(int(avg_raster.max())+1, base_cmap=\"ocean\") \n \n norm=mpl.colors.BoundaryNorm(np.arange(-0.5,int(avg_raster.max()+1)), cmap.N)\n fig=plt.figure()\n fig.set_size_inches(5,4)\n ax=fig.add_subplot(111)\n data=ax.matshow(avg_raster, cmap=cmap, norm=norm)\n fig.colorbar(data, ticks=np.linspace(0,int(avg_raster.max()),int(avg_raster.max()+1)), drawedges=True)\n\n plt.show()\n plt.close()\n\n\n # gdal_array.SaveArray(new_raster, tifs[0][:-4]+\"_stacked_.tiff\")", "def pre_process_pat(x_data, y_data, background, z_data, fig=None):\n backgr_sm = scipy.ndimage.gaussian_filter(background, sigma=5)\n\n imq = z_data - backgr_sm\n imq = imq - np.mean(imq, axis=1).reshape((-1, 1))\n\n ks = 5\n w = np.ones((1, ks)) / ks\n imx = scipy.ndimage.convolve(imq, w, mode='nearest')\n\n qq = np.percentile(imx, [5, 50, 95])\n imx = imx - qq[1]\n qq = np.percentile(imx, [2, 50, 98])\n scale = np.mean([-qq[0], qq[2]])\n imx = imx / scale\n\n if fig is not None:\n # y_data = np.arange(imq.shape[0])\n plt.figure(fig)\n plt.clf()\n plt.subplot(2, 2, 1)\n plt.pcolormesh(x_data, y_data, z_data, shading='auto')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n plt.title('Input data')\n plt.subplot(2, 2, 2)\n plt.pcolormesh(x_data, y_data, imq, shading='auto')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n plt.title('imq')\n plt.subplot(2, 2, 3)\n plt.pcolormesh(x_data, y_data, imx, shading='auto')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n plt.title('imx')\n plt.tight_layout()\n\n return imx, imq, backgr_sm", "def freespaceImageAnalysis( fids, guesses = None, fit=True, bgInput=None, bgPcInput=None, shapes=[None], zeroCorrection=0, zeroCorrectionPC=0,\n keys=None, fitModule=bump, extraPicDictionaries=None, newAnnotation=False, onlyThisPic=None, pltVSize=5, \n plotSigmas=False, plotCounts=False, manualColorRange=None, calcTemperature=False, clearOutput=True, \n dataRange=None, guessTemp=10e-6, trackFitCenter=False, picsPerRep=1, startPic=0, binningParams=None, \n win=pw.PictureWindow(), transferAnalysisOpts=None, tferBinningParams=None, tferWin= pw.PictureWindow(),\n extraTferAnalysisArgs={}, emGainSetting=300, lastConditionIsBackGround=True, showTferAnalysisPlots=True,\n show2dFitsAndResiduals=True, plotFitAmps=False, indvColorRanges=False, fitF2D=gaussian_2d.f_notheta, \n rmHighCounts=True, useBase=True, weightBackgroundByLoading=True, returnPics=False, forceNoAnnotation=False):\n fids = [fids] if type(fids) == int else fids\n keys = [None for _ in fids] if keys is None else keys\n sortedStackedPics = {}\n initThresholds = [None]\n picsForBg = []\n bgWeights = []\n isAnnotatedList = []\n for filenum, fid in enumerate(fids):\n if transferAnalysisOpts is not None:\n res = ta.stage1TransferAnalysis( fid, transferAnalysisOpts, useBase=useBase, **extraTferAnalysisArgs )\n (initAtoms, tferAtoms, initAtomsPs, tferAtomsPs, key, keyName, initPicCounts, tferPicCounts, repetitions, initThresholds,\n avgPics, tferThresholds, 
initAtomImages, tferAtomImages, basicInfoStr, ensembleHits, groupedPostSelectedPics, isAnnotated) = res\n isAnnotatedList.append(isAnnotated)\n # assumes that you only want to look at the first condition. \n for varPics in groupedPostSelectedPics: # don't remember why 0 works if false...\n picsForBg.append(varPics[-1 if lastConditionIsBackGround else 0])\n bgWeights.append(len(varPics[0]))\n allFSIPics = [ varpics[0][startPic::picsPerRep] for varpics in groupedPostSelectedPics]\n if showTferAnalysisPlots:\n fig, axs = plt.subplots(1,2)\n mp.makeAvgPlts( axs[0], axs[1], avgPics, transferAnalysisOpts, ['r','g','b'] ) \n allFSIPics = [win.window( np.array(pics) ) for pics in allFSIPics]\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n elif type(fid) == int:\n ### For looking at either PGC imgs or FSI imgs \n with exp.ExpFile(fid) as file:\n # I think this only makes sense if there is a specific bg pic in the rotation\n picsForBg.append(list(file.get_pics()))\n allFSIPics = file.get_pics()[startPic::picsPerRep]\n _, key = file.get_key()\n if len(np.array(key).shape) == 2:\n key = key[:,0]\n file.get_basic_info()\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n else:\n ### Assumes given pics have the same start pic and increment (picsPerRep).\n # doesn't combine well w/ transfer analysis\n picsForBg.append(fid)\n allFSIPics = fid[startPic::picsPerRep]\n print(\"Assuming input is list of all pics, then splices to get FSI pics. Old code assumed the given were FSI pics.\")\n allFSIPics = win.window( allFSIPics )\n allFSIPics = ah.softwareBinning( binningParams, allFSIPics )\n allFSIPics = np.reshape( allFSIPics, (len(key), int(allFSIPics.shape[0]/len(key)), allFSIPics.shape[1], allFSIPics.shape[2]) )\n # ##############\n if keys[filenum] is not None:\n key = keys[filenum]\n for i, keyV in enumerate(key):\n keyV = misc.round_sig_str(keyV)\n sortedStackedPics[keyV] = np.append(sortedStackedPics[keyV], allFSIPics[i],axis=0) if (keyV in sortedStackedPics) else allFSIPics[i] \n if lastConditionIsBackGround:\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = startPic, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights, \n weightBackgrounds=weightBackgroundByLoading)\n elif bgInput == 'lastPic':\n bgInput, pcBgInput = getBgImgs(picsForBg, startPic = picsPerRep-1, picsPerRep = picsPerRep, rmHighCounts=rmHighCounts, bgWeights=bgWeights,\n weightBackgrounds=weightBackgroundByLoading )\n if bgInput is not None: # was broken and not working if not given bg\n bgInput = win.window(bgInput)\n bgInput = ah.softwareBinning(binningParams, bgInput)\n if bgPcInput is not None:\n bgPcInput = win.window(bgPcInput)\n bgPcInput = ah.softwareBinning(binningParams, bgPcInput) \n \n if extraPicDictionaries is not None:\n if type(extraPicDictionaries) == dict:\n extraPicDictionaries = [extraPicDictionaries]\n for dictionary in extraPicDictionaries:\n for keyV, pics in dictionary.items():\n sortedStackedPics[keyV] = (np.append(sortedStackedPics[keyV], pics,axis=0) if keyV in sortedStackedPics else pics) \n sortedStackedKeyFl = [float(keyStr) for keyStr in sortedStackedPics.keys()]\n sortedKey, sortedStackedPics = ah.applyDataRange(dataRange, sortedStackedPics, list(sorted(sortedStackedKeyFl)))\n numVars = len(sortedStackedPics.items())\n if len(np.array(shapes).shape) == 1:\n shapes = [shapes for _ in 
range(numVars)] \n if guesses is None:\n guesses = [[None for _ in range(4)] for _ in range(numVars)]\n if len(np.array(bgInput).shape) == 2 or bgInput == None:\n bgInput = [bgInput for _ in range(numVars)]\n if len(np.array(bgPcInput).shape) == 2 or bgPcInput == None:\n bgPcInput = [bgPcInput for _ in range(numVars)]\n \n datalen, avgFitSigmas, images, hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D = [{} for _ in range(9)]\n titles = ['Bare', 'Photon-Count', 'Bare-mbg', 'Photon-Count-mbg']\n assert(len(sortedKey)>0)\n for vari, keyV in enumerate(sortedKey):\n keyV=misc.round_sig_str(keyV)\n if vari==0:\n initKeyv = keyV\n varPics = sortedStackedPics[keyV]\n # 0 is init atom pics for post-selection on atom number... if we wanted to.\n expansionPics = rmHighCountPics(varPics,7000) if rmHighCounts else varPics\n datalen[keyV] = len(expansionPics)\n expPhotonCountImage = photonCounting(expansionPics, 120)[0] / len(expansionPics)\n bgPhotonCountImage = np.zeros(expansionPics[0].shape) if bgPcInput[vari] is None else bgPcInput[vari]\n expAvg = np.mean(expansionPics, 0)\n bgAvg = np.zeros(expansionPics[0].shape) if (bgInput[vari] is None or len(bgInput[vari]) == 1) else bgInput[vari]\n \n if bgPhotonCountImage is None:\n print('no bg photon', expAvg.shape)\n bgPhotonCount = np.zeros(photonCountImage.shape)\n avg_mbg = expAvg - bgAvg\n avg_mbgpc = expPhotonCountImage - bgPhotonCountImage\n images[keyV] = [expAvg, expPhotonCountImage, avg_mbg, avg_mbgpc]\n hFitParams[keyV], hFitErrs[keyV], vFitParams[keyV], vFitErrs[keyV], fitParams2D[keyV], fitErrs2D[keyV] = [[] for _ in range(6)]\n for imnum, (im, guess) in enumerate(zip(images[keyV], guesses[vari])):\n if fit:\n # fancy guess_x and guess_y values use the initial fitted value, typically short time, as a guess.\n _, pictureFitParams2d, pictureFitErrors2d, v_params, v_errs, h_params, h_errs = ah.fitPic(\n im, guessSigma_x=5, guessSigma_y=5, showFit=False, \n guess_x=None if vari==0 else fitParams2D[initKeyv][imnum][1], guess_y=None if vari==0 else fitParams2D[initKeyv][imnum][2],\n fitF=fitF2D)\n fitParams2D[keyV].append(pictureFitParams2d)\n fitErrs2D[keyV].append(pictureFitErrors2d)\n hFitParams[keyV].append(h_params)\n hFitErrs[keyV].append(h_errs)\n vFitParams[keyV].append(v_params)\n vFitErrs[keyV].append(v_errs)\n # conversion from the num of pixels on the camera to microns at the focus of the tweezers\n cf = 16e-6/64\n mins, maxes = [[], []]\n imgs_ = np.array(list(images.values()))\n for imgInc in range(4):\n if indvColorRanges:\n mins.append(None)\n maxes.append(None)\n elif manualColorRange is None:\n mins.append(min(imgs_[:,imgInc].flatten()))\n maxes.append(max(imgs_[:,imgInc].flatten()))\n else:\n mins.append(manualColorRange[0])\n maxes.append(manualColorRange[1])\n numVariations = len(images)\n if onlyThisPic is None:\n fig, axs = plt.subplots(numVariations, 4, figsize=(20, pltVSize*numVariations))\n if numVariations == 1:\n axs = np.array([axs])\n bgFig, bgAxs = plt.subplots(1, 2, figsize=(20, pltVSize))\n else:\n numRows = int(np.ceil((numVariations+3)/4))\n fig, axs = plt.subplots(numRows, 4 if numVariations>1 else 3, figsize=(20, pltVSize*numRows))\n avgPicAx = axs.flatten()[-3]\n avgPicFig = fig\n bgAxs = [axs.flatten()[-1], axs.flatten()[-2]]\n bgFig = fig\n if show2dFitsAndResiduals:\n fig2d, axs2d = plt.subplots(*((2,numVariations) if numVariations>1 else (1,2)))\n keyPlt = np.zeros(len(images))\n (totalSignal, hfitCenter, hFitCenterErrs, hSigmas, hSigmaErrs, h_amp, hAmpErrs, vfitCenter, vFitCenterErrs, 
vSigmas, vSigmaErrs, v_amp, \n vAmpErrs, hSigma2D, hSigma2dErr, vSigma2D, vSigma2dErr) = [np.zeros((len(images), 4)) for _ in range(17)]\n \n for vari, ((keyV,ims), hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D) in enumerate(zip(\n images.items(), *[dic.values() for dic in [hFitParams, hFitErrs, vFitParams, vFitErrs, fitParams2D, fitErrs2D]])):\n for which in range(4):\n if onlyThisPic is None:\n (im, ax, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, axs[vari], titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n else:\n which = onlyThisPic\n ax = axs.flatten()[vari]\n (im, title, min_, max_, hparams, hErrs, vparams, vErrs, param2d, err2d\n ) = [obj[which] for obj in (ims, titles, mins, maxes, hParamSet, hErr_set, vParamSet, vErr_set, paramSet2D, errSet2D)] \n h_amp[vari][which], hfitCenter[vari][which], hSigmas[vari][which] = hparams[0], hparams[1], hparams[2]*cf*1e6\n hAmpErrs[vari][which], hFitCenterErrs[vari][which], hSigmaErrs[vari][which] = hErrs[0], hErrs[1], hErrs[2]*cf*1e6\n v_amp[vari][which], vfitCenter[vari][which], vSigmas[vari][which] = vparams[0], vparams[1], vparams[2]*cf*1e6\n vAmpErrs[vari][which], vFitCenterErrs[vari][which], vSigmaErrs[vari][which] = vErrs[0], vErrs[1], vErrs[2]*cf*1e6\n hSigma2D[vari][which], hSigma2dErr[vari][which], vSigma2D[vari][which], vSigma2dErr[vari][which] = [\n val*cf*1e6 for val in [param2d[-3], err2d[-3], param2d[-2], err2d[-2]]]\n \n totalSignal[vari][which] = np.sum(im.flatten())\n keyPlt[vari] = keyV\n res = mp.fancyImshow(fig, ax, im, imageArgs={'cmap':dark_viridis_cmap, 'vmin':min_, 'vmax':max_}, \n hFitParams=hparams, vFitParams=vparams, fitModule=fitModule, flipVAx = True, fitParams2D=param2d)\n ax.set_title(keyV + ': ' + str(datalen[keyV]) + ';\\n' + title + ': ' + misc.errString(hSigmas[vari][which],hSigmaErrs[vari][which]) \n + r'$\\mu m$ sigma, ' + misc.round_sig_str(totalSignal[vari][which],5), fontsize=12) \n if show2dFitsAndResiduals:\n X, Y = np.meshgrid(np.arange(len(im[0])), np.arange(len(im)))\n data_fitted = fitF2D((X,Y), *param2d)\n fitProper = data_fitted.reshape(im.shape[0],im.shape[1])\n ax1 = axs2d[0] if numVariations == 1 else axs2d[0,vari]\n ax2 = axs2d[1] if numVariations == 1 else axs2d[1,vari]\n imr = ax1.imshow(fitProper, vmin=min_, vmax=max_)\n mp.addAxColorbar(fig2d, ax1, imr)\n ax1.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n imr = ax2.imshow(fitProper-im)\n mp.addAxColorbar(fig2d, ax2, imr)\n ax2.contour(np.arange(len(im[0])), np.arange(len(im)), fitProper, 4, colors='w', alpha=0.2)\n if onlyThisPic is not None:\n break\n \n mp.fancyImshow(avgPicFig, avgPicAx, np.mean([img[onlyThisPic] for img in images.values()],axis=0), imageArgs={'cmap':dark_viridis_cmap},flipVAx = True)\n avgPicAx.set_title('Average Over Variations')\n ### Plotting background and photon counted background\n mp.fancyImshow(bgFig, bgAxs[0], bgAvg, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[0].set_title('Background image (' + str(len(picsForBg)/picsPerRep) + ')')\n mp.fancyImshow(bgFig, bgAxs[1], bgPhotonCountImage, imageArgs={'cmap':dark_viridis_cmap},flipVAx = True) \n bgAxs[1].set_title('Photon counted background image (' + str(len(picsForBg)/picsPerRep) + ')')\n fig.subplots_adjust(left=0,right=1,bottom=0.1, hspace=0.2, **({'top': 0.7, 'wspace': 0.4} if (onlyThisPic is None) else {'top': 0.9, 'wspace': 0.3}))\n \n disp.display(fig)\n temps, tempErrs, tempFitVs, = 
[],[],[]\n if calcTemperature: \n for sigmas, sigmaerrs in zip([hSigmas, vSigmas, hSigma2D, vSigma2D],[hSigmaErrs, vSigmaErrs, hSigma2dErr, vSigma2dErr]):\n mbgSigmas = np.array([elt[2] for elt in sigmas])\n mbgSigmaErrs = np.array([elt[2] for elt in sigmaerrs])\n myGuess = [0.0, min((mbgSigmas)*1e-6), guessTemp]\n temp, fitV, cov = ah.calcBallisticTemperature(keyPlt*1e-3, (mbgSigmas)*1e-6, guess = myGuess, sizeErrors = mbgSigmaErrs)\n error = np.sqrt(np.diag(cov))\n temps.append(temp)\n tempErrs.append(error[2])\n tempFitVs.append(fitV)\n numAxisCol = int(plotSigmas) + int(plotCounts) + int(trackFitCenter)\n if numAxisCol != 0:\n fig2, axs = plt.subplots(1, numAxisCol, figsize = (15, 5)) \n fig2.subplots_adjust(top=0.75, wspace = 0.4)\n colors = ['b','k','c','purple']\n if plotSigmas:\n ax = (axs if numAxisCol == 1 else axs[0]) \n stdStyle = dict(marker='o',linestyle='',capsize=3)\n if onlyThisPic is not None:\n ax.errorbar(keyPlt, hSigmas[:,onlyThisPic], hSigmaErrs[:,onlyThisPic], color=colors[0], label='h '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, hSigma2D[:,onlyThisPic], hSigma2dErr[:,onlyThisPic], color=colors[1], label='2dh '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,onlyThisPic], vSigmaErrs[:,onlyThisPic], color=colors[2], label='v '+titles[onlyThisPic], **stdStyle);\n ax.errorbar(keyPlt, vSigma2D[:,onlyThisPic], vSigma2dErr[:,onlyThisPic], color=colors[3], label='2dv '+titles[onlyThisPic], **stdStyle);\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hSigmas[:,whichPic], hSigmaErrs[:,whichPic], color='b', label='h '+titles[whichPic], **stdStyle);\n ax.errorbar(keyPlt, vSigmas[:,whichPic], vSigmaErrs[:,whichPic], color='c', label='v '+titles[whichPic], **stdStyle);\n ax.set_ylim(max(0,ax.get_ylim()[0]),min([ax.get_ylim()[1],5]))\n ax.set_ylabel(r'Fit Sigma ($\\mu m$)')\n \n if calcTemperature:\n # converting time to s, hSigmas in um \n xPoints = np.linspace(min(keyPlt), max(keyPlt))*1e-3\n for num, fitV in enumerate(tempFitVs):\n #ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *myGuess)*1e6, label = 'guess')\n ax.plot(xPoints*1e3, LargeBeamMotExpansion.f(xPoints, *fitV)*1e6, color=colors[num])\n ax.legend()\n\n if plotFitAmps: \n ax = (axs if numAxisCol == 1 else axs[0])\n ampAx = ax.twinx()\n\n if onlyThisPic is not None:\n ampAx.errorbar(keyPlt, h_amp[:,onlyThisPic], hAmpErrs[:,onlyThisPic], label='h '+titles[onlyThisPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,onlyThisPic], vAmpErrs[:,onlyThisPic], label='v '+titles[onlyThisPic], color = 'r', **stdStyle);\n else:\n for whichPic in range(4):\n ampAx.errorbar(keyPlt, h_amp[:,whichPic], hAmpErrs[:,whichPic], label='h '+titles[whichPic], color = 'orange', **stdStyle);\n ampAx.errorbar(keyPlt, v_amp[:,whichPic], vAmpErrs[:,whichPic], label='v '+titles[whichPic], color = 'r', **stdStyle);\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in ampAx.yaxis.get_ticklabels()]\n ampAx.set_ylabel(r'Fit h_amps', color = 'r')\n \n hTotalPhotons, vTotalPhotons = None, None\n if plotCounts:\n # numAxCol = 1: ax = axs\n # numAxCol = 2: plotSigmas + plotCounts -- ax = axs[1]\n # numAxCol = 2: plotCounts + trackFitCenter -- ax = axs[0]\n # numAxCol = 3: ax = axs[1]\n if numAxisCol == 1:\n ax = axs\n elif numAxisCol == 2:\n ax = axs[1 if plotSigmas else 0]\n else:\n ax = axs[1]\n # Create axis to plot photon counts\n ax.set_ylabel(r'Integrated signal')\n photon_axis = ax.twinx()\n # This is not currently doing any correct 
for e.g. the loading rate.\n countToCameraPhotonEM = 0.018577 / (emGainSetting/200) # the float is is EM200. \n countToScatteredPhotonEM = 0.018577 / 0.07 / (emGainSetting/200)\n\n if onlyThisPic is not None:\n # calculate number of photons\n hamp = h_amp[:,onlyThisPic]*len(expansionPics[0][0]) # Horizontal \"un\"normalization for number of columns begin averaged.\n vamp = v_amp[:,onlyThisPic]*len(expansionPics[0]) \n hsigpx = hSigmas[:,onlyThisPic]/(16/64) # Convert from um back to to pixels.\n vsigpx = vSigmas[:,onlyThisPic]/(16/64)\n htotalCountsPerPic = bump.area_under(hamp, hsigpx)\n vtotalCountsPerPic = bump.area_under(vamp, vsigpx)\n hTotalPhotons = countToScatteredPhotonEM*htotalCountsPerPic\n vTotalPhotons = countToScatteredPhotonEM*vtotalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,onlyThisPic], marker='o', linestyle='', label=titles[onlyThisPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = 'r', label='Horizontal')\n photon_axis.plot(keyPlt, vTotalPhotons, marker='o', linestyle='', color = 'orange', label='Vertical')\n else:\n for whichPic in range(4):\n # See above comments\n amp = h_amp[:,whichPic]*len(expansionPics[0][0]) \n sig = hSigmas[:,whichPic]/(16/64) \n totalCountsPerPic = bump.area_under(amp, sig)\n hTotalPhotons = countToScatteredPhotonEM*totalCountsPerPic\n ax.plot(keyPlt, totalSignal[:,whichPic], marker='o', linestyle='', label=titles[whichPic]);\n photon_axis.plot(keyPlt, hTotalPhotons, marker='o', linestyle='', color = ['red', 'orange', 'yellow', 'pink'][whichPic]) \n ax.legend()\n photon_axis.legend()\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklines()]\n [tick.set_color('red') for tick in photon_axis.yaxis.get_ticklabels()]\n photon_axis.set_ylabel(r'Fit-Based Avg Scattered Photon/Img', color = 'r')\n if trackFitCenter:\n #numaxcol = 1: ax = axs\n #numaxcol = 2: trackfitcenter + plothSigmas: ax = axs[1]\n #numaxcol = 2: trackfitcenter + plotCounts: ax = axs[1]\n #numaxcol = 3: ax = axs[2]\n ax = (axs if numAxisCol == 1 else axs[-1])\n if onlyThisPic is not None:\n #ax.errorbar(keyPlt, hfitCenter[:,onlyThisPic], hFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n ax.errorbar(keyPlt, vfitCenter[:,onlyThisPic], vFitCenterErrs[:,onlyThisPic], marker='o', linestyle='', capsize=3, label=titles[onlyThisPic]);\n #def accel(t, x0, a):\n # return x0 + 0.5*a*t**2\n #accelFit, AccelCov = opt.curve_fit(accel, keyPlt*1e-3, hfitCenter[:,onlyThisPic], sigma = hFitCenterErrs[:,onlyThisPic])\n #fitx = np.linspace(keyPlt[0], keyPlt[-1])*1e-3\n #fity = accel(fitx, *accelFit)\n #ax.plot(fitx*1e3, fity)\n else:\n for whichPic in range(4):\n ax.errorbar(keyPlt, hfitCenter[:,whichPic], hFitCenterErrs[:,whichPic], marker='o', linestyle='', capsize=3, label=titles[whichPic]);\n #accelErr = np.sqrt(np.diag(AccelCov))\n fig2.legend()\n ax.set_ylabel(r'Fit Centers (pix)')\n ax.set_xlabel('time (ms)')\n \n if numAxisCol != 0:\n disp.display(fig2) \n \n if not forceNoAnnotation:\n for fid, isAnnotated in zip(fids, isAnnotatedList):\n if not isAnnotated:\n if type(fid) == int or type(fid) == type(''):\n if newAnnotation or not exp.checkAnnotation(fid, force=False, quiet=True, useBase=useBase):\n exp.annotate(fid, useBase=useBase)\n if clearOutput:\n disp.clear_output()\n if calcTemperature: \n for temp, err, label in zip(temps, tempErrs, ['Hor', 'Vert', 'Hor2D', 'Vert2D']): \n print(label + ' temperature = ' + misc.errString(temp*1e6, err*1e6) + 'uk')\n\n for fid in fids:\n if type(fid) == int:\n 
expTitle, _, lev = exp.getAnnotation(fid)\n expTitle = ''.join('#' for _ in range(lev)) + ' File ' + str(fid) + ': ' + expTitle\n disp.display(disp.Markdown(expTitle))\n with exp.ExpFile(fid) as file:\n file.get_basic_info()\n if trackFitCenter:\n pass\n #print('Acceleration in Mpix/s^2 = ' + misc.errString(accelFit[1], accelErr[1]))\n if transferAnalysisOpts is not None and showTferAnalysisPlots:\n colors, colors2 = misc.getColors(len(transferAnalysisOpts.initLocs()) + 2)#, cmStr=dataColor)\n pltShape = (transferAnalysisOpts.initLocsIn[-1], transferAnalysisOpts.initLocsIn[-2])\n # mp.plotThresholdHists([initThresholds[0][0],initThresholds[1][0]], colors, shape=pltShape)\n mp.plotThresholdHists([initThresholds[0][0], initThresholds[0][0]], colors, shape=[1,2])\n returnDictionary = {'images':images, 'fits':hFitParams, 'errs':hFitErrs, 'hSigmas':hSigmas, 'sigmaErrors':hSigmaErrs, 'dataKey':keyPlt, \n 'hTotalPhotons':hTotalPhotons, 'tempCalc':temps, 'tempCalcErr':tempErrs, 'initThresholds':initThresholds[0], \n '2DFit':fitParams2D, '2DErr':fitErrs2D, 'bgPics':picsForBg, 'dataLength':datalen}\n if returnPics: \n returnDictionary['pics'] = sortedStackedPics\n return returnDictionary", "def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first call, empirically, autograd\n # fires it at the end for this param, which makes sense.\n p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.\n assert p_tmp.grad_fn is not None\n grad_acc = p_tmp.grad_fn.next_functions[0][\n 0] # Gets its GradAccumulation object.\n handle = grad_acc.register_hook(\n functools.partial(self._post_backward_hook, p))\n p._shard_bwd_hook = (grad_acc, handle)", "def fillNDIGap(self, knn):\n nirrho = self.readImage(self.nirfile, self.rhoband).astype(np.float_)\n nirnhits = self.readImage(self.nirfile, self.nhitsband).astype(np.int)\n nirmask = self.readImage(self.nirfile, self.maskband).astype(np.bool_)\n \n swirrho = self.readImage(self.swirfile, self.rhoband).astype(np.float_)\n swirnhits = self.readImage(self.swirfile, self.nhitsband).astype(np.int)\n swirmask = self.readImage(self.swirfile, self.maskband).astype(np.bool_)\n\n hitmask = np.logical_and(np.greater(nirnhits, 0), np.greater(swirnhits, 0))\n if not hitmask.any():\n # no valid hit at all!\n print \"Error, no shot has returns! 
Check your data\"\n sys.exit()\n xhit, yhit = np.where(hitmask)\n nirrhohit = nirrho[hitmask]/nirnhits[hitmask]\n swirrhohit = swirrho[hitmask]/swirnhits[hitmask]\n\n ndi = np.zeros_like(nirrho)\n mask = np.zeros_like(nirrho, dtype=int) + 3\n tmpflag = np.logical_and(np.invert(nirmask), np.invert(swirmask))\n mask[tmpflag] = 0\n \n ndihit = (nirrhohit - swirrhohit) / (nirrhohit + swirrhohit)\n ndi[hitmask] = ndihit\n mask[hitmask] = 1\n \n nirgapmask = np.logical_and(np.equal(nirnhits, 0), np.greater(swirnhits, 0))\n swirgapmask = np.logical_and(np.greater(nirnhits, 0), np.equal(swirnhits, 0))\n\n if (not nirgapmask.any()) and (not swirgapmask.any()):\n # no gap\n print \"No fillable gap.\"\n return ndi, mask\n\n gapmask = np.logical_or(nirgapmask, swirgapmask)\n xgap, ygap = np.where(gapmask)\n\n X = np.hstack((xhit.reshape(len(xhit), 1), yhit.reshape(len(yhit), 1))).astype(np.float32)\n T = np.hstack((xgap.reshape(len(xgap), 1), ygap.reshape(len(ygap), 1))).astype(np.float32)\n ndigap = self.fillGap(X, ndihit, T, knn)\n ndi[gapmask] = ndigap\n mask[gapmask] = 2\n\n self.ndi = ndi\n self.mask = mask\n \n return ndi, mask", "def _apply_image_filters(self, image, filters=[]):\n derivative = image\n for filter in filters:\n derivative = filter(derivative)\n return derivative", "def SplitGap(data,gapsize,medwin,fluxdiff):\n \n # defining new empty lists and stuff\n pcount=0\n istamps=[]\n outData={}\n \n data['x'].mask = data['UnMasked']\n data['y'].mask = data['UnMasked']\n data['yerr'].mask = data['UnMasked']\n \n # median smoothing the lightcurve\n mvavg1 = movingMedian(data['y'],medwin)\n mvavg1 = num.append(mvavg1,mvavg1[-1])\n mvavg1 = data['y']\n # first derivative of smoothed lightcurve\n diff1 = num.diff(mvavg1)\n diff1 = num.hstack((diff1,diff1[-1]))\n \n # second derivative of smoothed lightcurve\n diff2 = num.diff(diff1)\n diff2 = num.hstack((diff2[-1],diff2))\n\n # compute ourlier resistant sigma\n sig = compute1Sigma(diff1)\n #pylab.plot(diff1,'g.')\n #pylab.plot([0,6000],[5*sig,5*sig],'k-')\n #pylab.plot([0,6000],[3*sig,3*sig],'k-')\n #pylab.plot([0,6000],[1*sig,1*sig],'k-')\n #pylab.show()\n\n # The grand master loop >=}\n # to make portion slices\n for i in range(len(data['x'])-1):\n dt = data['x'][i+1]- data['x'][i]\n j1 = max(0,i-medwin)\n j2 = i + medwin\n if pcount == 0:\n i0 = 0\n if pcount > 0:\n i0 = i1+1\n if dt > gapsize:\n i1 = i\n istamps.append([i0,i1])\n pcount += 1\n #if num.abs(diff1[i]) > 5*sig:\n #i1 = i\n #istamps.append([i0,i1])\n #pcount += 1\n #print num.abs(diff1[i]/data['y'][i]), diff1[i], data['y'][i], diff1[i+1], data['y'][i+1]\n #print i, ' test flux gap'\n i1 = i+1\n istamps.append([i0,i1])\n \n \n \n if data['bool']==False:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':False}\n else:\n # Applying slices\n for j in range(len(istamps)):\n #print istamps[j][0], istamps[j][1]\n outData['portion' + str(j+1)] = {'kid':data['kid'],'x':data['x'][istamps[j][0]:istamps[j][1]+1], 'y':data['y'][istamps[j][0]:istamps[j][1]+1], 'yerr':data['yerr'][istamps[j][0]:istamps[j][1]+1], 'TransitMask':data['TransitMask'][istamps[j][0]:istamps[j][1]+1],'UnMasked':data['UnMasked'][istamps[j][0]:istamps[j][1]+1],'bool':True}\n \n return outData", "def 
batch_stack_clip(indir, outdir, shape,bands=None,mask_band=False, remove_stack=False):\r\n walk = os.walk(indir)\r\n names = walk.next()[1]\r\n for name in names:\r\n # Clip and stack the specified bands\r\n stackpath = outdir + name + '.TIF'\r\n clippath = outdir + name + '_CLIP.TIF'\r\n stack_layers(indir + name + '/', stackpath, bands=bands)\r\n clip_raster(stackpath, clippath, shape)\r\n # Optionally move the stacked raster\r\n if remove_stack:\r\n os.remove(stackpath)\r\n\r\n # Optionally clip the mask band.\r\n if mask_band:\r\n cloud_path = os.path.join(indir, name)\r\n try:\r\n cloud_path = glob.glob(cloud_path + '/*cfmask.tif')[0]\r\n cloud_clip_path = outdir + name + '_cfmask_CLIP.tif'\r\n clip_raster(cloud_path, cloud_clip_path, shape)\r\n except IndexError:\r\n print('WARNING: NO CFMASK FOUND FOR ' + cloud_path)\r\n\r\n print(name + ' stacked and clipped')", "def bandstop_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=True)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def frame_fix_badpix_isolated(array, bpm_mask=None, sigma_clip=3, num_neig=5,\n size=5, protect_mask=0, cxy=None, mad=False, \n ignore_nan=True, verbose=True, full_output=False):\n if array.ndim != 2:\n raise TypeError('Array is not a 2d array or single frame')\n if size % 2 == 0:\n raise TypeError('Size of the median blur kernel must be an odd integer')\n\n if bpm_mask is not None:\n bpm_mask = bpm_mask.astype('bool')\n\n if verbose: start = time_ini()\n\n if num_neig > 0:\n neigh = True\n else:\n neigh = False\n\n frame = array.copy()\n if cxy is None:\n cy, cx = frame_center(frame)\n else:\n cx, cy = cxy\n \n if bpm_mask is None:\n ori_nan_mask = np.where(np.isnan(frame))\n ind = clip_array(frame, sigma_clip, sigma_clip, neighbor=neigh,\n num_neighbor=num_neig, mad=mad)\n bpm_mask = np.zeros_like(frame)\n bpm_mask[ind] = 1\n if ignore_nan:\n bpm_mask[ori_nan_mask] = 0\n if protect_mask:\n cir = disk((cy, cx), protect_mask, shape=bpm_mask.shape)\n bpm_mask[cir] = 0\n bpm_mask = bpm_mask.astype('bool')\n\n smoothed = median_filter(frame, size, mode='mirror')\n frame[np.where(bpm_mask)] = smoothed[np.where(bpm_mask)]\n array_out = frame\n count_bp = np.sum(bpm_mask)\n \n if verbose:\n msg = \"/nDone replacing {} bad pixels using the median of neighbors\"\n print(msg.format(count_bp))\n timing(start)\n \n if full_output:\n return array_out, bpm_mask\n else:\n return array_out", "def analysis_dFF_map(self):\r\n\r\n \r\n\r\n print ('Starting dF/F analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # smoothwin = int(self.imageData.shape[1]/8.)\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n \r\n\r\n mpl.figure(99)\r\n\r\n mpl.imshow(avgimg, vmin=0, vmax=np.max(np.max(avgimg, axis=0), axis=0))\r\n\r\n # self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n # self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n imgdatasm = scipy.ndimage.filters.gaussian_filter(self.imageData,[0,2,2],order=0,output=None,mode='reflect',cval=0.0,truncate=4.0)\r\n # field correction: smooth the average image, subtract it from the imagedata, then add back the mean value\r\n avgimgsm = scipy.ndimage.filters.gaussian_filter(avgimg, 2, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # avgimgsm = 
scipy.ndimage.filters.gaussian_filter(avgimg, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n #self.imageData = (self.imageData-avgimgsm)+ self.meanimagevalue\r\n\r\n mpl.figure(98)\r\n mpl.imshow(avgimgsm,vmin=0, vmax=np.max(np.max(avgimgsm, axis=0), axis=0))\r\n mpl.figure(97)\r\n mpl.imshow(np.mean(imgdatasm,axis=0))\r\n self.n_times = self.timebase\r\n\r\n periodsize = int(self.period*self.framerate)\r\n print('periodsize: ',periodsize)\r\n\r\n # windowsize = int(self.freqperiod*self.framerate) # window size for every response\r\n\r\n # r = range(0, self.imageData.shape[0], windowsize)\r\n\r\n sig = np.reshape(imgdatasm, (self.nrepetitions, periodsize, \r\n\r\n self.imageData.shape[1], self.imageData.shape[2]), order='C')\r\n\r\n delresp=np.zeros([19,256,256])\r\n repback = np.mean(sig[:,1:4,:,:],axis=1)\r\n resp = np.mean(sig[:,5:9,:,:],axis=1)\r\n for counter in range(19):\r\n delresp[counter,:,:]=(resp[counter,:,:]-repback[counter,:,:])/repback[counter,:,:]\r\n quot=np.mean(delresp,axis=0)\r\n quot=-quot\r\n print ('shape of quot: ', np.shape(quot))\r\n # quot=(resp-repback)/repback\r\n # quot[quot>0]=0\r\n # quot=-1000*quot\r\n\r\n mpl.figure(7)\r\n mpl.imshow(quot,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n\r\n quotsm = scipy.ndimage.filters.gaussian_filter(quot, 3, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n mpl.figure(8)\r\n mpl.imshow(quotsm,cmap=mpl.cm.binary)\r\n mpl.colorbar()\r\n \r\n # bl = np.mean(sig[:, range(0, sig.shape[1], windowsize), :, :], axis=0)\r\n\r\n # bl = scipy.ndimage.filters.gaussian_filter(bl, smoothwin, order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n\r\n\r\n # print (' windowsize: ', windowsize)\r\n\r\n # print (' periodsize: ', periodsize)\r\n # mc = matplotlib.cm\r\n\r\n # only use sequential maps here\r\n\r\n # clist = [mc.Reds, mc.YlOrBr, mc.Oranges, mc.Greens, mc.GnBu, mc.Blues, mc.RdPu, mc.Purples,mc.Reds,mc.Greens,mc.Blues,mc.Reds,mc.Reds,mc.Reds,mc.Reds]\r\n # clist2 = ['red', 'orange', 'yellow', 'green', 'blue', 'indigo', 'violet', 'black','red','purple','green','blue','red','red','red','red']\r\n\r\n cs = {}\r\n\r\n # sigd = np.zeros((bl.shape[0], sig.shape[2], sig.shape[3]))\r\n# \r\n # localmax = {}\r\n\r\n # sigmax = 0.\r\n# \r\n # kernel = np.ones((5, 5))\r\n\r\n # psf = kernel / np.sum(kernel)\r\n\r\n # compute dF/F, and get maximum over all frequencies\r\n\r\n print (' sig shape: ', sig.shape)\r\n\r\n # print (' bl shape: ', bl.shape)\r\n\r\n # smax = np.zeros(bl.shape[0])\r\n\r\n # for i in range(bl.shape[0]):\r\n\r\n # sigd[i] = (np.mean(np.max(sig[:,range(i*windowsize, i*windowsize+windowsize),:,:], axis=0), axis=0) - bl[i,:,:])/bl[i,:,:]\r\n\r\n # sigd[i] = sigd[i]**2.0\r\n\r\n # smooth\r\n\r\n #sigd[i] = scipy.ndimage.filters.gaussian_filter(sigd[i], 1., order=0, output=None, mode='reflect', cval=0.0, truncate=4.0)\r\n\r\n # deconvolve\r\n\r\n # sigd[i] = restoration.richardson_lucy(sigd[i], psf, 5)\r\n\r\n# sm = sigd[i].max().max()\r\n\r\n# if sm > sigmax:\r\n\r\n# sigmax = sm\r\n\r\n# smax[i] = sm\r\n\r\n# print( ' i, sm: ', i, sm)\r\n\r\n# # now process for display\r\n\r\n# print (' sigd shape: ', sigd.shape)\r\n\r\n# wdat = np.mean(sig, axis=0)\r\n\r\n# wds = wdat.shape\r\n\r\n# print('wdat shape: ', wds)\r\n\r\n# # print (range(int(wds[1]/2.), int(3.*wds[1]/4.)), range(int(wds[2]/2.), int(3.*wds[2]/4.)))\r\n\r\n# print( 'reduced shape: ', wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))].shape)\r\n\r\n# wp = 
wdat[:,range(int(wds[1]/2.),int(3.*wds[1]/4.)),:][:,:,range(int(wds[2]/2.), int(3.*wds[2]/4.))]\r\n\r\n# wp = np.mean(np.mean(wdat, axis=1), axis=1)\r\n\r\n# mpl.figure(1)\r\n\r\n# mpl.plot(np.linspace(0., len(wp)*1./self.framerate, num=len(wp)), wp)\r\n\r\n\r\n\r\n# mpl.figure(2)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# sigd[i][sigd[i] < self.threshold*sigmax] = 0.\r\n\r\n# # find center of mass of areas above threshold\r\n\r\n# # mass = sigd[i].copy()\r\n\r\n# # mass[sigd[i] > 0.] = 1.\r\n\r\n# # structuring_element = [[0,1,0],[1,1,1],[0,1,0]]\r\n\r\n# # segmentation, segments = scipy.ndimage.label(mass, structuring_element)\r\n\r\n# # coords = scipy.ndimage.center_of_mass(sigd[i], segmentation, range(1,segments+1))\r\n\r\n# # xcoords = np.array([x[1] for x in coords])\r\n\r\n# # ycoords = np.array([x[0] for x in coords])\r\n\r\n# # cs[i] = (xcoords, ycoords)\r\n\r\n\r\n\r\n# # Calculating local maxima\r\n\r\n# lm = skif.peak_local_max(sigd[i], min_distance=2, threshold_rel=0.25, exclude_border=False, \r\n\r\n# indices=True, num_peaks=10, footprint=None, labels=None)\r\n\r\n# localmax[i] = [(m[0], m[1], sigd[i][(m[0], m[1])]) for m in lm]\r\n\r\n# # print ('i, local max: ',i, localmax)\r\n\r\n# mpl.subplot(5,5,i+1)\r\n# print ('shape of sigd: ',[np.shape(sigd),i])\r\n\r\n# imga1 = mpl.imshow(sigd[i], cmap=clist[i], vmin=0, origin='lower')\r\n\r\n# if len(localmax[i]) > 0:\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# else:\r\n\r\n# continue\r\n\r\n# scattersize = 30.\r\n\r\n# for k, lm in enumerate(localmax[i]):\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], edgecolors='k',\r\n\r\n# s=scattersize*lm[2]/max_fr, linewidths=0.125, alpha=0.5)\r\n\r\n# mpl.subplot(6,5,i+15+1)\r\n\r\n# wr = range(i*windowsize, i*windowsize+windowsize)\r\n\r\n# # print (' wr: len, min max: ', len(wr), min(wr), max(wr))\r\n\r\n# wmax = 0.\r\n\r\n# for lmax in localmax[i]: # was xcoords\r\n\r\n# wave = wdat[wr, lmax[0],lmax[1]]\r\n\r\n# wdff = (wave-wave[0])/wave[0]\r\n\r\n# if np.max(wdff) > wmax:\r\n\r\n# wmax = np.max(wdff)\r\n\r\n# mpl.plot(np.linspace(0., len(wave)*1./self.framerate, num=len(wave)),\r\n\r\n# wdff, color=clist2[i])\r\n\r\n# mpl.ylim(-0.1*wmax, wmax)\r\n\r\n# fig = mpl.figure(3)\r\n\r\n# for i in range(sigd.shape[0]):\r\n\r\n# if len(localmax[i]) == 0:\r\n\r\n# continue\r\n\r\n# max_fr = np.max([m[2] for m in localmax[i]])\r\n\r\n# for lm in localmax[i]:\r\n\r\n# mpl.scatter(lm[1], lm[0], marker='o', c=clist2[i], \r\n\r\n# s=scattersize*lm[2]/max_fr, alpha=0.5, edgecolors='k')\r\n\r\n# mpl.ylim(0, sigd.shape[2])\r\n\r\n# mpl.xlim(0, sigd.shape[1])\r\n\r\n# mpl.axis('equal')\r\n\r\n mpl.show()\r\n\r\n print (' DF/F analysis finished.\\n')", "def ffill(\n self: FrameLike,\n axis: Optional[Axis] = None,\n inplace: bool_type = False,\n limit: Optional[int] = None,\n ) -> FrameLike:\n return self.fillna(method=\"ffill\", axis=axis, inplace=inplace, limit=limit)", "def _dipole_forwards(fwd_data, whitener, rr, n_jobs=1):\n B = _compute_forwards_meeg(rr, fwd_data, n_jobs, verbose=False)\n B = np.concatenate(B, axis=1)\n B_orig = B.copy()\n\n # Apply projection and whiten (cov has projections already)\n B = np.dot(B, whitener.T)\n\n # column normalization doesn't affect our fitting, so skip for now\n # S = np.sum(B * B, axis=1) # across channels\n # scales = np.repeat(3. 
/ np.sqrt(np.sum(np.reshape(S, (len(rr), 3)),\n # axis=1)), 3)\n # B *= scales[:, np.newaxis]\n scales = np.ones(3)\n return B, B_orig, scales", "def band_selector(image, colors):\n # convert band to list for downstream compatibilty, if necessary\n if len(colors) == 3: #then it's an RGB image\n\n #housekeeping\n try:\n nbands = len(colors['band'])\n except: \n colors['band'] = [colors['band']]\n nbands = len(colors['band'])\n\n try:\n len(colors['dark_on_light'])\n except:\n colors['dark_on_light'] = [colors['dark_on_light']]\n\n if colors['colorspace'] is 'gray' or colors['colorspace'] is 'grey':\n colors['band'] = [0]\n nbands = 1\n if len(colors['dark_on_light']) > 1:\n raise ValueError(\n \"\"\"Can't interpret multiple arguments for 'dark_on_light' when \n 'colorspace' is {}.\n \"\"\".format(colors['colorspace'])\n )\n \n if nbands != len(colors['dark_on_light']):\n raise ValueError(\n \"\"\"Number of items in `colors['dark_on_light']` doesn't\n equal the number of bands in `colors['band']`!\"\"\"\n )\n\n # convert colorspace if necessary\n try:\n working_image = getattr(color, \"rgb2\" + colors['colorspace'].lower())(image)\n except:\n working_image = image.copy()\n if colors['colorspace'].lower() != 'rgb':\n raise ValueError(\n \"\"\"Didn't recognize specified colorspace. \n See skimage.color.rgb2* for options.\"\"\"\n )\n \n # pull bands\n if len(working_image.shape) == 3: # excludes rgb2gray\n working_image = [img_split(working_image)[i] for i in colors['band']]\n else:\n working_image = [working_image]\n nbands = 1\n \n else: # it's a black and white image\n nbands = 1\n working_image = [image.copy()]\n if len(image.shape) != 2:\n raise ValueError(\n \"\"\"Your `color` argument suggested a grayscale image, but it has \\\n multiple bands!\"\"\"\n )\n \n return(working_image)", "def filterBankPatch(img, width=5):\n half = width / 2 # e.g. 
for 5, it's 2\n imgE = Views.extendBorder(img)\n ops = [offset(imgE, [x, y]) for x in xrange(-half, half + 1) for y in xrange(-half, half + 1)]\n return ops", "def rebin_image(self):\r\n\r\n # bin the image down to smaller size by combining groups of bins\r\n\r\n print('Rebinning image')\r\n\r\n sh = self.imageData.shape\r\n\r\n if self.binsize > 1 or self.zbinsize > 1:\r\n\r\n nredx = int(sh[1]/self.binsize)\r\n\r\n nredy = int(sh[2]/self.binsize)\r\n\r\n nredz = int(self.imageData.shape[0]/self.zbinsize)\r\n print('nredx,nredy,nredz: ',[nredx,nredy,nredz])\r\n\r\n self.imageData = self.bin_ndarray(self.imageData, new_shape=(nredz, nredx, nredy), operation='mean')\r\n\r\n if nredz > 1:\r\n\r\n beforeFrames = self.nFrames\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.framerate = self.nFrames/(self.nrepetitions*self.period)\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n print(' Image Rebinned')\r\n\r\n self.print_image_info()", "def filterBankEdges(img):\n imgE = Views.extendBorder(img)\n opTop = as2DKernel(imgE, [-1]*3 + [0]*3 + [1]*3)\n opBottom = as2DKernel(imgE, [1]*3 + [0]*3 + [-1]*3)\n opLeft = as2DKernel(imgE, [-1, 0, 1] * 3)\n opRight = as2DKernel(imgE, [1, 0, -1] * 3)\n return [opTop, opBottom, opLeft, opRight]", "def eopatch_dataframe_to_rasterband(eopatch_da, feature_name, crs, band,\n rgb_factor=None):\n timestamps = eopatch_da.coords['time'].values\n band = list(band) if isinstance(band, tuple) else band\n if rgb_factor: \n bands = eopatch_da[..., band] * rgb_factor\n else:\n bands = eopatch_da[..., band]\n\n bands = bands.rename({string_to_variable(feature_name, '_dim'): 'band'})\\\n .transpose('time', 'band', 'y', 'x')\n x_values, y_values = new_xarray_coordinates(eopatch_da, crs, CRS.POP_WEB)\n eopatch_band = xr.DataArray(data=np.clip(bands.data, 0, 1),\n coords={'time': timestamps,\n 'band': band,\n 'y': np.flip(y_values),\n 'x': x_values},\n dims=('time', 'band', 'y', 'x'))\n return eopatch_band" ]
[ "0.7110317", "0.704121", "0.5153824", "0.51472473", "0.50976783", "0.5092685", "0.50910985", "0.5038838", "0.4983372", "0.4950319", "0.48741138", "0.48643586", "0.4801015", "0.47917843", "0.47886074", "0.47822264", "0.4780743", "0.47651857", "0.47144574", "0.4703689", "0.4698323", "0.4694441", "0.46926516", "0.46833235", "0.46519268", "0.4648738", "0.464113", "0.4614552", "0.4609973", "0.46032184", "0.4596996", "0.4592914", "0.4591073", "0.4579161", "0.45761082", "0.45627517", "0.45587406", "0.45341283", "0.45252696", "0.452128", "0.45192602", "0.4506325", "0.44901127", "0.44816026", "0.4481118", "0.44731784", "0.44504675", "0.44473407", "0.44432545", "0.4441682", "0.4434921", "0.4430162", "0.44190434", "0.44138", "0.43993", "0.4367681", "0.4366794", "0.43543154", "0.43494743", "0.43376106", "0.43329108", "0.43268254", "0.43163332", "0.42997268", "0.42886707", "0.42877477", "0.42877477", "0.42863405", "0.42737842", "0.4272946", "0.42717338", "0.42702797", "0.42639035", "0.426117", "0.4260591", "0.42537943", "0.42529437", "0.4244642", "0.42436793", "0.42413166", "0.42382675", "0.42341945", "0.42332074", "0.4232781", "0.4231155", "0.4227543", "0.42237473", "0.42179847", "0.42158464", "0.4210513", "0.42100224", "0.41961402", "0.41952047", "0.4186083", "0.41860682", "0.4180543", "0.41763055", "0.4174091", "0.41718778", "0.41709453" ]
0.745405
0
Function to calculate the total number of times a pixel changed classes across the time series
def calculateNumberOfChanges(image, bandNames):\n    #Get a collection of images where each image has 2 bands: classifications for year(i) and classifications for year(i+1)\n    lc_one_change_col = npv.getYearStackIC(image,bandNames, band_indices=[0,1])\n    #Get a collection of images where each image represents whether there was change from year(i) to year(i+1) and convert to an image\n    lc_one_change_col = lc_one_change_col.map(npv.LC_OneChange)\n    lc_one_change_image = lc_one_change_col.toBands()\n    #Calculate the number of changes by applying the sum reducer\n    lc_sum_changes = lc_one_change_image.reduce(ee.Reducer.sum().unweighted())\n    return lc_sum_changes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_count(df):\r\n \r\n return df[\"class\"].value_counts()", "def resultCounter(detections):\n counter = 0\n for attribute, value in classIterator(detections):\n if 'crease' in attribute:\n counter += len(value)\n return counter", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def get_num_classes(df):\n classes = df.groupby('class_label')\n return classes.ngroups", "def countDiff(self, color):\n count = 0\n for y in range(self.n):\n for x in range(self.n):\n if self[x][y]==color:\n count += 1\n if self[x][y]==-color:\n count -= 1\n return count", "def class_callcount(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += count\n return rval", "def get_num_classes(self):", "def class_callcount(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += count\r\n return rval", "def get_gini(self, rows):\n label_count = defaultdict(int)\n total_count = 0\n for row in rows:\n label = row[self.target_attribute]\n label_count[label] += 1\n total_count += 1\n return 1 - sum([np.square(float(label_count[label])/total_count) for label in label_count.keys()])", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def class_counts(rows):\n counts = {} # a dictionary of label -> count.\n for row in rows:\n # in our dataset format, the label is always the last column\n label = row[-1]\n if label not in counts:\n counts[label] = 0\n counts[label] += 1\n return counts", "def compute_performance(cm):\n\n tp = np.diagonal(cm).astype(np.float)\n tpfp = np.sum(cm, axis=0).astype(np.float) # sum of each col\n tpfn = np.sum(cm, axis=1).astype(np.float) # sum of each row\n acc = np.sum(tp) / np.sum(cm)\n precision = tp / tpfp\n recall = tp / tpfn\n f1 = (2 * precision * recall) / (precision + recall)\n mf1 = np.mean(f1)\n\n total = np.sum(cm)\n n_each_class = tpfn\n\n return total, n_each_class, acc, mf1, precision, recall, f1", "def _gini(self, class_counts):\n smoothed = 1.0 + tf.slice(class_counts, [0, 1], [-1, -1])\n sums = tf.reduce_sum(smoothed, 1)\n sum_squares = tf.reduce_sum(tf.square(smoothed), 1)\n\n return 1.0 - sum_squares / (sums * sums)", "def class_nodes(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), count in self.apply_callcount.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += 1\n return rval", "def 
number_of_new_components(self):\n t_low = self.lower_binary_tree().to_tilting()\n t_up = self.upper_binary_tree().to_tilting()\n return len([p for p in t_low if p in t_up])", "def get_number_of_measurement(self):\n num_of_meas = 0\n for time in self.mdvtc.keys():\n num_of_meas = num_of_meas + self.mdvtc[time].get_number_of_measurement()\n #\n return num_of_meas", "def get_num_instances(im, non_building_labels):\n return np.setdiff1d(im, non_building_labels)", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def get_nodes_pixel_count(self):\n sum_count = self.pixel_count\n for i in range(8):\n node = self.children[i]\n if node:\n sum_count += node.pixel_count\n return sum_count", "def count_classes(labels):\n class_dict = {}\n for image in labels:\n for row in image:\n for label in row:\n if label not in class_dict:\n class_dict[label] = 1\n else:\n class_dict[label] += 1\n return class_dict", "def counts(e, x):\n arr = np.asarray(arr)\n return len(np.where(arr == x)[0])", "def calculate_durations(labels, num_classes):\n num_segments = len(labels)\n durations = np.zeros((num_segments, num_classes), dtype='int32')\n for segment_index, (segment_indices, segment_labels) in enumerate(labels):\n segment_durations = np.diff(segment_indices, prepend=0)\n for label in range(num_classes):\n durations[segment_index, label] = segment_durations[segment_labels == label].sum()\n return durations", "def class_nodes(self):\r\n # timing is stored by node, we compute timing by class on demand\r\n rval = {}\r\n for node, count in self.apply_callcount.items():\r\n typ = type(node.op)\r\n rval.setdefault(typ, 0)\r\n rval[typ] += 1\r\n return rval", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def part_1() -> int:\n initial_input = _load_input()\n rows = len(initial_input)\n cols = len(initial_input[0])\n\n input = initial_input.copy()\n total_glow_count = 0\n\n for _ in range(100):\n flashed = list()\n for row in range(rows):\n for col in range(cols):\n coords = [[col, row]]\n new_input, glow_count = _get_glow_counts(coords, input, flashed)\n input = new_input\n total_glow_count += glow_count\n\n return total_glow_count", "def test_counts(self):\n c = array([5,0,1,1,5,5])\n obs = counts(c)\n exp = array([1,2,0,0,0,3])\n self.assertEqual(obs, exp)\n d = array([2,2,1,0])\n obs = counts(d, obs)\n exp = array([2,3,2,0,0,3])\n self.assertEqual(obs, exp)", "def counts_scan(val_addr,val_timestamps,pixels,dwell_time):\n dwell_time = int(dwell_time * 1e5)\n x,y = pixels.shape\n counts = np.zeros((x,y,23))\n for i in range(x):\n for j in range(y):\n if i%2 ==0:\n delta= 1\n while val_timestamps[int(pixels[i,j]+delta)]- val_timestamps[int(pixels[i,j])]<dwell_time:\n if val_addr[int(pixels[i,j]+delta)]<25:\n counts[i,j,val_addr[int(pixels[i,j]+delta)]] += 1\n delta += 1\n else:\n delta= 1\n\n while val_timestamps[int(pixels[i,y-1-j]+delta)]- val_timestamps[int(pixels[i,y-1-j])]<dwell_time:\n if val_addr[int(pixels[i,y-1-j]+delta)]<25:\n counts[i,y-1-j,val_addr[int(pixels[i,y-1-j]+delta)]] += 1\n delta += 1\n\n return(counts)", "def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n 
# For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()", "def number_of_running_metrics(self):\n try:\n return len(self.get_classads(\"OSGRSV==\\\"metrics\\\"\"))\n except TypeError:\n self.rsv.log(\"ERROR\", \"Classad parsing failed, unable to count running metrics\")", "def count_difference(patch1, patch2):\n\n\treturn np.sum(np.square(patch1 - patch2))", "def count_nonblack_np(img):\n return img.any(axis=-1).sum()", "def get_num_of_images(self):", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def getEpochCount(rawStimData, epochColumn=3):\n # get the max epoch count from the rawStimData\n # 4th column is the epoch number\n # add plus 1 since the min epoch no is zero\n \n # BG edit: Changed the previous epoch extraction, which uses the maximum \n # number + 1 as the epoch number, to a one finding the unique values and \n # taking the length of it\n epochCount = np.shape(np.unique(rawStimData[:, epochColumn]))[0]\n print(\"Number of epochs = \" + str(epochCount))\n\n return epochCount", "def pred_to_01_loss(self, class_calls):\n return self.N - np.equal(self.y, class_calls).sum()", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def count_target_class_data(data, target_class):\n count = 0\n for row in data:\n if row[0] == target_class:\n count += 1\n\n return count", "def numPixels(self):\n self._logger.debug(\"numPixels\")\n return self.count", "def inst_class_stats(df, col='num_pkts'):\n classes = df.groupby('class_label')\n stat = classes[col].describe()\n return stat", "def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations", "def intensity(self) -> int:", "def get_num_measured_outputs(self):\n i = 0\n for o in self.outputs:\n if o.is_measured_output():\n i += 1\n return i", "def get_nsatpix( self, step ):\n \n return np.sum( self.get_image_step( step, divide_by_exptime=False ) >= 1.6e4 )", "def get_transition_probs(x, classes=None):\n ### ==============================================================================================================\n ### define list_ind as the list of unique ID if not defined\n if not classes:\n classes = np.unique(x.values)\n ### 
==============================================================================================================\n transitions_matrix = []\n for cl in classes:\n list_mat = []\n list_freq = []\n for yi in np.unique(x.index.year):\n ### select the year\n mat = x.ix[str(yi),].values.flatten()\n ### select the class cl index + 1\n i = np.where(mat == cl)[0] + 1\n ### make sure we clip\n i = np.delete(i, np.where(i >= len(mat)))\n ### count the total number of days following the cluster \"cl\"\n list_freq.append(len(i))\n mat = mat[i,].tolist()\n ### count the number of occurences of each of the N clusters\n list_mat.append([mat.count(c) for c in classes])\n list_mat = np.array(list_mat)\n list_mat = list_mat.sum(0) * 1.0\n list_mat = list_mat / np.array(list_freq).sum()\n transitions_matrix.append(list_mat)\n transitions_matrix = np.array(transitions_matrix)\n return classes, transitions_matrix", "def compute_num_tracks(x_offset: int, y_offset: int,\n x: int, y: int, track_info: Dict[int, int]):\n x_diff = x - x_offset\n y_diff = y - y_offset\n result = 0\n for length, num_track in track_info.items():\n if x_diff % length == 0 and y_diff % length == 0:\n # it's the tile\n result += num_track\n return result", "def calculate_Cls_obs(Cls):\n Cls_obs = np.copy(Cls)\n counter = 0\n for i in range(nbins):\n for j in range(0,i+1):\n if i == j:\n shotnoise = sn**2/nzs[i]\n else: # cross spectra are not contaminated by shot noise\n shotnoise = 0\n\n Cls_obs[counter] += shotnoise\n counter +=1\n\n return Cls_obs", "def num_classes():\n return NUM_CLASSES", "def count_from_top(img):\n pixel_count = 0\n for row in img:\n unique_pixel_vals = np.unique(row)\n if 255 not in unique_pixel_vals: # ignore shading (values between 0-255)\n pixel_count += 1\n else:\n return pixel_count", "def numberDensity(frametracks):\n ftr = frametracks\n return ftr.x.count() / ((ftr.x.max() - ftr.x.min()) * (ftr.y.max() - ftr.y.min()))", "def num_classes_a(self):\r\n return self._num_classes_a", "def count(self,color):\n count = 0\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n if(self.gameState[x,y]==color):\n count+=1\n return count", "def total_histogram_diff(pixel_diff):\n return sum(i * n for i, n in enumerate(pixel_diff.histogram()))", "def CountPixels(image, N_levels): \n \n pixel_count = np.zeros((N_levels, 1));\n for i in range(N_levels):\n pixel_count[i] = np.sum(image == i);\n \n pixel_count_normalized = pixel_count / (image.shape[0] * image.shape[1]);\n \n return pixel_count, pixel_count_normalized;", "def pic_val_count(img_name):\n pic = cv2.imread(img_name)\n pic = cv2.cvtColor(pic, cv2.COLOR_BGR2RGB)\n\n reshaped_pic = np.reshape(pic, (pic.shape[0]*pic.shape[1], 3))\n reshaped_pic = reshaped_pic.tolist()\n reshaped_pic = [tuple(pixel) for pixel in reshaped_pic]\n \n col_count = []\n for i in set(reshaped_pic):\n (col_val, num_pic) = i, reshaped_pic.count(i)\n col_count.append((col_val, num_pic)) \n return col_count", "def get_num_instances(df):\n non_nan = df.dropna(axis='columns') # nan cols would not have valid counts\n classes = non_nan.groupby('class_label')\n counts = classes.count() # count instances in each group (class)\n first_column = counts.iloc[:, 1] # we could get any column instead\n return first_column", "def cps(self):\n return self.datacounts / self.exptime", "def count_pegs(self):\r\n count = 0\r\n\r\n for i in range(0, len(self.matrix)):\r\n for j in range(0, len(self.matrix[i])):\r\n if self.matrix[i][j] == \"1\":\r\n count += 1\r\n\r\n return count", "def count(self):\n 
self.scale(end_scale=(1.5, 1.5), duration=1.5, \n rel_origin=(0.5, 0.8), harmonic=True, loop=True)", "def gen_img_counts(img_path, model):\n\n img = transform(Image.open(img_path).convert('RGB'))\n print(type(img))\n output = model(img.unsqueeze(0))\n pred_count = int(output.detach().cpu().sum().numpy())\n return pred_count", "def compute_metrics(self, target, data, weight):\n pred = self.predict(data, weight)\n assert len(pred) == len(target)\n # Calculate the mis-classification rate:\n N = len(pred)\n pred = np.reshape(pred, (N,))\n target = np.reshape(target, (N,))\n nb_misclass = np.count_nonzero(target - pred)\n return nb_misclass / N", "def getCounts(self):\n ret = [0]*len(self.numToLabel)\n for block in self.blocks:\n for label in block[1]: ret[label] += 1\n return ret", "def numEvents(self):\n offsets = self.baxH5._offsetsByHole[self.holeNumber]\n return offsets[1] - offsets[0]", "def get_percentage_false_class(arr_of_results):\n\n count_success = np.zeros_like(arr_of_results[:,0], dtype=float)\n count_correct_prediction = 0\n\n for i in range(len(arr_of_results[0])):\n use = True\n for result in arr_of_results[:,i]:\n if result[\"image_target\"] != result[\"prediction_image\"] or result[\"std_noise\"] == 0:\n use = False\n if use:\n count_correct_prediction += 1\n i2 = 0\n for result in arr_of_results[:,i]:\n if result[\"success\"]:\n count_success[i2] += 1\n i2 += 1\n\n\n errors = proportion_confint(count_success, count_correct_prediction)\n count_success = count_success/count_correct_prediction\n errors = np.array(errors)\n\n errors[0] = np.abs(count_success - errors[0])\n errors[1] = np.abs(count_success - errors[1])\n\n return count_success, errors", "def itkRGBAPixelUS_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetNumberOfComponents()", "def count():", "def classesAndFrames(self):\n classes = defaultdict(int)\n with open(self.inputfile) as fin:\n for line in fin:\n arr = line.strip().split()\n y = int(arr[1])\n classes[y] += 1\n return classes", "def num_classes(self):\n raise NotImplementedError", "def times(self) -> int:\n return self._channel_arrays[0].shape[self.time_pos]", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def measure(self, imgage, previous=None):", "def _weighted_gini(self, class_counts):\n smoothed = 1.0 + tf.slice(class_counts, [0, 1], [-1, -1])\n sums = tf.reduce_sum(smoothed, 1)\n sum_squares = tf.reduce_sum(tf.square(smoothed), 1)\n\n return sums - sum_squares / sums", "def count_plot_target_class(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.groupby([self.target_column]).size()) # print the sum of every class\r\n\r\n sns.countplot(data=self.data_frame, x=self.data_frame[self.target_column])\r\n plt.title(self.dataframe_name + ': Display the distribution of ' + self.target_column + ' class')\r\n plt.xlabel('Target Name: ' + self.target_column)\r\n plt.ylabel('Count')\r\n self.save_plot_as_image()\r\n plt.show()", "def count_classes(self, index=None):\n \n if index is None:\n index = np.arange(self.Samples.shape[0])\n elif isinstance(index, int):\n index = [index]\n \n count = np.zeros((len(index), len(self._classes)), dtype=np.int)\n for _ind in range(len(index)):\n rois = self.__getrois__(index[_ind])\n count[_ind, :] = np.bincount(rois[:,4].astype(np.int), \n minlength=len(self._classes))\n \n return count", "def num_ticks(self, start, end, desired_ticks=8):\n return len(self.ticks(start, end, desired_ticks))", "def num_ticks(self, start, end, 
desired_ticks=8):\n return len(self.ticks(start, end, desired_ticks))", "def num_ticks(self, start, end, desired_ticks=8):\n return len(self.ticks(start, end, desired_ticks))", "def counts_scan_binned(val_addr,val_timestamps,pixels,dwell_time,bin_time):\n dwell_time = int(dwell_time * 1e5)\n bin_time = int(bin_time * 1e5)\n x,y = pixels.shape\n counts = np.zeros((int(np.ceil(dwell_time/bin_time)),x,y,23))\n for i in range(x):\n for j in range(y):\n delta= 0\n if i%2 ==0:\n delta= 1\n while val_timestamps[int(pixels[i,j]+delta)]- val_timestamps[int(pixels[i,j])]<dwell_time:\n if val_addr[int(pixels[i,j]+delta)]<25:\n bin_number = int(np.floor((val_timestamps[int(pixels[i,j])+delta]- val_timestamps[int(pixels[i,j])])/bin_time))\n counts[bin_number,i,j,val_addr[int(pixels[i,j])+delta]] += 1\n delta += 1\n else:\n delta= 1\n\n while val_timestamps[int(pixels[i,y-1-j]+delta)]- val_timestamps[int(pixels[i,y-1-j])]<dwell_time:\n if val_addr[int(pixels[i,y-1-j]+delta)]<25:\n bin_number = int(np.floor((val_timestamps[int(pixels[i,y-1-j])+delta]- val_timestamps[int(pixels[i,y-1-j])])/bin_time))\n counts[bin_number,i,y-1-j,val_addr[int(pixels[i,y-1-j])+delta]] += 1\n delta += 1\n return(counts)", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def itkRGBAPixelF_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelF_GetNumberOfComponents()", "def distict_color_count(img):\n return Counter([tuple(colors) for i in img for colors in i])", "def distict_color_count(img):\n return Counter([tuple(colors) for i in img for colors in i])", "def num_run_cycles(self, run_idx):\n return self.num_traj_frames(run_idx, 0)", "def n_rounds(self) -> int:\n return self.y.shape[0]", "def _class_count_2(X, n_classes, worker_weight=None):\n prob = np.zeros((X.shape[1], n_classes))\n if worker_weight is None:\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n prob[j, X[i, j]] += 1\n else:\n for i in range(X.shape[0]):\n for j in range(X.shape[1]):\n prob[j, X[i, j]] += worker_weight[i]\n return prob", "def class_time(self):\n # timing is stored by node, we compute timing by class on demand\n rval = {}\n for (fgraph, node), t in self.apply_time.items():\n typ = type(node.op)\n rval.setdefault(typ, 0)\n rval[typ] += t\n return rval", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUS_GetNumberOfComponents()", "def itkRGBAPixelUC_GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()", "def calculate_accuracy(mod_pred, y):\n count = 0\n for i, y_true in enumerate(y):\n if y_true == mod_pred[i]:\n count += 1\n\n return count/len(y)", "def countComponents26(cube):\n n,l = labelComponents26(cube);\n return n;", "def _find_epochs(self, history):\n \n epoch_count = len(history.history['val_loss'])\n\n return epoch_count", "def cumulativeSpeedRevolutionCount(self):\n return (self.raw[8] << 8) | self.raw[7]", "def GetNumberOfComponents():\n return _itkRGBAPixelPython.itkRGBAPixelUC_GetNumberOfComponents()", "def num_frames(self):\n return self._first_rgb.shape[1]", "def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)", "def epochs_for_cycles(self, cycles: int) -> int:\n cnt = self.warmup\n n = self.n0\n for _ in range(cycles):\n cnt += n\n n = int(round(n * self.length_scale))\n return cnt", "def ntimebins(self, t0, t1):\n t0 = Time(t0, scale='utc')\n t1 = Time(t1, scale='utc')\n nt = ((t1-t0).to(u.s) / self.dtsample /\n 
(self.setsize)).to(u.dimensionless_unscaled).value\n return np.round(nt).astype(int)", "def calc_class_ratio(da):\n\n # list all class codes in dataset\n list_classes = (np.unique(da, return_counts=False)).tolist()\n\n # create empty dataframe & dictionary\n ratio_table = pd.DataFrame(data=None, columns=list_classes)\n date_line = {}\n\n # count all pixels, should be consistent\n total_pix = int(np.sum(da.isel(time=1)))\n\n # iterate through each year in dataset\n for i in range(0, len(da.time)):\n date = str(da.time[i].data)[0:10]\n\n # for each year iterate though each present class number\n # and count pixels\n for n in list_classes:\n number_of_pixles = int(np.sum(da.isel(time=i) == n))\n percentage = number_of_pixles / total_pix * 100\n date_line[n] = percentage\n\n # add each year's counts to dataframe\n ratio_table.loc[date] = date_line\n\n return ratio_table", "def iterations_in_epoch(self):\n if self._cur_epoch_itr is not None:\n return self._cur_epoch_itr.count\n elif self._next_epoch_itr is not None:\n return self._next_epoch_itr.count\n return 0" ]
[ "0.63271403", "0.6253417", "0.6221684", "0.6215353", "0.61849016", "0.6137284", "0.6085611", "0.60084146", "0.59348655", "0.5917543", "0.588262", "0.588262", "0.588262", "0.58785236", "0.58546215", "0.58513844", "0.58418506", "0.58400005", "0.5837566", "0.5830539", "0.5821067", "0.5771147", "0.5750005", "0.5747294", "0.5745739", "0.57376343", "0.5737613", "0.5732006", "0.5727957", "0.572409", "0.5721409", "0.5717296", "0.5708012", "0.57022035", "0.5693368", "0.56831443", "0.5680564", "0.56541234", "0.56462", "0.5637808", "0.563195", "0.5625961", "0.5612518", "0.560972", "0.5606015", "0.56056386", "0.5591669", "0.55793625", "0.5566781", "0.55644417", "0.5549366", "0.5548608", "0.55402166", "0.55392116", "0.552982", "0.5528838", "0.5522951", "0.5521927", "0.55186796", "0.55033326", "0.5491811", "0.5491434", "0.5490728", "0.54898036", "0.5484009", "0.54833746", "0.5477884", "0.5471272", "0.54575336", "0.5448798", "0.54367566", "0.5435309", "0.5434049", "0.54316574", "0.54310805", "0.5430811", "0.5430811", "0.5430811", "0.5429022", "0.54240173", "0.5423909", "0.5423568", "0.5423568", "0.54235214", "0.5422801", "0.54185295", "0.5415063", "0.5414317", "0.5413696", "0.54006535", "0.539507", "0.53905404", "0.53897685", "0.53893656", "0.53874105", "0.53861505", "0.5376568", "0.53718", "0.5370826", "0.5367856" ]
0.6240185
2
Function to apply an incidence filter. The incidence filter finds all pixels that changed more than numChangesCutoff times and are connected to fewer than connectedPixelCutoff pixels, then replaces those pixels with the MODE value at that pixel position in the stack of years.
def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):
    #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff
    num_changes = calculateNumberOfChanges(image, bandNames)
    too_many_changes = num_changes.gt(numChangesCutoff)

    #Get binary images of the land cover classifications for the current year
    binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)

    #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number
    #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff
    connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))

    #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff
    incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)

    #Get an image that represents the mode of the land cover classes in each pixel
    mode_image = image.reduce(ee.Reducer.mode())

    #Replace pixels of image where incidence_filter is True with mode_image
    incidence_filtered = image.where(incidence_filter, mode_image)

    return incidence_filtered
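The query and the Earth Engine implementation above describe the incidence filter in terms of ee.Image operations. As a rough point of comparison, a minimal NumPy/SciPy sketch of the same idea on an in-memory (years, rows, cols) label stack could look like the following; the function name, the per-class connected-component loop, and the assumption of non-negative integer class labels are illustrative choices rather than a faithful port of the Earth Engine code.

import numpy as np
from scipy import ndimage

def incidence_filter_np(stack, num_changes_cutoff=8, connected_pixel_cutoff=6):
    # stack: non-negative integer class labels, shape (n_years, height, width)
    n_years = stack.shape[0]

    # Pixels that change class more than num_changes_cutoff times across years
    num_changes = (np.diff(stack, axis=0) != 0).sum(axis=0)
    too_many_changes = num_changes > num_changes_cutoff

    # Per-pixel majority (mode) class across the time axis
    n_classes = int(stack.max()) + 1
    counts = np.stack([(stack == c).sum(axis=0) for c in range(n_classes)])
    mode_image = counts.argmax(axis=0)

    filtered = stack.copy()
    for year in range(n_years):
        year_img = stack[year]
        weakly_connected = np.zeros(year_img.shape, dtype=bool)
        for cls in np.unique(year_img):
            # Size of the connected component of the pixel's own class
            labels, num = ndimage.label(year_img == cls)
            if num == 0:
                continue
            sizes = np.asarray(ndimage.sum(year_img == cls, labels, index=np.arange(1, num + 1)))
            size_img = np.zeros(year_img.shape)
            size_img[labels > 0] = sizes[labels[labels > 0] - 1]
            weakly_connected |= (year_img == cls) & (size_img < connected_pixel_cutoff)
        # Replace unstable, weakly connected pixels with the temporal mode
        replace = too_many_changes & weakly_connected
        filtered[year][replace] = mode_image[replace]
    return filtered

Like connectedPixelCount(100, False) followed by .lt(connectedPixelCutoff) in the original, the sketch treats a pixel as weakly connected when its same-class connected component contains fewer than connected_pixel_cutoff pixels, and only those pixels that are also flagged by the change count are replaced with the temporal mode.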
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def mask_incoherent(self):\n self.MaskPrefix = 'i' + self.MaskPrefix\n print('Masking pixel values where .msk value is less than {0}...'.format(threshold))\n for ig in self.Set:\n igram = self.load_ma(ig)\n mskFile = ig.Path[:-3] + 'msk'\n coherence = roipy.tools.load_half(ig, 2, mskFile)\n incoherent = ma.masked_less(coherence, self.Cothresh)\n igram[incoherent.mask] = ma.masked\n mskFile = self.MaskPrefix + 'Mask_' + ig.Name[:-4]\n np.save(os.path.join(self.ProcDir, mskFile), igram.mask)\n print(mskFile)\n\n print('Done')", "def apply_filter(self, filter_arg):\n filtered_entries = self.visual.apply_filter(filter_arg, self.get_current_entries())\n # idxs = self.selector.select_by_objects(filtered_entries, yield_ones_index=True)\n self.visual.print_entries_enum(filtered_entries, None)\n # self.list(idxs)", "def offset_ion_cut(df): \n\n offset_threshold = quality_parameters['offset_ion_threshold']\n \n truth_array = pd.Series(data=True, index=df.index)\n for suffix in ion_channel_labels:\n offset_column_name = 'offset_ion{}'.format(suffix)\n offset = abs( df[offset_column_name] )\n truth_array = truth_array & (offset < offset_threshold)\n \n df['offset_ion_cut'] = truth_array\n \n return None", "def apply(self,src,dst):\n cv2.filter2D(src,-1,self._kernel,dst) #The second argument specifies the per-channel depth of the destination image\n #(such as cv2.CV_8U for 8 bits per channel). 
A negative value (as used here) means\n #that the destination image has the same depth as the source image.", "def filter_segmap(segimage, id_keep, output, blur_kernel=\"\", threshold=0.1):\n seg = pyfits.getdata(segimage)\n mask = np.zeros(seg.shape, 'int')\n # Loop through all IDs... is there a better way??\n for x in id_keep:\n mask = np.where(seg==x, 1, mask)\n seg_masked = np.where(mask==1, 1, 0)\n if os.path.exists(output):\n os.system('rm %s' % output)\n # Now convolve with a blurring kernel if desired\n if len(blur_kernel):\n mask = blur_mask(mask, blur_kernel, threshold=threshold)\n # k = pyfits.getdata(blur_kernel)\n # mask = hconvolve.hconvolve(mask, )\n pyfits.append(output, data=seg_masked, header=pyfits.getheader(segimage))\n return mask", "def filter_Nofinding_imgs(ori_ann_file, filter_info_file, out_file,\n score_thr=0.08, key_name='class'):\n ori_ann_infos = mmcv.load(ori_ann_file)\n df = pd.read_csv(filter_info_file)\n\n ori_image_infos = {os.path.splitext(info['file_name'])[0]: info\n for info in ori_ann_infos['images']}\n print('before filter, there are {} images.'.format(len(ori_image_infos)))\n new_images = []\n for idx, row in df.iterrows():\n image_name = row['image_id']\n cls = row[key_name]\n if cls >= score_thr:\n new_images.append(ori_image_infos[image_name])\n print('after filter, there are {} images.'.format(len(new_images)))\n print('saving new test annotations into file')\n ori_ann_infos['images'] = new_images\n mmcv.dump(ori_ann_infos, out_file)\n print('all done!')", "def sepfirnd(input,filters,axes,output=None,mode='reflect',cval=0.0,origin=0):\n if output is None:\n output = np.empty_like(input)\n tmp = output\n if np.isscalar(filters[0]):\n filters = [np.asarray(filters)]\n if np.isscalar(axes):\n axes = [axes]\n if len(axes) > 1:\n tmp = np.empty_like(output)\n if len(filters) == 1:\n filters = [filters[0]]*len(axes)\n if len(axes) & 1 == 1: #pre-swap so that last write goes to output\n output,tmp = tmp,output \n for filt,ax in zip(filters,axes):\n output,tmp = tmp,output #swap buffers\n convolve1d(input,filt,ax,output,mode,cval,origin)\n input = output\n return output", "def filteringEngine(original, debug=False):\n\n processedImage1 = filterNotInRange(original, LABmin_healthy, LABmax_healthy, cv2.COLOR_BGR2LAB)\n processedImage2 = filterNotInRange(original, LABmin_terrain, LABmax_terrain, cv2.COLOR_BGR2LAB)\n # Image containing many FPs\n processedImage3 = filterNotInRange(original, HSVmin_yellow, HSVmax_yellow, cv2.COLOR_BGR2HSV)\n\n sum1 = cv2.add(processedImage1, processedImage2)\n sub1 = differentialNode(original, sum1)\n\n processedImage = filterNotInRange(sub1, LABmin, LABmax, cv2.COLOR_BGR2LAB)\n # sum2 = cv2.add(processedImage, processedImage3)\n\n kernel = np.ones((6, 6), np.uint8)\n temp = closing(processedImage, kernel)\n\n kernel = np.ones((3, 3), np.uint8)\n out = opening(temp, kernel)\n\n if debug:\n cv2.imshow('processedImage1', processedImage1)\n cv2.imshow('processedImage2', processedImage2)\n cv2.imshow('processedImage3', processedImage3)\n cv2.imshow('sum1', sum1)\n cv2.imshow('sub1', sub1)\n cv2.imshow('processedImage', processedImage)\n cv2.imshow('sum2', sum2)\n cv2.imshow('out', out)\n\n return out", "def equalize_exposure(image, iterations=1, kernel_size=None, min_object_size=500, dark_objects=True, stretch=False):\n\n # Housekeeping\n img = img_as_float(image.copy())\n\n if stretch is True:\n img = img/img.max()\n\n if dark_objects is False:\n img = 1-img # invert\n\n img_in = img.copy() # for use later\n\n if kernel_size is 
None:\n kernel_size = np.int(max(image.shape[0], image.shape[1])/10)\n\n # mean filter kernel\n kernel = morphology.disk(int(kernel_size/2))\n\n # identify objects to ignore\n if kernel_size % 2 is 0:\n block_size = kernel_size + 1\n else:\n block_size = kernel_size\n\n #objects = ~filters.threshold_adaptive(img, block_size, offset = 0.01*img.max()) # deprecated function\n objects = img > filters.threshold_local(img, block_size, offset = 0.01*img.max())\n objects = morphology.remove_small_objects(objects, min_size = min_object_size)\n\n # Correct Exposure x times\n i = 0\n while i < iterations:\n # Global mean\n img_mean = np.ma.masked_array(img, mask=objects).mean()\n\n # global means\n local_means = filters.rank.mean(img, selem=kernel, mask=~objects)\n local_means = filters.gaussian(local_means, kernel_size)\n\n # Correct Image\n img += (img_mean - local_means)\n img[img>1] = 1 # for compatibilty with img_as_float\n img[img<0] = 0 # for compatibilty with img_as_float\n i += 1\n\n out = img_as_float(img)\n\n return(out)", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def apply_filter(self, image):\n pass", "def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in 
mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray", "def _occlude_image(im, cR, cC, size_patch, stride):\n im[cR:cR + stride, cC:cC + stride, :] = 127.5\n occ_map = np.ones((im_target_size, im_target_size))\n occ_map[cR:cR + stride, cC:cC + stride] = 0\n return im, occ_map", "def plot_filtered_spots(\n adata, \n kernel_matrix, \n contrib_thresh,\n row_key='row',\n col_key='col',\n ax=None,\n figure=None,\n dsize=37,\n ticks=True,\n fig_path=None,\n fig_format='pdf',\n fig_dpi=150\n ):\n if ax is None:\n width = 5\n figure, ax = plt.subplots(\n 1,\n 1,\n figsize=(width,5)\n )\n\n # Filter spots with too little contribution\n # from neighbors\n contrib = np.sum(kernel_matrix, axis=1)\n keep_inds = [\n i\n for i, c in enumerate(contrib)\n if c >= contrib_thresh\n ]\n print('Kept {}/{} spots.'.format(len(keep_inds), len(adata.obs)))\n\n cat = []\n keep_inds = set(keep_inds)\n for ind in range(adata.obs.shape[0]):\n if ind in keep_inds:\n cat.append('Kept')\n else:\n cat.append('Filtered')\n cat_palette = ['#595959', '#d9d9d9']\n plot_slide(\n adata.obs,\n cat,\n cmap='categorical',\n colorbar=False,\n vmin=None,\n vmax=None,\n title='Filtered Spots',\n ax=ax,\n figure=figure,\n ticks=ticks,\n dsize=dsize,\n row_key=row_key,\n col_key=col_key,\n cat_palette=cat_palette\n )\n\n if fig_path:\n plt.tight_layout()\n figure.savefig(\n fig_path,\n format=fig_format,\n dpi=fig_dpi\n )\n plt.show()", "def FoldChangeFilterToControl(X, data_headers, FCto, cutoff=0.4):\n XX = LinearFoldChange(X.copy(), data_headers, FCto)\n Xidx = np.any(XX[data_headers].values <= 1 - cutoff, axis=1) | np.any(XX[data_headers].values >= 1 + cutoff, axis=1)\n return X.iloc[Xidx, :]", "def segment_and_find_positions(self):\n initial_image = self.data\n 
xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 
1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def filter_sinc_channel(img, mask_circle_diameter=40.0):\n dft_image = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)\n dft_shift = np.fft.fftshift(dft_image)\n mask = np.zeros((img.shape[0], img.shape[1], 2), dtype=np.uint8)\n circle_center = (int(img.shape[0] / 2), int(img.shape[1] / 2))\n points_x, points_y = np.ogrid[:img.shape[0], :img.shape[1]]\n mask_area = (points_x - circle_center[0]) ** 2 + (points_y - circle_center[1]) ** 2 <= \\\n (mask_circle_diameter / 2) ** 2\n mask[mask_area] = 1\n filtered_dft = dft_shift * mask\n idft_image = np.fft.ifftshift(filtered_dft)\n img_filtered = cv2.idft(idft_image)\n img_filtered = cv2.magnitude(img_filtered[:, :, 0], img_filtered[:, :, 1])\n return img_filtered", "def filterInRange(frame, min, max, colorMode):\n\n tempFrame = cv2.cvtColor(frame, colorMode)\n\n mask = cv2.inRange(tempFrame, min, max)\n mask = cv2.bitwise_not(mask)\n\n filtered_frame = cv2.bitwise_and(frame, frame, mask=mask)\n\n return filtered_frame", "def flattenFrames(stack, onh_info):\n \n maxHeight=0\n frameList=[]\n\n if onh_info!=-1:\n y_min = onh_info.bbox[0]\n #need to subtract one because index?\n y_max = onh_info.bbox[2]\n \n #hull starts at (0,0), add the y and x min to translate to correct indices.\n hull_onh = np.array(np.where(onh_info.convex_image)) + np.array([[y_min], [onh_info.bbox[1]]])\n elif onh_info==-1:\n #should prevent shiftDetectorONH from running since i will always be greater than -1\n #hull_onh has been left undefined.\n y_min, y_max = -1,-1\n \n for i, frame in enumerate(stack):\n #medFrame = ndimage.filters.median_filter(frame,size=(1,60)) #Takes 3.5 minutes\n medFrame = ndimage.filters.uniform_filter1d(frame, 60) #Takes 1.0 minutes and has same output as med filter\n if i>=y_min and i<y_max:\n #get the index of x pixels that are part of the onh for each frame\n #these are indices of indices\n x_onh_ind = np.array(np.where(hull_onh[0]==i)) \n x_onh = hull_onh.T[x_onh_ind][0].T[1]\n #this should be sorted so that its the x_min and max for each frame\n x_onh_bounds = (x_onh[0], x_onh[-1])\n shifts = shiftDetectorONH(medFrame, onh_info, x_onh_bounds)\n else:\n shifts = shiftDetector(medFrame)\n newFrame = adjustFrame(frame, shifts)\n frameList.append(newFrame)\n if newFrame.shape[0] > maxHeight:\n maxHeight = newFrame.shape[0]\n \n #Show percentage of loop completed.\n print('\\rFinding and correcting horizontal shifts: {:.2f}% done'.format((100.0*((i+1)/len(stack)))), end='', flush=True)\n print('\\n')\n \n flattenedStack = padFrames(frameList, maxHeight)\n\n return flattenedStack", "def adaptiveContrast(image, mask, target_path, name, kernel_sizes, save=False):\n\n transforms = []\n for kernel_size in kernel_sizes:\n image_adapteq = exposure.equalize_adapthist(image, kernel_size=kernel_size, clip_limit=0.03)\n transforms.append(image_adapteq)\n \n # Display results\n fig = plt.figure(figsize=(19, 16))\n axes = np.zeros((2, 5), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 5, 1)\n for i in range(1, 5):\n axes[0, i] = fig.add_subplot(2, 
5, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 5):\n axes[1, i] = fig.add_subplot(2, 5, 6+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[0], mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('%d' %kernel_sizes[0])\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[1], mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('%d' %kernel_sizes[1])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[2], mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('%d' %kernel_sizes[2])\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[3],mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('%d' %kernel_sizes[3])\n \n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(transforms[4],mask, mask_cmap, img_cmap,\n axes[:, 4])\n ax_image.set_title('%d' %kernel_sizes[4])\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n\n return image_adapteq", "def filter_img(inarr, data_resolution):\n outt = inarr.copy()\n print('outmin', np.nanmin(outt), np.nanmax(outt))\n\n t_thresh_size = -40\n t_thresh_cut = -50\n\n outt[outt >= t_thresh_size] = 0\n outt[np.isnan(outt)] = 0\n\n labels, numL = label(outt)\n\n u, inv = np.unique(labels, return_inverse=True)\n n = np.bincount(inv)\n\n pix_nb = 700/data_resolution**2\n\n badinds = u[(n < pix_nb)]\n # all blobs with more than 1000 pixels = 25,000km2 (meteosat regridded 5km), 200pix = 5000km2, 8pix = 200km2\n # scale 30km, radius 15km ca. 
700km2 circular area equals 28 pix\n\n for bi in badinds:\n inds = np.where(labels == bi)\n outt[inds] = 0\n\n outt[outt >= t_thresh_cut] = 150\n\n grad = np.gradient(outt)\n outt[outt == 150] = np.nan\n\n nogood = np.isnan(outt) # filters edge maxima later, no maxima in -40 edge area by definition!\n\n # tdiff = np.nanmax(outt) - np.nanmin(outt) # define background temperature for image\n # if tdiff > 28: # temp difference of 28 degrees\n # xmin = 15\n # else:\n # xmin = 10\n\n xmin = 10\n outt[nogood] = t_thresh_cut - xmin\n nok = np.where(abs(grad[0]) > 80)\n d = 2\n i = nok[0]\n j = nok[1]\n # edge smoothing for wavelet application\n for ii, jj in zip(i, j):\n kern = outt[ii - d:ii + d + 1, jj - d:jj + d + 1]\n outt[ii - d:ii + d + 1, jj - d:jj + d + 1] = ndimage.gaussian_filter(kern, 3, mode='nearest')\n\n return outt, nogood, t_thresh_size, t_thresh_cut, pix_nb", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def enhanceContrast(image, mask, target_path, name, save=False):\n \n\n \n # Contrast stretching\n p2, p98 = np.percentile(image, (2, 98))\n image_rescale = exposure.rescale_intensity(image, in_range=(p2, p98))\n \n # Equalization\n image_eq = exposure.equalize_hist(image)\n \n # Adaptive Equalization\n image_adapteq = exposure.equalize_adapthist(image, clip_limit=0.03)\n \n # Display results\n fig = plt.figure(figsize=(19, 13))\n axes = np.zeros((2, 4), dtype=np.object)\n axes[0, 0] = fig.add_subplot(2, 4, 1)\n for i in range(1, 4):\n axes[0, i] = fig.add_subplot(2, 4, 1+i, sharex=axes[0,0], sharey=axes[0,0])\n for i in range(0, 4):\n axes[1, i] = fig.add_subplot(2, 4, 5+i)\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image, mask, mask_cmap, img_cmap,\n axes[:, 0])\n ax_image.set_title('Low contrast image')\n \n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_rescale, mask, mask_cmap, img_cmap,\n axes[:, 1])\n ax_image.set_title('Contrast stretching')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_eq, mask, mask_cmap, img_cmap,\n axes[:, 2])\n ax_image.set_title('Histogram equalization')\n \n ax_image, ax_hist, ax_cdf = plot_image_and_hist(image_adapteq,mask, mask_cmap, img_cmap,\n axes[:, 3])\n ax_image.set_title('Adaptive equalization')\n \n ax_cdf.set_ylabel('Fraction of total intensity')\n ax_cdf.set_yticks(np.linspace(0, 1, 5))\n \n # prevent overlap of y-axis labels\n fig.tight_layout()\n if save:\n 
plt.savefig(os.path.join(target_path, name))\n else:\n plt.show()\n plt.close()\n return image_adapteq", "def imgFiltering(inputPath, outputPath):\n\t# open the target image\n\tpollenImg = IJ.openImage(inputPath)\n\t\n\t# Create duplicator\n\tduplicator = Duplicator()\n\t\n\t# Duplicate the image with channel 1\n\tpollenImgCopy = duplicator.run(pollenImg, 1, 1, 1, 1, 1, 1);\n\t\n\t# set auto threshold\n\t# IJ.setAutoThreshold(pollenImgCopy, \"Default dark\");\n\t\n\t# set threshold\n\tIJ.setThreshold(pollenImgCopy, 17000, 65520)\n\t\n\t# Call the Thresholder to convert the image to a mask\n\tIJ.run(pollenImgCopy, \"Convert to Mask\", \"\")\n\t\n\t# create result table\n\trt = ResultsTable()\n\t\n\t# create particle analyzer\n\tpAnalyzer = ParticleAnalyzer(ParticleAnalyzer.SHOW_NONE, Measurements.ALL_STATS, rt, 20.0, 1000.0, 0.5 ,1.0)\n\t\n\t# Analyze the particle\n\tpAnalyzer.analyze(pollenImgCopy)\n\t\n\t# Save results as csv\n\trt.saveAs(outputPath)", "def continuous_hann_sinc_filter(\n fs: int, fc: float, L: int, dtype: torch.dtype, device: torch.device\n) -> Tensor:\n assert L % 2 == 1\n assert fc < fs / 2\n hsupp = torch.linspace(-(L-1)/2, (L-1)/2, L, dtype=dtype, device=device)\n hideal = (2 * fc / fs) * torch.sinc(2 * fc * hsupp / fs)\n hann = torch.hann_window(L, dtype=dtype, device=device)\n return hideal * hann", "def calculate_force_change(data, axis=\"x\", forceChannel=\"force\", distanceChannel=\"surfaceSep\", window=15):\n axis = axis.upper()\n\n #check if the regions have been assigned\n if \"unfolding\" not in data.columns.values.tolist():\n raise ValueError(\"The unfolding events have not yet been identified. See function identify_unfolding_events\")\n\n #Label the different isolated events using scipy.ndimage\n data[\"eventID\"], eventsNumber = ndimage.label(data[\"unfolding\"])\n\n #Start the counting in 0\n data[\"eventID\"] -= 1\n #Show how many events were identified\n print(eventsNumber, \"events identified\")\n\n def averaged_values(column, startT, endT, window=5):\n start = column.index.get_loc(startT)\n end = column.index.get_loc(endT)\n averagedBefore = column.iloc[start-window: start - 3].mean()\n averagedAfter = column.iloc[end + 3: end + window].mean()\n diffAverage = averagedAfter - averagedBefore\n return averagedBefore, averagedAfter, diffAverage\n\n startForce = []\n forceChange = []\n\n pullingCycle = []\n #Take the first and last times point of each unfolding event, discarding the first point because it is the\n # unclassified regions\n times = {\"startTimes\": data.groupby(\"eventID\").time.first()[1:], \"endTimes\": data.groupby(\"eventID\").time.last()[1:]}\n newWindow = deepcopy(window)\n for startTime, endTime in zip(times[\"startTimes\"], times[\"endTimes\"]):\n if data.index.get_loc(startTime) < newWindow:\n window = data.index.get_loc(startTime) - 1\n else:\n window = newWindow\n forceBefore, forceAfter, forceDifference = averaged_values(data[forceChannel+axis], startTime, endTime, window)\n startForce.append(forceBefore)\n forceChange.append(forceDifference)\n pullingCycle.append(data.loc[startTime, \"pullingCycle\"])\n\n unfoldingData = pd.DataFrame({\"startTime\": times[\"startTimes\"], \"endTime\": times[\"endTimes\"],\n \"force\": startForce, \"forceChange\": forceChange, \"pullingCycle\": pullingCycle})\n\n return unfoldingData", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = 
(H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def enhance_contrast(img):\n for y in range(frame_height):\n for x in range(frame_width):\n if img[y, x, 1] > 100:\n # range of blues to limit of puppet motion 255/(frame_width - 150)\n img[y][x][0] = x*0.4\n if img[y, x, 1] <= 100:\n img[y][x][2] = img[y][x][2]*0.5\n cv2.imwrite(\"contrasted.png\", img)", "def objective6(X, Y):\n filter = ConstArrayExpr(numpy.load(kernel_blur_large))\n return conv2d(X, filter)", "def filter_ms2fits(stack, fit_data, channel=1, peakiness=4.5):\n \n fit_data = fit_data.copy()\n for t in range(0, len(fit_data)):\n frame_data = fit_data[t]\n frame_med = np.median(stack[channel, t])\n xy_width_means = np.mean(frame_data[:,5:7], axis=1)\n peak_heights = frame_data[:,3]\n spot_peakiness = np.log(peak_heights / xy_width_means)\n frame_data_filtered = frame_data[(peak_heights > frame_med) & (spot_peakiness > peakiness),:]\n fit_data[t] = frame_data_filtered\n return fit_data", "def get_all_incidents():\n allIncidents = Incident.get_all()\n #allCops = get_all_cops()\n incidents = []\n for i in allIncidents:\n if(\n (i['operations_center']['id'] in allCops) and\n (inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem)\n ):\n \n i['operations_center']['id'] = changeCop(i['operations_center']['id'])\n incidents.append(i)\n \n return incidents", "def cs4243_histmatch(ori_image, refer_image):\n \n ##your code here ###\n\n # get cdf of ori and ref image\n grey_level = 256\n ori_hist, ori_cum_hist, ori_res_image, ori_uni_hist = cs4243_histequ(ori_image, grey_level)\n ref_hist, ref_cum_hist, ref_res_image, ref_uni_hist = cs4243_histequ(refer_image, grey_level)\n \n # map each ori cdf to ref cdf and get the mapped index as matched grey level\n map_value = []\n for i in range(grey_level):\n ori_cdf = ori_cum_hist[i]\n matched_intensity = np.uint8(np.abs(ref_cum_hist - ori_cdf).argmin())\n map_value.append(matched_intensity)\n ##\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist", "def apply(filter_fn, img):\n width, height = img.size\n newimg = Image.new(\"RGB\", (width, height))\n for j in range(1, height - 1):\n for i in range(1, width - 1):\n newimg.putpixel((i, j), filter_fn(img, i, j))\n return newimg", "def find_inlier(self):\n len_of_matches = len(self.match)\n # The last line of W stores the whole number of consistency of this match\n self.W = np.zeros((len_of_matches+1, len_of_matches))\n for i in np.arange(len_of_matches):\n for 
j in np.arange(len_of_matches):\n if i >= j:\n continue\n\n # ASSUMPTION : the index of descriptor is the same with the index of image\n wa = self.featureFrameA[self.match[i].queryIdx].pt[0]-self.featureFrameA[self.match[j].queryIdx].pt[0]\n wb = self.featureFrameA[self.match[i].queryIdx].pt[1]-self.featureFrameA[self.match[j].queryIdx].pt[1]\n wa_ = self.featureFrameB[self.match[i].trainIdx].pt[0]-self.featureFrameB[self.match[j].trainIdx].pt[0]\n wb_ = self.featureFrameB[self.match[i].trainIdx].pt[1]-self.featureFrameB[self.match[j].trainIdx].pt[1]\n\n # Compare and complete the matrix W\n if abs(wa-wa_) + abs(wb-wb_) <= INLIER_DIST_THRE:\n self.W[i, j] = 1\n self.W[j, i] = 1\n self.W[len_of_matches, j] += 1\n\n # Choose the best inlier features\n self.best_matches = []\n candidate = np.arange(len_of_matches)\n while True:\n best_matchIdx = self.find_most_compatible_match(candidate)\n if not best_matchIdx or best_matchIdx == -1: # in case no best match is found\n break\n else:\n self.best_matches.append(self.match[best_matchIdx])\n candidate = np.delete(candidate, np.where(candidate == best_matchIdx), axis=0)", "def filter_isolated_pixels(array):\n filtered_array = np.copy(array)\n id_regions, num_ids = ndimage.label(filtered_array,\n structure=np.ones((3, 3)))\n id_sizes = np.array(ndimage.sum(array, id_regions, range(num_ids+1)))\n area_mask = (id_sizes == 1)\n filtered_array[area_mask[id_regions]] = 0\n return filtered_array", "def calculate_daily_climatology(\n pctile,\n windowHalfWidth,\n lenClimYear,\n smoothPercentile,\n smoothPercentileWidth,\n thresh_climYear, # empty array\n seas_climYear, # empty array\n clim, # empty dict\n feb29,\n doyClim,\n clim_start,\n clim_end,\n tempClim,\n temp,\n):\n # Loop over all day-of-year values, and calculate threshold and seasonal climatology across years\n for d in range(1, lenClimYear + 1):\n # Special case for Feb 29\n if d == feb29:\n continue\n # find all indices for each day of the year +/- windowHalfWidth and from them calculate the threshold\n tt0 = np.where(doyClim[clim_start : clim_end + 1] == d)[\n 0\n ] # the index for that day each year\n # If this doy value does not exist (i.e. 
in 360-day calendars) then skip it\n if len(tt0) == 0:\n continue\n tt = np.array([])\n for w in range(-windowHalfWidth, windowHalfWidth + 1): # -5 : 5 default\n tt = np.append(\n tt, clim_start + tt0 + w\n ) # append the daily values 5days before and 5days after\n tt = tt[tt >= 0] # Reject indices \"before\" the first element\n tt = tt[tt < TClim] # Reject indices \"after\" the last element\n thresh_climYear[d - 1] = np.percentile(nonans(tempClim[tt.astype(int)]), pctile)\n seas_climYear[d - 1] = np.mean(nonans(tempClim[tt.astype(int)]))\n\n # Special case for Feb 29 (LEAP YEAR)\n thresh_climYear[feb29 - 1] = (\n 0.5 * thresh_climYear[feb29 - 2] + 0.5 * thresh_climYear[feb29]\n )\n seas_climYear[feb29 - 1] = (\n 0.5 * seas_climYear[feb29 - 2] + 0.5 * seas_climYear[feb29]\n )\n\n if smoothPercentile:\n thresh_climYear, seas_climYear = smooth_climatologies(\n thresh_climYear, seas_climYear, smoothPercentileWidth\n )\n\n # Generate threshold for full time series\n clim[\"thresh\"] = thresh_climYear[doy.astype(int) - 1]\n clim[\"seas\"] = seas_climYear[doy.astype(int) - 1]\n # Save vector indicating which points in temp are missing values\n clim[\"missing\"] = np.isnan(temp)\n\n return clim", "def apply_filter(\n self,\n labels=None,\n prune_labels=True,\n indices=None,\n threshold=0.0,\n top_n=None\n ):\n df = self.df.copy()\n df_labels = self.labels.copy()\n if indices is not None:\n df = df.loc[indices]\n if labels is not None:\n selected = [\n index for index, row in df.iterrows()\n if row['e1'] in labels and row['e2'] in labels\n ]\n df = df.loc[selected]\n if prune_labels:\n df_labels = sorted(list(set(df['e1']) | set(df['e2'])))\n if threshold > 0.0:\n df = df[df['intensity'] >= threshold]\n if top_n is not None:\n df = df.sort_values(ascending=False, by='intensity')[:top_n]\n return InteractionTable(df=df, labels=df_labels)", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def _filter_imgs(self, min_size=32):\n valid_inds = []\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n for i, img_info in enumerate(self.img_infos):\n if self.img_ids[i] not in ids_with_ann:\n continue\n if min(img_info['width'], img_info['height']) >= min_size:\n valid_inds.append(i)\n return valid_inds", "def scoreCirc_ActiveFilter(circuit, gen, indi, makeRedundancyInMatrix):#TODO\n #Calculate density and uniquiness (as in makeNetlist)\n if makeRedundancyInMatrix == True:\n #FullBigCircuitMatrix = deepcopy(fullRedundancyBigCircuitMatrix(circuit.BigCircuitMatrix))\n FullBigCircuitMatrix = deepcopy(circuit.fullRedundancyMatrix)\n else:\n FullBigCircuitMatrix = deepcopy(circuit.BigCircuitMatrix)\n\n rowsR,columnsR,columnsC,rowsC = sortedNonZeroIndices(FullBigCircuitMatrix)\n\n matrixDensity = float(len(rowsR))/float((BigMatrixSize*BigMatrixSize/2))\t#(ones/(all/2))\n matrixQuaziID = sum(rowsR)+sum(columnsR)-BigMatrixSize*(BigMatrixSize-1)\n OcSc, IcNc, SelfConnElm = 
checkConnsConnected(FullBigCircuitMatrix) #Outer connections Short cut, Inner connections Not connected\n #print \"Kratkih stikov zunanjih povezav:\", OcSc\n \n results = None\n if OcSc > 1:\n score = 1e4*np.exp(OcSc)\n else:\n makeNetlist(circuit, gen, indi, FullBigCircuitMatrix)\n results = runme2.evaluateActiveFilter_SUHAD(gen, indi)#TODO\n \n \n disfCount = 0\n \n ripple = np.array(results['ripple']['nominal'], dtype=float)\n if np.isnan(ripple):\n disfCount = disfCount + 1\n r = 0 \n else:\n r = abs(ripple - 0.5) if ripple > 0.5 else 0\n \n damping = np.array(results['damping']['nominal'], dtype=float)\n if np.isnan(damping):\n disfCount = disfCount + 1\n d = 0\n else:\n d = abs(40 - damping) if damping < 40 else 0\n \n gain = np.array(results['gain']['nominal'], dtype=float)\n if np.isnan(gain):\n disfCount = disfCount + 1\n g = 0\n else:\n g = abs(gain - 10) if gain < 10 else 0\n \n THD = np.array(results['THD']['nominal'], dtype=float)\n if np.isnan(THD):\n disfCount = disfCount + 1\n thd = 0\n else:\n thd = THD-1 if THD > 1 else 0\n\t \n StaticOut = not results['isOutVNonStationary']['nominal']\n \n score = 5*r + 4*d + 2*g + (100*StaticOut + 10*thd)\n\n #print disfCount\n if disfCount > 0:\n score = np.exp(disfCount) * 1e3\n \n ##add a little salt!\n #score = score + random.uniform(0.0, 1)\n\n score = score + (IcNc*IcNc+1)# + abs(BW-bw)*1e2 + abs(CUTOFF-cutoff)*1e2 #add small punishment if not all nodes connected and bw and cutoff are off\n\n \n #print \"\\t\\t\\t\\t\\tG_\" + str(gen) + \"_I_\" + str(indi) + \" SCORE:\", score\n #cleanup current subcircuit\n filename = \"g_\" + str(gen) + \"_i_\" + str(indi) + \"_subckt.cir\"\n os.remove(filename)\n #print \".\",\n return score, matrixDensity, matrixQuaziID, results", "def filterNotInRange(frame, min, max, colorMode):\n\n tempFrame = cv2.cvtColor(frame, colorMode)\n\n mask = cv2.inRange(tempFrame, min, max)\n\n filtered_frame = cv2.bitwise_and(frame, frame, mask=mask)\n\n return filtered_frame", "def drawcntMap(orgimg,filteredimg,wThresh,hThresh):\r\n _, contour, _ = cv2.findContours(filteredimg, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n cnt = cv2.drawContours(orgimg.copy(), contour, -1, (0, 0, 255), 2) # To draw filtered contours on original image\r\n\r\n digitCnts = [] # contours to be surrounded by bounding boxes\r\n\r\n for c in contour:\r\n (x, y, w, h) = cv2.boundingRect(c)\r\n if w >= wThresh and h >= hThresh and w*h <20000: # Length filters to reduce noise\r\n cv2.rectangle(cnt,(x,y),(x+w,y+h),[0,255,0],2)\r\n digitCnts.append(c)\r\n\r\n return cnt, digitCnts", "def color_thresh(orig_img, thresh_img):\n new_img = orig_img\n\n for i in range(orig_img.shape[0]):\n for j in range(orig_img.shape[1]):\n if thresh_img[i,j] == 255:\n new_img[i,j] = 0\n\n return new_img", "def filter(self, op=GaussianFilter):\n\n if self._verbose > 0:\n print(\"Filtering...\")\n\n # Import from utils specified params.\n params = get_filtering_params()\n\n negative = self.image_raw - op(sigma=params['sigma_bgd']).convolve(self.image_raw)\n\n self.image_filtered = op(sigma=params['sigma_spots']).convolve(negative)", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = 
np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def skyPixels(self,i, d,Feeds, selectFeature):\n\n # We store all the pointing information\n x = (d['level1/spectrometer/pixel_pointing/pixel_ra'][...])[Feeds[:,None],selectFeature]\n x = x[...,0:self.datasizes[i]].flatten()\n y = (d['level1/spectrometer/pixel_pointing/pixel_dec'][...])[Feeds[:,None],selectFeature]\n y = y[...,0:self.datasizes[i]].flatten()\n\n\n el = (d['level1/spectrometer/pixel_pointing/pixel_el'][...])[Feeds[:,None],selectFeature]\n el = el[...,0:self.datasizes[i]]\n\n\n pixels = self.getFlatPixels(x,y)\n pixels[pixels < 0] = -1\n pixels[pixels > self.naive.npix] = -1\n\n return pixels", "def filter_ic(ic,year,month):\n \n ic_filtered = (ic.filter(ee.Filter.eq(\"month\",month))\n .filter(ee.Filter.eq(\"year\",year)))\n image = ee.Image(ic_filtered.first())\n return(image)", "def askapsoft_decimate_n_extract(af, over_sampling, kernel_support):\n\n # why is this normalization required..?\n rescale = over_sampling*over_sampling\n #rescale = 1\n\n cSize = 2 * kernel_support + 1\n itsConvFunc=np.zeros((over_sampling, over_sampling, cSize, cSize), dtype=complex)\n\n for fracu in range(0,over_sampling):\n for fracv in range(0,over_sampling):\n\n # Now cut out the inner part of the convolution function and\n # insert it into the convolution function\n for iy in range(-kernel_support,kernel_support+1):\n for ix in range(-kernel_support,kernel_support+1):\n\n nx = af.shape[0]\n ny = af.shape[1]\n\n # assumes support is the same for all w-planes:\n xval = (ix) * over_sampling + fracu + nx / 2\n yval = (iy) * over_sampling + fracv + ny / 2\n\n itsConvFunc[fracu, fracv, ix+cSize/2, iy+cSize/2] \\\n = rescale * af[xval, yval]\n\n return itsConvFunc[::-1,::-1]", "def 
anoise(this, *args, **kargs):\n\t\t\n\t\t# Arguments\n\t\tif not args: args = [50]\n\t\t\n\t\t# Kernel's retrieval\n\t\tanoisek = this._ANOISEK\n\t\tif anoisek is None: return None\n\t\t\n\t\t# More magic\n\t\tbin = this._BINARY\n\t\tfor thresh in args:\n\t\t\tbin[:,:] = (cv2.filter2D(bin, -1, anoisek) / 2.55 > thresh) * 255\n\t\treturn True", "def MyFilter(data, window_width=10, beta=2.0, draw_graph=False):\n\n #read data and change the format\n if 'time' in data.columns:\n date_list = []\n for i in data.index:\n date_parse = parse(str(data.ix[i].time))\n date_list.append(date_parse)\n data['date'] = date_list\n data_use = data\n data_use.index = data_use['date'].tolist()\n data_use = data_use.drop(['date','time'], axis=1)\n data_use.index.name = 'time'\n else:\n data_use = data\n #design filter, use the kaiser window here\n window = signal.kaiser(window_width, beta=beta)\n data_use['close_filtered'] = signal.convolve(data_use['close'], window, mode='same') / sum(window)\n data_use['high_frequency'] = data_use['close'] - data_use['close_filtered']\n\n #delete the distortion datas after filtered\n if window_width % 2 == 0:\n data_changed = data_use[window_width/2: -(window_width/2 - 1)]\n else:\n data_changed = data_use[(window_width-1)/2: -(window_width-1)/2]\n\n #draw graph\n if (draw_graph == True) :\n fig = plt.figure()\n ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2)\n data_changed.loc[:,'close'].plot(style='r', label='original')\n data_changed.loc[:,'close_filtered'].plot(style='k', label='filtered')\n plt.title('Kaiser window_width = %d , const = %d' % (window_width, beta))\n plt.legend(loc='best')\n\n ax2 = plt.subplot2grid((3,1), (2,0))\n data_changed.loc[:,'high_frequency'].plot(label='high_frequency')\n ax2.set_ylim([-150, 150])\n plt.title('High Frequency')\n plt.legend(loc='best')\n plt.show()\n # print data_use\n # print data_changed\n data_out = data_changed['close_filtered']\n return np.array(data_out.tolist())", "def objective8(X, Y):\n filter = ConstArrayExpr(numpy.load(kernel_sharp_large))\n return conv2d(X, filter)", "def convolve_psf(a, fwhm, edge='invert', replace_nan=True, debug=False):\n const2 = 2.354820046 # 2*sqrt(2*ln(2))\n const100 = 3.034854259 # sqrt(2*ln(100))\n sigma = fwhm / const2\n # gaussian drops to 1/100 of maximum value at x =\n # sqrt(2*ln(100))*sigma, so number of pixels to include from\n # centre of gaussian is:\n n = np.ceil(const100 * sigma)\n if replace_nan:\n a = nan2num(a, replace='interp')\n if debug:\n print(\"First and last {0} pixels of output will be invalid\".format(n))\n x = np.linspace(-n, n, 2*n + 1) # total no. 
of pixels = 2n+1\n gauss = np.exp(-0.5 * (x / sigma) ** 2 )\n\n return convolve_window(a, gauss, edge=edge)", "def filt1(X, yvals, xvals, ny, nx):\n \n ylen = X.shape[0]\n xlen = X.shape[1]\n\n yflen = (ylen-1)//ny\n xflen = (xlen-1)//nx\n\n Y = np.zeros((X.shape))\n\n #Y = Y[0:yflen,0:xflen,]\n\n ymax = ny*yflen+1\n xmax = nx*xflen+1\n\n Y = Y[0:ymax,0:xmax,]\n Xnew = X[0:ymax,0:xmax,]\n yvals = yvals[0:ymax,0:xmax,]\n xvals = xvals[0:ymax,0:xmax,] \n\n counter = np.zeros((Y.shape))\n \n for i in range(xflen):\n xmin = nx*i\n xmax = nx*(i+1)+1\n for j in range(yflen):\n ymin = ny*j\n ymax = ny*(j + 1)+1\n #print((xmin,xmax), (ymin,ymax))\n Y[ymin:ymax,xmin:xmax,] = Y[ymin:ymax,xmin:xmax,] + np.mean(X[ymin:ymax,xmin:xmax,], axis=(0,1))\n counter[ymin:ymax,xmin:xmax,] = counter[ymin:ymax,xmin:xmax,] + 1\n\n Y = Y/counter #We take the average of the points that appear more than once\n\n return Xnew, Y, yvals, xvals", "def identify_unfolding_events(data, axis=\"x\", forceChannel=\"force\", window=15, STDthreshold=0.8, forceThreshold=5):\n\n axis = axis.upper()\n\n data[\"unfolding\"] = False\n\n def mean_diff(dataSet, n):\n \"\"\"\n Subfunction to calculate the rolling mean of the difference\n Args:\n dataSet (numpy.array): array with the data subset (window) to calculate the moving mean of the difference\n n (integer): number of datapoints to calculate the diff from\n Returns:\n mean of the difference of the subset\n \"\"\"\n return np.mean(np.diff(dataSet, n))\n\n force = data[forceChannel + axis]\n #Calculate the rolling standard deviation\n forceSTD = force.rolling(window, center=True).std()\n #print(window)\n #Calculate the rolling mean of the difference\n forceMeanChange = force.rolling(window, center=True).apply(func=mean_diff, args=(1,))\n #select_event_threshold(data, forceSTD, forceMeanChange, axis)\n #forceMeanChange2 = pd.rolling_apply(force, window, func=mean_diff, args=(2,), center=True)\n\n #plt.plot(forceMeanChange)\n #plt.plot(forceSTD)\n #plt.axhline(STDthreshold)\n #plt.show()\n forceSTD[forceSTD < STDthreshold] = 0\n forceMeanChange[abs(forceMeanChange) < STDthreshold / 3] = 0\n #forceMeanChange2[abs(forceMeanChange2) < 0.3] = 0\n\n\n forceSTD = forceSTD.fillna(0)\n forceMeanChange = forceMeanChange.fillna(0)\n #Create a mapping mask to filter the data and assign unfolding events as True\n mask = forceSTD + abs(forceMeanChange)# + abs(forceMeanChange2)\n mask[(mask < STDthreshold/2) | (data[forceChannel + axis] < forceThreshold) | (data[forceChannel + axis] > 60)] = 0\n mask = (mask != 0)\n # mask = (((forceSTD - forceMeanChange) > threshold) & (data[\"force\" + axis] > 7))\n data.loc[mask, \"unfolding\"] = True\n\n return data", "def filter(self, result):\n convexities = []\n for mask_idx in range(result.masks.shape[2]):\n mask = result.masks[:, :, mask_idx]\n props = regionprops(mask.numpy().astype(np.int8))[0]\n convexities.append(props.filled_area/props.convex_area)\n\n convexities = np.array(convexities)\n convexity_stats = stats.describe(convexities)\n\n heights = result.rois[:, 2] - result.rois[:, 0]\n widths = result.rois[:, 3] - result.rois[:, 1]\n gt_max_width = widths > self.width_stats.minmax[1]\n lt_min_width = widths < self.width_stats.minmax[0]\n gt_max_height = heights > self.height_stats.minmax[1]\n lt_min_height = heights < self.height_stats.minmax[0]\n keep = ~(gt_max_width | lt_min_width |\n gt_max_height | lt_min_height)\n initial_size = heights.shape[0]\n new_size = keep.sum()\n if initial_size != new_size:\n logging.info(f\"Analyzer filtered 
{initial_size - new_size}\"\n f\" out of {initial_size}.\")\n result.masks = result.masks.permute(2, 0, 1)\n new_result = result.select(keep)\n new_result.masks = new_result.masks.permute(1, 2, 0)\n return new_result", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def _red_detect_(self, nslice = 0, thresh = 2.0):\n zk_1 = 's_' + format(nslice, '03d')\n zk_2 = 's_' + format(nslice+1, '03d')\n\n zf_1 = self.z_dense[zk_1]\n zf_2 = self.z_dense[zk_2]\n\n # extract the y and x coordinates\n y1 = zf_1[:,0]\n x1 = zf_1[:,1]\n\n y2 = zf_2[:,0]\n x2 = zf_2[:,1]\n\n\n # create a meshgrid\n [YC, YR] = np.meshgrid(y2, y1)\n [XC, XR] = np.meshgrid(x2, x1)\n\n\n dist_block = np.sqrt((YC-YR)**2 + (XC-XR)**2)\n red_pair = np.where(dist_block <= thresh) # find out where the distance between cell i in plane k and cell j in plane k+1 is below the threshold.\n\n ind1 = red_pair[0] # the indices in the first frame\n ind2 = red_pair[1] # the indices in the second frame\n\n\n # select those with markers > 0 and markers < 0\n marker_1 = zf_1[ind1, 3]\n\n\n new_idx = (marker_1 == 0) # select those with zero-markers, which are never counted before. These are new cells. marker_1 needs to be updated.\n pool_new = ind1[new_idx] # select the indices in the first frame where new redundancies are detected \n pool_new_cov = ind2[new_idx] # select the indices in the second frame where new redundancies are detected.\n\n\n pool_exist = ind1[~new_idx] # among the detected redundancies, find those already marked.\n pool_exist_cov = ind2[~new_idx] # correspondingly, find those already marked in the adjacent slice\n\n n_new = len(pool_new)\n n_exist = len(pool_exist)\n if self.verbose:\n print(n_new, \"new redundancies, \", n_exist, \"existing redundancies\")\n\n for n_count in np.arange(n_new):\n # build the new keys\n # also, we need to assign each new key an identity number which is unique.\n n_ind1 = pool_new[n_count] # find the indices in the first slice that contains new redundancies\n n_ind2 = pool_new_cov[n_count] # find the indices in the following slice \n pr_number = nslice * 1000 + n_ind1\n pr_key = 'sl_' + str(pr_number) # build a key \n new_sl = Simple_list(nslice) # create a simple list with z_marker = nslice, nslice is the index of the first z-slice \n new_sl.add([nslice, zf_1[n_ind1, 4]])\n new_sl.add([nslice+1, zf_2[n_ind2, 4]])\n zf_1[n_ind1, 3] = pr_number # assign the new pr_number to zf_1\n zf_2[n_ind2, 3] = pr_number # assigne the same new pr_number to zf_2\n\n self.redundancy_pool[pr_key] = new_sl # stored into the redundancy pool\n\n\n for n_count in np.arange(n_exist):\n # search for the existing keys\n n_ind1 = pool_exist[n_count]\n n_ind2 = pool_exist_cov[n_count]\n pr_number = int(zf_1[n_ind1, 3])# catch up the pr_number\n pr_key = 'sl_' + str(pr_number) # this pr_key should already exist in the pool. 
\n\n self.redundancy_pool[pr_key].add([nslice+1, zf_2[n_ind2, 4]])\n zf_2[n_ind2, 3] = pr_number # update the pr_number in the adjacent slice", "def image_opening(img, c=0.7, thresh=225):\r\n er_img = cv2.dilate(img, cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)))\r\n open_img = cv2.morphologyEx(er_img, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7)), iterations=1)\r\n new_img = img.copy()\r\n for j in range(len(img)):\r\n for i in range(len(img[0])):\r\n if img[j][i] > thresh:\r\n if open_img[j][i] < thresh:\r\n new_img[j][i] = int(c * open_img[j][i] + (1 - c) * img[j][i])\r\n return new_img", "def _filter_imgs(self, min_size=32):\r\n valid_inds = []\r\n ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\r\n for i, img_info in enumerate(self.img_infos):\r\n if self.filter_empty_gt and self.img_ids[i] not in ids_with_ann:\r\n continue\r\n if min(img_info['width'], img_info['height']) >= min_size:\r\n valid_inds.append(i)\r\n return valid_inds", "def filter_images(self, images):\n status = self.day_or_night(images[0][1],\n self.gray_refs['day'][0],\n self.gray_refs['night'][0])\n print status\n exclusions = self.gray_refs[status]\n threshold = 0.7\n last_ref = None\n result = []\n\n for filename, gray_img, raw_img in images:\n skip = False\n if last_ref:\n dist = ssim(gray_img, exclusions[last_ref], multichannel=False)\n if dist > threshold:\n skip = True\n\n if not skip:\n for i, gray_ref in enumerate(exclusions):\n if i == last_ref:\n continue\n dist = ssim(gray_img, gray_ref, multichannel=False)\n if dist > threshold:\n skip = True\n last_ref = i\n break\n\n if not skip:\n if (time.time() - self.last_notify) > notify_thresh:\n send_alert('Alert! Motion detected near front door.')\n self.last_notify = time.time()\n result.append((filename, gray_img, raw_img))\n return result", "def apply_2_class_filterV4(pred_csv, out_csv, filter_info_file, thr=0.08):\n df_pred = pd.read_csv(pred_csv)\n df_filter = pd.read_csv(filter_info_file)\n pred_strs = df_pred['PredictionString'].tolist()\n img_ids = df_pred['image_id'].tolist()\n\n num_normal = 0\n for idx in tqdm(range(len(pred_strs))):\n im_id = img_ids[idx]\n cls_score = df_filter[df_filter['image_id'] == im_id]['target'].tolist()[0]\n if cls_score < thr: # No finding\n pred_strs[idx] = '14 1 0 0 1 1'\n num_normal += 1\n print('number of No finding images: ', num_normal)\n\n df_save = pd.DataFrame()\n df_save['image_id'] = img_ids\n df_save['PredictionString'] = pred_strs\n df_save.to_csv(out_csv, index=False)\n print('all done!')", "def calibrate(science_list_fname, master_flat_fname, master_dark_fname, hp_map_fname, bp_map_fname, mask_bad_pixels = False,\n clean_Bad_Pix=True, replace_nans=True, background_fname = None, outdir = None):\n\n #Get the list of science frames\n #science_list = np.loadtxt(science_list_fname, dtype=str)\n science_list = science_list_fname\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the master flat\n master_flat_hdu = f.open(master_flat_fname)\n master_flat = master_flat_hdu[0].data\n print((\"Dividing each file by {}\".format(master_flat_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open the bad pixel map from flat\n bp_map_hdu = f.open(bp_map_fname)\n bad_pixel_map = bp_map_hdu[0].data\n bad_pixel_map_bool = 
np.array(bad_pixel_map, dtype=bool)\n print((\"Using bad pixel map {}\".format(bp_map_fname)))\n\n #now if hot pixel map from dark is also given\n if hp_map_fname != None:\n hp_map_hdu = f.open(hp_map_fname)\n hot_pixel_map = hp_map_hdu[0].data\n bad_pixel_map_bool = np.logical_or(bad_pixel_map_bool, hot_pixel_map.astype(bool) )\n\n\n if background_fname != None:\n background_hdu = f.open(background_fname)\n background = background_hdu[0].data\n print(\"Subtracting background frame {} from all science files\".format(background_fname))\n\n\n for fname in science_list:\n #Open the file\n print((\"Calibrating {}\".format(fname\n )))\n hdu = f.open(fname)\n data = hdu[0].data\n science_exp_time = hdu[0].header['EXPTIME']\n\n if dark_exp_time != science_exp_time:\n warnings.warn(\"The master dark file doesn't have the same exposure time as the data. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = science_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #Subtract the dark, divide by flat\n redux = ((data - factor*master_dark)/master_flat)\n #get rid of crazy values at bad pixel\n redux = redux*~bad_pixel_map_bool\n\n if background_fname != None:\n redux -= background\n\n if clean_Bad_Pix:\n # plt.plot(bad_pixel_map_bool)\n redux = cleanBadPix(redux, bad_pixel_map_bool)\n #redux = ccdproc.cosmicray_lacosmic(redux, sigclip=5)[0]\n\n # redux = ccdproc.cosmicray_median(redux, mbox=7, rbox=5, gbox=7)[0]\n\n #Mask the bad pixels if the flag is set\n if mask_bad_pixels:\n redux *= ~bad_pixel_map_bool\n\n if replace_nans:\n # nan_map = ~np.isfinite(redux)\n # redux = cleanBadPix(redux, nan_map)\n # plt.imshow(redux-after)\n nanmask = np.isnan(redux) #nan = True, just in case this is useful\n redux = np.nan_to_num(redux)\n\n #Put the cablibrated data back in the HDU list\n hdu[0].data = redux\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"Subtracting {} from each flat file\".format(master_dark_fname)\n hdu[0].header['HISTORY'] = \"Dividing each file by {}\".format(master_flat_fname)\n\n if background_fname != None:\n hdu[0].header['HISTORY'] = \"Subtracted background frame {}\".format(background_fname)\n\n if mask_bad_pixels:\n hdu[0].header['HISTORY'] = \"Masking all bad pixels found in {}\".format(bp_map_fname)\n\n if clean_Bad_Pix:\n hdu[0].header['HISTORY'] = \"Cleaned all bad pixels found in {} using a median filter\".format(bp_map_fname)\n\n # #Append the bad pixel list to the HDU list\n # hdu.append(f.PrimaryHDU([bad_pixel_map]))\n # hdu[1].header['HISTORY'] = \"Appending bad pixel map :{}\".format(bp_map_fname)\n # hdu[1].header['HISTORY'] = \"0 = good pixel\"\n # hdu[1].header['HISTORY'] = \"1 = bad pixel from flat fields\"\n # hdu[1].header['HISTORY'] = \"2 = hot pixel from darks\"\n\n outname = fname.split('.')[0]+\"_calib.fits\"\n\n #if an output directory is specified we can write out to that directory instead\n #making sure to take only the stuff after the last '/' to avoid directory issues from fname\n if outdir:\n outname = outdir + fname.split('/')[-1]\n\n print((\"Writing calibrated file to {}\".format(outname)))\n #Save the calibrated file\n hdu.writeto(outname, overwrite=True)\n\n # f.PrimaryHDU(redux).writeto('redux_'+i, overwrite = True)", "def apply_corrections_from_file(self):\n with self.open_file() as file:\n lines = file.readlines()\n for detector in self.detectors.itervalues():\n nchanged = 
detector.apply_gain_corrections(lines)\n # Set a modified state if any channels were changed\n self.image_window.modified = nchanged > 0", "def apply_new_mask(ifgs, mask_old, mask_new):\n\n \n for ifg_n, ifg in enumerate(ifgs): # Loop through each source\n ifg_r2 = col_to_ma(ifg, mask_old) # turn it from a row vector into a rank 2 masked array \n ifg_r2_new_mask = ma.array(ifg_r2, mask = mask_new) # apply the new mask \n ifg_r1_new_mask = ma.compressed(ifg_r2_new_mask) # convert to row vector \n if ifg_n == 0: # if it's the first ifg.. \n n_pixs_new = ifg_r1_new_mask.shape[0] # get the new number of pixels \n ifgs_new_mask = np.zeros((ifgs.shape[0], n_pixs_new)) # initiate an array of the correct size\n ifgs_new_mask[ifg_n, :] = ifg_r1_new_mask # put the row vector into the array\n return ifgs_new_mask", "def filter_and_relabel(labels, label_gids, min_imgs_per_occurence, occur_unixtimes=None):\n label_nGids = np.array(list(map(len, label_gids)))\n label_isvalid = label_nGids >= min_imgs_per_occurence\n occur_gids = ut.compress(label_gids, label_isvalid)\n if occur_unixtimes is not None:\n occur_unixtimes = ut.compress(occur_unixtimes, label_isvalid)\n # Rebase ids so occurrence0 has the most images\n # occur_ids = list(range(label_isvalid.sum()))\n # else:\n # sort by time instead\n unixtime_arr = np.array(occur_unixtimes)\n # Reorder occurrences so the oldest has the lowest number\n occur_gids = ut.take(label_gids, unixtime_arr.argsort())\n occur_ids = list(range(len(occur_gids)))\n return occur_ids, occur_gids", "def _update_edges_filtered(self, change=None):\n self._edges_filter.val_range = self.edges_range\n edges_ids = self._edges_filter.val_ids\n self.edges_ids = self._filter_edges(edges_ids)", "def refineDetectedMarkers(image, board, detectedCorners, detectedIds, rejectedCorners, cameraMatrix=None, distCoeffs=None, minRepDistance=None, errorCorrectionRate=None, checkAllOrders=None, recoveredIdxs=None, parameters=None):\n pass", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def white_balance(device, img, mode='hist',debug=None, roi=None):\n device += 1\n\n ori_img = np.copy(img)\n\n if roi is not None:\n roiint = all(isinstance(item, int) for item in roi)\n\n if len(roi) != 4 | roiint is False:\n fatal_error('If ROI is used ROI must have 4 elements as a list and all must be integers')\n else:\n pass\n\n if len(np.shape(img)) == 3:\n iy, ix, iz = np.shape(img)\n hmax=255\n type = np.uint8\n else:\n iy, ix = np.shape(img)\n if img.dtype == 'uint8':\n hmax=255\n type=np.uint8\n elif img.dtype == 'uint16':\n hmax=65536\n type=np.uint16\n\n mask = np.zeros((iy, ix, 3), dtype=np.uint8)\n\n if roi is None:\n x = 0\n y = 0\n w = ix\n h = iy\n\n else:\n x = roi[0]\n y = roi[1]\n w = roi[2]\n h = roi[3]\n\n if len(np.shape(img)) == 3:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n c1 = img[:, :, 0]\n c2 = img[:, :, 1]\n c3 = img[:, :, 2]\n if mode == 'hist':\n channel1 = _hist(c1, hmax, x, y, h, w, type)\n channel2 = _hist(c2, hmax, x, y, h, w, type)\n channel3 = _hist(c3, hmax, x, y, h, w, type)\n else:\n channel1 = _max(c1, hmax, mask, x, y, h, 
w, type)\n channel2 = _max(c2, hmax, mask, x, y, h, w, type)\n channel3 = _max(c3, hmax, mask, x, y, h, w, type)\n\n finalcorrected = np.dstack((channel1, channel2, channel3))\n\n else:\n cv2.rectangle(ori_img, (x, y), (x + w, y + h), (255, 255, 255), 3)\n if mode == 'hist':\n finalcorrected = _hist(img, hmax, x, y, h, w, type)\n elif mode == 'max':\n finalcorrected = _max(img, hmax, mask, x, y, h, w, type)\n\n if debug == 'print':\n print_image(ori_img, (str(device) + '_whitebalance_roi.png'))\n print_image(finalcorrected, (str(device) + '_whitebalance.png'))\n\n elif debug == 'plot':\n plot_image(ori_img, cmap='gray')\n plot_image(finalcorrected, cmap='gray')\n\n return device, finalcorrected", "def increase_contrast(img, channels=(0, 1, 2)):\n equalized = img.copy()\n\n for k in channels:\n equalized[:, :, k] = cv2.equalizeHist(img[:, :, k])\n\n return equalized", "def dispersed_pixel(x0, y0, width, height, lams, flxs, order, wmin, wmax,\n sens_waves, sens_resp, seg_wcs, grism_wcs, ID, naxis,\n oversample_factor=2, extrapolate_sed=False, xoffset=0,\n yoffset=0):\n\n # Setup the transforms we need from the input WCS objects\n sky_to_imgxy = grism_wcs.get_transform('world', 'detector')\n imgxy_to_grismxy = grism_wcs.get_transform('detector', 'grism_detector')\n\n # Setup function for retrieving flux values at each dispersed wavelength\n if len(lams) > 1:\n # If we have direct image flux values from more than one filter (lambda),\n # we have the option to extrapolate the fluxes outside the\n # wavelength range of the direct images\n if extrapolate_sed is False:\n flux = interp1d(lams, flxs, fill_value=0., bounds_error=False)\n else:\n flux = interp1d(lams, flxs, fill_value=\"extrapolate\", bounds_error=False)\n else:\n # If we only have flux from one lambda, just use that\n # single flux value at all wavelengths\n def flux(x):\n return flxs[0]\n\n # Get x/y positions in the grism image corresponding to wmin and wmax:\n # Start with RA/Dec of the input pixel position in segmentation map,\n # then convert to x/y in the direct image frame corresponding\n # to the grism image,\n # then finally convert to x/y in the grism image frame\n x0_sky, y0_sky = seg_wcs(x0, y0)\n x0_xy, y0_xy, _, _ = sky_to_imgxy(x0_sky, y0_sky, 1, order)\n xwmin, ywmin = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, wmin, order)\n xwmax, ywmax = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, wmax, order)\n dxw = xwmax - xwmin\n dyw = ywmax - ywmin\n\n # Compute the delta-wave per pixel\n dw = np.abs((wmax - wmin) / (dyw - dxw))\n\n # Use a natural wavelength scale or the wavelength scale of the input SED/spectrum,\n # whichever is smaller, divided by oversampling requested\n input_dlam = np.median(lams[1:] - lams[:-1])\n if input_dlam < dw:\n dlam = input_dlam / oversample_factor\n else:\n # this value gets used when we only have 1 direct image wavelength\n dlam = dw / oversample_factor\n\n # Create list of wavelengths on which to compute dispersed pixels\n lambdas = np.arange(wmin, wmax + dlam, dlam)\n n_lam = len(lambdas)\n\n # Compute lists of x/y positions in the grism image for\n # the set of desired wavelengths:\n # As above, first get RA/Dec of segmentation map pixel positions,\n # then convert to x/y in image frame of grism image,\n # then convert to x/y in grism frame.\n x0_sky, y0_sky = seg_wcs([x0] * n_lam, [y0] * n_lam)\n x0_xy, y0_xy, _, _ = sky_to_imgxy(x0_sky, y0_sky, lambdas, [order] * n_lam)\n x0s, y0s = imgxy_to_grismxy(x0_xy + xoffset, y0_xy + yoffset, lambdas, [order] * n_lam)\n\n # If none 
of the dispersed pixel indexes are within the image frame,\n # return a null result without wasting time doing other computations\n if x0s.min() >= naxis[0] or x0s.max() < 0 or y0s.min() >= naxis[1] or y0s.max() < 0:\n return None\n\n # Compute arrays of dispersed pixel locations and areas\n padding = 1\n xs, ys, areas, index = get_clipped_pixels(\n x0s, y0s,\n padding,\n naxis[0], naxis[1],\n width, height\n )\n lams = np.take(lambdas, index)\n\n # If results give no dispersed pixels, return null result\n if xs.size <= 1:\n return None\n\n # compute 1D sensitivity array corresponding to list of wavelengths\n sens, no_cal = create_1d_sens(lams, sens_waves, sens_resp)\n\n # Compute countrates for dispersed pixels. Note that dispersed pixel\n # values are naturally in units of physical fluxes, so we divide out\n # the sensitivity (flux calibration) values to convert to units of\n # countrate (DN/s).\n counts = flux(lams) * areas / sens\n counts[no_cal] = 0. # set to zero where no flux cal info available\n\n return xs, ys, areas, lams, counts, ID", "def filter_and_correct_expression_and_image_features(tissue, model, aggregation, patch_size, M, k, pc_correction=False, tf_correction=False):\n\n\n\n\n # Filter expression\n Y, X, dIDs, tIDs, tfs, ths, t_idx = extract_final_layer_data(tissue, model, aggregation, patch_size)\n filt_X, filt_tIDs, final_exp_idx = filter_expression(X, tIDs, M, k)\n\n\n\n if pc_correction:\n print ('Correcting with {} expression PCs'.format(pc_correction))\n pca = PCA(n_components=pc_correction)\n\n\n pca_predictors = pca.fit_transform(filt_X)\n\n # Correct Y\n lr = LinearRegression()\n lr.fit(pca_predictors, Y)\n predicted_Y = lr.predict(pca_predictors)\n corrected_Y = Y - predicted_Y\n\n # Correct X\n projected_filt_X = np.dot(pca_predictors,pca.components_)\n corrected_filt_X = filt_X - projected_filt_X\n\n # Set as return variables\n final_X = corrected_filt_X\n final_Y = corrected_Y\n\n elif tf_correction:\n print('Correcting with all technical factors')\n tf_Y = Y[t_idx,:]\n tf_filt_X = filt_X[t_idx,:]\n\n tfs[list(ths).index('SMTSISCH')] = np.log2(tfs[list(ths).index('SMTSISCH')] + 1)\n tf_predictors = tfs\n\n #Correct Y\n lr_Y = LinearRegression()\n lr_Y.fit(tf_predictors, tf_Y)\n tf_Y_predicted = lr_Y.predict(tf_predictors)\n corrected_tf_Y = tf_Y - tf_Y_predicted\n\n #Correct X\n lr_X = LinearRegression()\n lr_X.fit(tf_predictors, tf_filt_X)\n tf_filt_X_predicted = lr_X.predict(tf_predictors)\n corrected_tf_filt_X = tf_filt_X - tf_filt_X_predicted\n\n # Set as return variables\n final_X = corrected_tf_filt_X\n final_Y = corrected_tf_Y\n else:\n # Set unmodified values as return variables\n final_X = filt_X\n final_Y = Y\n\n return final_Y, final_X, dIDs, filt_tIDs, tfs, ths, t_idx", "def apply_ec_filters(\n current_gbk_objs,\n ec_filters,\n connection,\n):\n logger = logging.getLogger(__name__)\n \n ec_gbk_ids = set()\n\n # Retrieve all Genbank.genbank_ids for each EC number\n for ec in tqdm(ec_filters, desc=\"Retrieving gbks for EC# filters\"):\n with Session(bind=connection) as session:\n gbk_query = session.query(Genbank.genbank_id).\\\n join(Ec, Genbank.ecs).\\\n filter(Ec.ec_number == ec).\\\n all()\n\n for gbk_id in gbk_query:\n ec_gbk_ids.add(gbk_id)\n\n if len(ec_gbk_ids) == 0:\n logger.error(\n \"Retrieved NO proteins matching the provided EC numbers\\n\"\n \"Check the local CAZyme db contains the EC numbers provided\\n\"\n \"Terminating program\"\n )\n sys.exit(1)\n \n ec_filtered_gbks = set()\n\n for gbk_record in tqdm(current_gbk_objs, 
desc=\"Checking gbk records against EC filters\"):\n if (gbk_record.genbank_id,) in ec_gbk_ids:\n ec_filtered_gbks.add(gbk_record)\n \n return ec_filtered_gbks", "def subtract_reference_pixels(img,no_channels=32,statfunc=biweight_location,vertical_smooth_window=15,array_size=2048):\n correctedStrips = []\n for channelstrip in np.split(img,np.arange(1,no_channels)*int(array_size/no_channels),axis=1):\n # Correct odd and even columns seperately\n topRefeven = statfunc(channelstrip[:4,0::2])\n topRefodd = statfunc(channelstrip[:4,1::2]) # Calculate median/mean of odd and even columns \n botRefeven = statfunc(channelstrip[-4:,0::2])\n botRefodd = statfunc(channelstrip[-4:,1::2])\n\n Corrected_channelstrip = channelstrip.copy()\n Corrected_channelstrip[:,0::2] = channelstrip[:,0::2] - np.linspace(topRefeven,botRefeven,channelstrip.shape[0])[:,np.newaxis]\n Corrected_channelstrip[:,1::2] = channelstrip[:,1::2] - np.linspace(topRefodd,botRefodd,channelstrip.shape[0])[:,np.newaxis]\n\n correctedStrips.append(Corrected_channelstrip)\n\n HRefSubtractedImg = np.hstack(correctedStrips)\n VRef = statfunc(np.hstack((HRefSubtractedImg[:,:4],HRefSubtractedImg[:,-4:])),axis=1)\n # Remove any DC offset at the edges which could arise due to low value columns in vertical reference pixels\n VRef = VRef - statfunc(np.concatenate((VRef[:4],VRef[-4:]))) # We can set it to zero since we have subtracted top and bottom reference pixels\n if vertical_smooth_window > 1:\n vsmoothdegree = 2 if vertical_smooth_window >= 5 else 1\n VRef = savgol_filter(VRef,window_length=vertical_smooth_window,polyorder=vsmoothdegree)\n return HRefSubtractedImg - VRef[:,np.newaxis]", "def filterBankPatch(img, width=5):\n half = width / 2 # e.g. for 5, it's 2\n imgE = Views.extendBorder(img)\n ops = [offset(imgE, [x, y]) for x in xrange(-half, half + 1) for y in xrange(-half, half + 1)]\n return ops", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] 
* yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def get_integral_descriptors(image,filtered_coords,wid=5):\n desc = []\n for coords in filtered_coords:\n patch = image[coords[0]-wid:coords[0]+wid+1,\n coords[1]-wid:coords[1]+wid+1]\n desc.append(patch)\n return desc", "def cs4243_filter_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n filtered_image[i, j] = np.multiply(kernel, recep_area).sum()\n ###\n\n return filtered_image", "def test_thresh_color(images):\n for img in images:\n # Get the stack bounds to draw onto the main image\n stack_bounds = get_stack_bounds(img)\n\n # Get all the sub-images for each stack\n stack_images = get_stack_images(img)\n\n SIZE = (200, 300)\n filtered_imgs = []\n\n # Loop through all the stacks\n for stack_bound, stack_img in zip(stack_bounds, stack_images):\n #Draw the rectangle for the current stack\n disp = deepcopy(img)\n 
located_stacks_img = draw_rect(np.copy(disp), stack_bound, [0,0,0])\n cv2.imshow('Filtering stack', located_stacks_img)\n\n # Convert the current stack image into hsv\n stack_img_hsv = cv2.cvtColor(stack_img, cv2.COLOR_BGR2HSV)\n for i, color in enumerate(COLORS):\n contours = thresh_color(stack_img, stack_img_hsv, COLORS[color])\n\n # Draw the contours\n stack2 = deepcopy( stack_img)\n cont_img = cv2.drawContours(stack2, contours, -1, (255,255,255), 2)\n # cont_img = cv2.resize(cont_img, SIZE)\n\n # Put the number of contours as text\n txt = '{}:{}'.format(color, len(contours))\n print(txt)\n\n # Display the contour information to the screen\n cv2.imshow(txt, scale_image(cont_img, 9))\n filtered_imgs.append(cont_img)\n cv2.moveWindow(txt, 180*i, 600)\n # cv2.imshow('filtered_images', np.hstack(filtered_imgs))\n print()\n # Skip to the next image\n if cv2.waitKey(0) == ord('1'):\n break\n cv2.destroyAllWindows()", "def get_cross_correlation_subpixel_offset(self, satLimit=16e3,\n cutoutSize=21):\n # TODO: Test if rough pixel-level alignment is required\n pass\n\n # # Test if a quick WCS integer pixel alignment is possible.\n # if self.image1.has_wcs and self.image2.has_wcs:\n # # Compute the integer pixel offsets using WCS\n # dx, dy = self.get_wcs_integer_pixel_offset()\n # else:\n # # Compute the integer pixel offsets using cross-correlation\n # dx, dy = self.get_cross_correlation_integer_pixel_offset()\n #\n # # Shift image2 array to approximately match image1\n # shiftedImage2 = self.image2.shift(-dx, -dy)\n\n # Compute a combined image and extract stars from that combined image\n combinedImage = 0.5*(self.image1 + self.image2)\n\n xStars, yStars = combinedImage.get_sources(\n satLimit = satLimit,\n crowdLimit = np.sqrt(2)*cutoutSize,\n edgeLimit = cutoutSize + 1\n )\n\n # Grab the list of star cutouts from image one\n starCutouts1 = self.image1.extract_star_cutouts(xStars, yStars,\n cutoutSize = cutoutSize)\n\n # Grab the list of star cutouts from shifted image two\n starCutouts2 = self.image2.extract_star_cutouts(xStars, yStars,\n cutoutSize = cutoutSize)\n\n # Cull any bad cutouts from the cutout list\n starCutouts1, starCutouts2 = self._parse_star_cutouts(\n starCutouts1,\n starCutouts2\n )\n\n\n # Build the square mosaics of cutouts\n cutoutMosaic1 = self._build_star_cutout_mosaic(starCutouts1)\n cutoutMosaic2 = self._build_star_cutout_mosaic(starCutouts2)\n\n #\n # TODO: remove this code block if possible\n #\n # Construct a NEW ImagePair instance from these two mosaics\n mosaicPair = ImagePairOffsetGetter(\n ReducedScience(cutoutMosaic1),\n ReducedScience(cutoutMosaic2)\n )\n\n # Replace any suprious values with local median values\n array1, array2 = mosaicPair._replace_negatives_and_nans_with_medians()\n\n # Do an array flipped convolution, which is a correlation.\n corrImage = signal.fftconvolve(\n array2,\n array1[::-1, ::-1],\n mode='same'\n )\n\n # Fix any suprious pixel values\n corrImage = ImagePairOffsetGetter._fix_bad_correlation_image_pixels(corrImage)\n\n # Grab the subpixel precision offsets from the cross correlation image\n dx, dy = ImagePairOffsetGetter._extract_subpixel_offset_from_correlation_image(corrImage)\n\n # # Add the integer and subpixel offsets and return them to the user\n # dx += dx1\n # dy += dy1\n\n return dx, dy", "def filter_img(img, new_img, f):\n\n datas = img.getdata()\n new_data = []\n for item in datas:\n if f(item[0]) and f(item[1]) and f(item[2]):\n new_data.append((0, 0, 0, 0))\n else:\n new_data.append(item)\n new_img.putdata(new_data)", 
"def cut_spectrum(input_spectrum, desired_frequency_range):\n channels_ip = []\n for ip in input_spectrum.GetChannels():\n channel_ip = []\n channel_op = []\n for n, i in enumerate(ip):\n if n > desired_frequency_range[0] / input_spectrum.GetResolution() and n < desired_frequency_range[1] / \\\n input_spectrum.GetResolution():\n channel_ip.append(i)\n else:\n channel_ip.append(0.0)\n channel_op.append(0.0)\n channels_ip.append(tuple(channel_ip))\n input_spectrum_modified = sumpf.Spectrum(channels=tuple(channels_ip), resolution=input_spectrum.GetResolution(),\n labels=input_spectrum.GetLabels())\n return input_spectrum_modified", "def filter_fusion(luma_bin, sat_bin, grad_bin, mentor_bin):\n binary = np.zeros_like(luma_bin)\n binary[ (((grad_bin==1) | (sat_bin==1)) & (luma_bin==1)) | (mentor_bin==1) ] = 1\n\n # Erosion and dilation - Seems doesn't work. Mask-off\n #kernel = np.ones((5,5))\n #binary_dilation = cv2.dilate(binary, kernel, iterations=1)\n #binary_erosion = cv2.erode(binary_dilation, kernel, iterations=1)\n #binary = binary_erosion\n\n return binary", "def selection_fn(self, trace, points, selector):\n self.segment = self.fig.layout[\"sliders\"][0].active\n seg = self.segment\n\n xrange = selector.xrange\n wave = self.wave[seg]\n mask = self.mask[seg]\n\n # Choose pixels and value depending on selected type\n if self.mask_type == \"good\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 0)\n elif self.mask_type == \"bad\":\n value = 0\n idx = (wave > xrange[0]) & (wave < xrange[1])\n elif self.mask_type == \"line\":\n value = 1\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask != 0)\n print(np.count_nonzero(idx))\n elif self.mask_type == \"cont\":\n value = 2\n idx = (wave > xrange[0]) & (wave < xrange[1]) & (mask == 1)\n else:\n return\n\n # Apply changes if any\n if np.count_nonzero(idx) != 0:\n self.mask[seg][idx] = value\n\n with self.fig.batch_update():\n # Update Line Mask\n m = self.line_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 1\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y\n\n # Update Cont Mask\n m = self.cont_mask_idx[seg]\n x, y = self.create_mask_points(\n self.wave[seg], self.spec[seg], self.mask[seg], 2\n )\n self.fig.data[m].x = x\n self.fig.data[m].y = y", "def minFrameFilter(self, min_frames, keep_previous = True):\n\n # In case of no filter give warning and stop\n if not self.index_filter:\n print(\"WARNING! Cells not tracked or all cells filtered. No min_frames condition applied.\")\n return None\n\n if not keep_previous:\n # reset variables if not keeping the old values\n self.index_filter = list(range(len(self.id_seq)))\n self._filter_config = {}\n else:\n # check whether this was still done\n if self.MIN_FRAMES_KEY in self._filter_config:\n # if less restrictive condition, do nothing\n if self._filter_config[self.MIN_FRAMES_KEY] <= min_frames:\n return self.index_filter \n \n # check conditions\n did_not_pass = []\n for idx, id_seq in enumerate(self.id_seq):\n if len(id_seq) < min_frames:\n did_not_pass.append(idx)\n\n # Apply changes\n self.index_filter = list(set(self.index_filter) - set(did_not_pass))\n\n # save filter configuration\n self._filter_config[self.MIN_FRAMES_KEY] = min_frames\n\n\n if not self.index_filter:\n print(\"WARNING! 
ALL CELLS FILTERED, consider changing the parameter min_frames\")\n else:\n print(\"%d tracks from a total of %d met the min_frames condition\"%(len(self.index_filter), len(self.id_seq)))\n\n return self.index_filter", "def find_img2d_candidates(image, **kwargs):\n\n # filter_kernel = np.array([[-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, 8/225, 8/225, 8/225, 8/225, 8/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225],\n # [-1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225, -1/225]])\n\n filter_kernel = np.array([[-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 
-2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, 11 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -2 / 324, -2 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -1/324, -2/324],\n [-2 / 324, -2 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1 / 324, -1/324, -2/324, -2/324],\n [-2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2 / 324, -2/324, -2/324, -2/324]])\n\n res = sg.convolve2d(image, filter_kernel, mode='same', boundary='fill', fillvalue=0)\n coord_x, coord_y = find_max_coords(np.absolute(res))\n\n return coord_x, coord_y", "def change_image_pixels(self):\n try:\n image = Image.open(self.cropped_captcha_filename, 'r')\n pixels = list(image.getdata())\n new_pixels_list = []\n for rgb in pixels:\n if rgb[0] < 160:\n rgb = (0, 0, 0)\n if rgb[0] > 160:\n rgb = (255, 255, 255)\n new_pixels_list.append(rgb)\n image.putdata(new_pixels_list)\n image.save(self.cropped_captcha_filename)\n except UnidentifiedImageError as error:\n raise error\n print(error)", "def make_irac_lightmap(id_keep, hr_segmap, hr_mask, irac_psf, irac_drz, irac_output, blur_threshold=0.1, sigma=1.0):\n # First step, zero-out the non cluster members\n mask = filter_segmap(hr_segmap, id_keep, hr_mask, blur_kernel=irac_psf, \n threshold=blur_threshold)\n # Now we have a mask image in high-res, drizzle the pixels onto the low-res\n # pixel grid\n if os.path.exists(\"irac_mask.fits\"):\n os.system('rm irac_mask.fits')\n drizzle_mask(hr_mask, irac_drz, \"irac_mask.fits\")\n irac_input = pyfits.getdata(irac_drz)\n irac_mask = pyfits.getdata(\"irac_mask.fits\")\n irac_map = np.where(irac_mask>0, irac_input, 0.)\n # Also smooth the output light map with a Gaussian kernel\n if sigma > 0:\n print \"Smoothing the IRAC mask...\"\n irac_map = filters.gaussian_filter(irac_map, sigma)\n irac_hdr = pyfits.getheader(irac_drz)\n os.system('rm %s' % irac_output)\n pyfits.append(irac_output, data=irac_map, header=irac_hdr)\n print \"Done.\"", "def object_selection(self, ch1_bright_mag, ch2_bright_mag, selection_band_faint_mag, selection_band='I2_MAG_APER4'):\n\n clusters_to_remove = []\n for 
cluster_id, cluster_info in self._catalog_dictionary.items():\n # Read in the catalog\n se_catalog = Table.read(cluster_info['se_cat_path'], format='ascii')\n\n # Add the mask name to the catalog. Extracting only the system agnostic portion of the path\n se_catalog['MASK_NAME'] = re.search(r'Data_Repository/.*?\\Z', cluster_info['cov_mask_path']).group(0)\n\n # Preform SExtractor Flag cut. A value of under 4 should indicate the object was extracted well.\n se_catalog = se_catalog[se_catalog['FLAGS'] < 4]\n\n # Preform a faint-end magnitude cut in selection band.\n se_catalog = se_catalog[se_catalog[selection_band] <= selection_band_faint_mag]\n\n # Preform bright-end cuts\n # Limits from Eisenhardt+04 for ch1 = 10.0 and ch2 = 9.8\n se_catalog = se_catalog[se_catalog['I1_MAG_APER4'] > ch1_bright_mag] # [3.6] saturation limit\n se_catalog = se_catalog[se_catalog['I2_MAG_APER4'] > ch2_bright_mag] # [4.5] saturation limit\n\n # For the mask cut we need to check the pixel value for each object's centroid.\n # Read in the mask file\n mask, header = fits.getdata(cluster_info['cov_mask_path'], header=True)\n\n # Recast the mask image as a boolean array so we can use it as a check on the catalog entries\n mask = mask.astype(bool)\n\n # Read in the WCS from the mask\n w = WCS(header)\n\n # Get the objects pixel coordinates\n xy_data = np.array(w.wcs_world2pix(se_catalog['ALPHA_J2000'], se_catalog['DELTA_J2000'], 0))\n\n # Floor the values and cast as integers so we have the pixel indices into the mask\n xy_pix_idxs = np.floor(xy_data).astype(int)\n\n # Filter the catalog according to the boolean value in the mask at the objects' locations.\n se_catalog = se_catalog[mask[xy_pix_idxs[1], xy_pix_idxs[0]]]\n\n # If we have completely exhausted the cluster of any object, we should mark it for removal otherwise add it\n # to the data structure\n if se_catalog:\n cluster_info['catalog'] = se_catalog\n else:\n clusters_to_remove.append(cluster_id)\n\n # Remove any cluster that has no objects surviving our selection cuts\n for cluster_id in clusters_to_remove:\n self._catalog_dictionary.pop(cluster_id, None)", "def image_pre_filtering(left_img: np.ndarray, right_img: np.ndarray) -> tuple:\n\n def clahe(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Contrast Limited Adaptive Histogram Equalization\n :param image: the image to be filtered\n :return: the image filtered with CLAHE\n \"\"\"\n clahe_filter = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n return clahe_filter.apply(image)\n\n def logarithmic(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply Logarithmic Transform\n :param image: the image to be filtered\n :return: the image filtered with logarithmic transform\n \"\"\"\n c = max_disparity / math.log(1 + np.max(image))\n sigma = 1\n for i in range(0, image.shape[1]): # image width\n for j in range(0, image.shape[0]): # image height\n # compute logarithmic transform\n image[j, i] = int(c * math.log(1 + ((math.exp(sigma) - 1) * image[j, i])))\n return image\n\n def exponential(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Perform pre-processing - raise to the power, as this subjectively appears\n to improve subsequent disparity calculation\n :param image:\n :return:\n \"\"\"\n return np.power(image, 0.75).astype('uint8')\n\n def apply_filter(image: np.ndarray) -> np.ndarray:\n \"\"\"\n Choose which filter to apply to both images, this could be a combination too\n :param image: the image to be filtered\n :return:\n \"\"\"\n # choose filters to apply\n return clahe(image)\n\n return 
apply_filter(left_img), apply_filter(right_img)", "def checkpoint_filter_fn(state_dict, model):\n if state_dict['patch_pos'].shape != model.patch_pos.shape:\n state_dict['patch_pos'] = resize_pos_embed(\n state_dict['patch_pos'], model.patch_pos,\n getattr(model, 'num_tokens', 1), model.pixel_embed.grid_size)\n return state_dict", "def channel_blend(pixSrc, pixPng, srcH, srcW, x, y, mode='weight', color_match=False):\n modes = [item for i, item in blend_mode.items()]\n # 1.find all indices satisfying conditions, and replace the value of indices in source image with logo image.\n # note: from pillow to numpy, (w,h) has converted to (h,w).\n index = np.where(pixPng[:, :, 3] > 15)\n y_id = index[0] + y - 1\n x_id = index[1] + x - 1\n\n # ensure the exceeding part remained in boundary.\n y_id = np.where(y_id >= srcH, srcH - 1, y_id)\n x_id = np.where(x_id >= srcW, srcW - 1, x_id)\n id = (y_id, x_id)\n\n # matching logo color with source image.\n if color_match:\n pixSrc_ = pixSrc.copy()[..., :3]\n pixPng_ = pixPng.copy()[..., :3]\n mean_source, stddev_source = cv2.meanStdDev(pixSrc_)\n mean_png, stddev_png = cv2.meanStdDev(pixPng_)\n mdiff = mean_png - mean_source\n mdiff = np.array(mdiff).reshape((1, 1, 3))\n pixPng_ = pixPng_.astype(np.float64)\n pixPng_ -= mdiff\n pixPng_ = np.clip(pixPng_, 0, 255)\n pixPng_ = pixPng_.astype(np.uint8)\n pixPng[..., :3] = pixPng_\n\n if mode not in modes: raise NotImplementedError(\n \"only {0:'naive',1:'weight',2:'poisson',3:'multiply'} are supported.\")\n if mode == 'weight':\n pixSrc = weight_paste(pixSrc, pixPng, id, index)\n elif mode == 'naive':\n pixSrc = naive_paste(pixSrc, pixPng, id, index)\n elif mode == 'poisson':\n pixSrc = poisson_blend(pixSrc, pixPng, id, index, x, y)\n elif mode == 'multiply':\n pixSrc = multiply(pixSrc, pixPng, id, index)\n\n return cv2.cvtColor(pixSrc, cv2.COLOR_RGBA2RGB)", "def _continuity(self, blob_pixels):\n\n # Total amount of blob pixels. 
If none, return.\n self.blob_size = blob_pixels.size\n if self.blob_size < 4:\n self.blobs = np.zeros(0)\n return\n\n # Find pixels that are continuous in x-direction\n x = blob_pixels[0, :]\n x_shifted = np.zeros(x.shape)\n x_shifted[1:-1] = np.copy(x[:-2])\n x_shifted[0] = -1\n x_shifted[-1] = -1\n\n blob_x = np.where(abs(x_shifted - x) > 1)\n blob_x = np.asarray(blob_x)\n blob_x[:, -1] += 1\n\n # For each continous set in x-direction, find pixels that are also continuous in y-direction\n for i in range(0, blob_x.shape[1]-1):\n x = blob_pixels[0, blob_x[0, i]:blob_x[0, i+1]]\n y = blob_pixels[1, blob_x[0, i]:blob_x[0, i+1]]\n arg_y = np.argsort(y)\n y_sorted = np.sort(y)\n \n y_shifted = np.zeros(y.shape)\n y_shifted[1:-1] = np.copy(y_sorted[:-2])\n y_shifted[0] = -1\n y_shifted[-1] = -1\n\n blob_y = np.where(abs(y_shifted - y_sorted) > 1)\n blob_y = np.asarray(blob_y)\n blob_y[:, -1] += 1\n \n # For pixels continuous in x- and y-direction, find centroids\n for j in range(0, blob_y.shape[1]-1):\n blob_indices = arg_y[np.asscalar(blob_y[:, j]):np.asscalar(blob_y[:, j+1])]\n x_center_temp = round(sum(x[blob_indices])/blob_indices.shape[0], 3)\n y_center_temp = round(sum(y[blob_indices])/blob_indices.shape[0], 3)\n\n # coordinate system with origin at image center, x-axis looking forward, y-axis looking upward\n # x-horizontal, y-vertical\n x_center = y_center_temp\n y_center = x_center_temp\n # x-forward, y-upward\n x_center = self.x_res - x_center\n y_center = self.y_res - y_center\n # x-y-centered\n x_center = x_center - (self.x_res / 2)\n y_center = y_center - (self.y_res / 2)\n # flip image 180 degrees bcs camera mounted upside down\n if self.side == 'right':\n x_center = -x_center\n y_center = -y_center\n\n if self.no_blobs == 0:\n self.blobs[0, 0] = x_center\n self.blobs[0, 1] = y_center\n\n else:\n self.blobs = np.append(self.blobs, [[x_center, y_center]], axis=0)\n \n self.no_blobs += 1", "def update_ci_values(filter_item, config_filename, log_level=logutil.logging.INFO):\n log.setLevel(log_level)\n # Check to see if the CI values need to be updated. If not, simply skip over all of this.\n update_ci_vals = False\n for phot_mode in ['aperture', 'segment']:\n if filter_item.configobj_pars.pars['quality control'].outpars['ci filter'][phot_mode]['lookup_ci_limits_from_table']:\n update_ci_vals = True\n if update_ci_vals:\n # read in the custom SVM param file data\n with open(config_filename) as f:\n json_data = json.load(f)\n for phot_mode in ['aperture', 'segment']:\n if filter_item.configobj_pars.pars['quality control'].outpars['ci filter'][phot_mode]['lookup_ci_limits_from_table']:\n log.info(\"NOTE: The 'lookup_ci_limits_from_table' setting in the 'quality control'>'{}' section of the parameters for filter image {} is set to 'True'. This means that any custom user-tuned values for 'ci_upper_limit' and 'ci_lower_limit' will be overwritten. 
To prevent this, please set 'lookup_ci_limits_from_table' to 'False' in the custom parameter file {}\".format(phot_mode, filter_item.drizzle_filename, config_filename))\n # set up inputs to ci_table.get_ci_from_file() and execute to get new CI values\n drizzled_image = filter_item.drizzle_filename\n ci_lookup_file_path = \"default_parameters/any\"\n diagnostic_mode = False\n ci_lower_limit = filter_item.configobj_pars.pars['quality control'].outpars['ci filter'][phot_mode]['ci_lower_limit']\n ci_upper_limit = filter_item.configobj_pars.pars['quality control'].outpars['ci filter'][phot_mode]['ci_upper_limit']\n ci_dict = ci_table.get_ci_from_file(drizzled_image, ci_lookup_file_path, log_level,\n diagnostic_mode=diagnostic_mode, ci_lower=ci_lower_limit,\n ci_upper=ci_upper_limit)\n log.debug(\"{} {} CI upper limit updated from {} to {}\".format(filter_item.drizzle_filename,\n phot_mode,\n ci_upper_limit,\n ci_dict[\"ci_upper_limit\"]))\n log.debug(\"{} {} CI lower limit updated from {} to {}\\n\".format(filter_item.drizzle_filename,\n phot_mode,\n ci_lower_limit,\n ci_dict[\"ci_lower_limit\"]))\n # update CI values\n json_data[drizzled_image[:-9]][\"default_values\"][\"quality control\"][\"ci filter\"][phot_mode][\"ci_lower_limit\"] = ci_dict[\"ci_lower_limit\"]\n json_data[drizzled_image[:-9]][\"default_values\"][\"quality control\"][\"ci filter\"][phot_mode][\"ci_upper_limit\"] = ci_dict[\"ci_upper_limit\"]\n\n # Write out the updated custom SVM param file data back to file.\n with open(config_filename, 'w') as f:\n json.dump(json_data, f, indent=4)\n else:\n log.debug(\"Using existing concentration index limits from parameter file\")", "def patchAndDenoise(file_list, file_times=None, K=2, use_orders=None,\n num_iters=50, return_iters=0, running_window=0,\n line_cutoff=0.5, file_cutoff=0.5, outlier_cut=0, err_cut=0,\n fast_pca=False,sparse_pca=False,verbose=False):\n if file_times is None:\n file_times = np.zeros_like(file_list)\n \n ### Gather calibration information\n # Find all observed lines in each order and their wavlengths\n if verbose:\n print('Finding all observed modes')\n orders, names, waves = buildLineDB(file_list, use_orders=use_orders)\n\n # Find x-values of observed lines\n if verbose:\n print('Finding line center for each mode')\n x_values, x_errors = getLineMeasures(file_list, orders, names, err_cut=0)\n \n \n ### Vetting\n # Find where there is no line information\n x_values[x_values < 1] = np.nan # This will throw a warning\n \n # Mask out of order lines\n out_of_order = np.zeros_like(x_values,dtype=bool)\n for m in np.unique(orders):\n I = orders==m\n wave_sort = np.argsort(waves[I])\n for i, exp in enumerate(x_values):\n exp_sort = exp[I][wave_sort]\n exp_diff = np.diff(exp_sort)\n left_diff = np.insert(exp_diff<0,0,False)\n right_diff = np.append(exp_diff<0,False)\n exp_mask = np.logical_or(left_diff,right_diff)\n out_of_order[i,I] = exp_mask.copy()\n x_values[out_of_order] = np.nan\n if verbose:\n num_bad = np.sum(out_of_order)\n num_total = out_of_order.size\n print('{:.3}% of lines masked'.format(\n (num_bad)/num_total*100))\n \n # Get rid of bad lines\n good_lines = np.mean(np.isnan(x_values),axis=0) < line_cutoff\n # Trim everything\n names = names[good_lines]\n orders = orders[good_lines]\n waves = waves[good_lines]\n x_values = x_values[:,good_lines]\n x_errors = x_errors[:,good_lines]\n if verbose:\n num_good = np.sum(good_lines)\n num_total = good_lines.size\n print('{} of {} lines cut ({:.3}%)'.format(\n (num_total - num_good),num_total,\n (num_total - 
num_good)/num_total*100))\n \n # Get rid of bad files\n good_files = np.mean(np.isnan(x_values),axis=1) < file_cutoff\n # Trim everything\n x_values = x_values[good_files]\n x_errors = x_errors[good_files]\n exp_list = file_list[good_files]\n file_times = file_times[good_files]\n if verbose:\n num_good = np.sum(good_files)\n num_total = good_files.size\n print('{} of {} files cut ({:.3}%)'.format(\n (num_total - num_good),num_total,\n (num_total - num_good)/num_total*100))\n print('Files that were cut:')\n print(file_list[~good_files])\n \n ### Patching\n # Initial patch of bad data with mean\n bad_mask = np.isnan(x_values) # mask to identify patched x_values\n if running_window > 0:\n half_size = int(running_window//2)\n for i in range(x_values.shape[0]):\n # Identify files in window\n file_range = [max((i-half_size,0)), min((i+half_size+1,x_values.shape[1]))]\n # Find mean of non-NaN values\n run_med = np.nanmean(x_values[file_range[0]:file_range[1],:],axis=0)\n # Patch NaN values with mean for center file\n x_values[i][bad_mask[i,:]] = run_med[bad_mask[i,:]]\n counter = 5\n while np.sum(np.isnan(x_values)) > 0:\n for i in range(x_values.shape[0]):\n # Identify files in window\n file_range = [max((i-half_size,0)), min((i+half_size+1,x_values.shape[1]))]\n # Find mean of non-NaN values\n run_med = np.nanmean(x_values[file_range[0]:file_range[1],:],axis=0)\n # Patch NaN values with mean for center file\n x_values[i][bad_mask[i,:]] = run_med[bad_mask[i,:]]\n counter -= 1\n if counter < 0:\n print(\"Persistant NaNs with running mean.\")\n print(\"Replacing remaining NaNs with global mean.\")\n tot_mean = np.nanmean(x_values,axis=0)[None,...]*np.ones_like(x_values)\n x_values[np.isnan(x_values)] = tot_mean[np.isnan(x_values)]\n break\n else: # don't bother with running mean\n mean_values = np.nanmean(x_values,axis=0)\n mean_patch = np.array([mean_values for _ in range(x_values.shape[0])])\n x_values[bad_mask] = mean_patch[bad_mask]\n \n # Iterative PCA\n pca_results = pcaPatch(x_values, bad_mask, K=K, num_iters=num_iters,\n fast_pca=fast_pca, sparse_pca=sparse_pca, return_iters=return_iters)\n x_values, mean_x_values, denoised_xs, uu, ss, vv = pca_results[:6]\n \n # Mask line center outliers\n if outlier_cut > 0:\n x_resids = x_values-denoised_xs\n out_mask = abs(x_resids-np.mean(x_resids)) > (outlier_cut*np.nanstd(x_resids))\n if verbose:\n num_out = np.sum(out_mask)\n num_total = out_mask.size\n num_bad = np.sum(np.logical_and(out_mask,bad_mask))\n print('{:.3}% of lines marked as Outliers'.format(\n (num_out)/num_total*100))\n print('{:.3}% of lines marked as Outliers that were PCA Patched'.format(\n (num_bad)/num_total*100))\n pca_results = pcaPatch(x_values, np.logical_or(bad_mask,out_mask),\n K=K, num_iters=num_iters,\n fast_pca=fast_pca, return_iters=return_iters)\n x_values, mean_x_values, denoised_xs, uu, ss, vv = pca_results[:6]\n \n \n patch_dict = {}\n patch_dict['K'] = K\n # Exposure Information\n patch_dict['files'] = exp_list.copy()\n patch_dict['times'] = file_times.copy()\n # Line Information\n patch_dict['names'] = names.copy()\n patch_dict['orders'] = orders.copy()\n patch_dict['waves'] = waves.copy()\n patch_dict['errors'] = None # Is there such a thing?\n # Line Measurement Information\n patch_dict['x_values'] = x_values.copy()\n patch_dict['x_errors'] = x_errors.copy()\n patch_dict['denoised_xs'] = denoised_xs.copy()\n patch_dict['mean_xs'] = mean_x_values.copy()\n patch_dict['bad_mask'] = bad_mask.copy()\n # PCA Information\n patch_dict['u'] = uu.copy()\n 
patch_dict['s'] = ss.copy()\n patch_dict['v'] = vv.copy()\n patch_dict['ec'] = (uu*ss)[:,:K]\n # Information by Iteration\n if return_iters > 0:\n patch_dict['iter_vs'] = pca_results[6].copy()\n patch_dict['iter_x_values'] = pca_results[7].copy()\n # Outlier Information\n if outlier_cut > 0:\n patch_dict['out_mask'] = out_mask.copy()\n \n return patch_dict", "def SetPreserveIntensities(self, _arg: 'bool const') -> \"void\":\n return _itkClosingByReconstructionImageFilterPython.itkClosingByReconstructionImageFilterISS2ISS2SE2_SetPreserveIntensities(self, _arg)" ]
[ "0.52584434", "0.501605", "0.50095785", "0.5009003", "0.5002469", "0.49740124", "0.49180493", "0.49099478", "0.48357704", "0.47827873", "0.47746482", "0.47566548", "0.4741615", "0.4737234", "0.46963915", "0.46842596", "0.4639385", "0.46338367", "0.46156648", "0.46046755", "0.45878288", "0.45706195", "0.4564185", "0.4555902", "0.4550769", "0.4549048", "0.45445293", "0.4537495", "0.45249462", "0.45064425", "0.45023996", "0.45002246", "0.4494518", "0.44901457", "0.44897392", "0.4486865", "0.44859615", "0.44692406", "0.4462798", "0.44616425", "0.44616425", "0.44616425", "0.4460986", "0.44602105", "0.4460128", "0.44601017", "0.44589478", "0.44472268", "0.44405663", "0.4439043", "0.44381", "0.44365802", "0.44361332", "0.44337925", "0.44332182", "0.4429664", "0.4424833", "0.44220033", "0.44201455", "0.44024006", "0.44008952", "0.43980542", "0.43969473", "0.4392449", "0.43849418", "0.43816102", "0.43811393", "0.4376001", "0.43729413", "0.43714884", "0.43630287", "0.43549216", "0.43515435", "0.43507925", "0.4347806", "0.43465722", "0.43463346", "0.4344245", "0.43427294", "0.43375146", "0.43362397", "0.43361828", "0.43350992", "0.43347713", "0.43346694", "0.43302244", "0.4328938", "0.43275407", "0.43241733", "0.43207085", "0.43179512", "0.4315727", "0.42965698", "0.42903277", "0.42895257", "0.4288967", "0.42877555", "0.42757753", "0.42682114", "0.42680392" ]
0.7578108
0
Function to apply a frequency filter. This filter takes into consideration the occurrence frequency throughout the entire time series. Thus, all class occurrences with less than a given percentage of temporal persistence (e.g., 3 years or fewer out of 33) are replaced with the MODE value of that given pixel position in the stack of years.
def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams):
    #Grab land cover classes as a list of strings
    lc_classes = classDictionary.keys().getInfo()

    #Get binary images of the land cover classifications for the current year
    binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)

    #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer
    class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)

    #Get an image that represents the mode of the land cover classes in each pixel
    mode_image = image.reduce(ee.Reducer.mode())

    #Define an image to add bands with frequency filter applied
    out_img = ee.Image()

    #Loop through years
    for yearBand in yearBandNames:
        #Select the target year from the image
        yearImage = image.select(yearBand)

        #Loop through land cover classes in filterParams
        for lc_class in lc_classes:
            #Get the minimum occurrence allowed in that land cover class
            min_occurance = filterParams.get(lc_class)

            #Find if the land cover class had fewer than min_occurance occurrences in each pixel
            change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))

            #If change_class==1, then replace that pixel with the mode of all the years in that pixel.
            #This filter is only applied to pixels of this land cover class:
            #first mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1;
            #if both conditions are true, the pixel is replaced with the mode
            yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class), mode_image)

        #Rename yearImage to the year band name
        yearImage = yearImage.rename(yearBand)

        #Append to output image
        out_img = out_img.addBands(yearImage)

    return out_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frequency_filter(fc, L, srf, KIND=2):\n\n if hasattr(KIND, \"__len__\"):\n PASS = KIND\n KIND = 2\n else:\n PASS = [2,3]\n KIND = [KIND]\n\n # fourier transform of lateral inhibitory function \n\n # tonotopic axis\n if issubclass(type(fc), str):\n fc = float(fc)\n R1 = np.arange(L).astype(np.float)/L*srf/2/np.abs(fc)\n\n if KIND == 1:\n # Gabor function\n C1 = 1./2/0.3/0.3\n H = np.exp(-C1*(R1-1)**2) + np.exp(-C1*(R1+1)**2)\n else:\n # Gaussian Function\n R1 = R1 ** 2\n H = R1 * np.exp(1-R1)\n\n # passband\n if PASS[0] == 1:\n #lowpass\n maxi = np.argmax(H)\n sumH = H.sum()\n H[0:maxi] = 1\n H = H / (H.sum() or 1) * sumH\n elif PASS[0] == PASS[1]:\n # highpass\n maxi = np.argmax(H)\n sumH = H.sum()\n H[maxi+1:L] = 1\n H = H / (H.sum() or 1) * sumH\n\n return H", "def temporal_ideal_filter(tensor,low,high,fps,axis=0): \n fft=fftpack.fft(tensor,axis=axis)\n frequencies = fftpack.fftfreq(tensor.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n if (bound_low==bound_high) and (bound_high<len(fft)-1):\n bound_high+=1\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff=fftpack.ifft(fft, axis=axis)\n \n return np.abs(iff)", "def generate_filter(length, fs, f_low=None, f_high=None, mode='box', is_plot=False):\n\n freqs = np.fft.fftfreq(int(length), d=(1. / float(fs)))\n\n filter_array = np.ones(length)\n\n if f_low is None and f_high is None:\n print('no filtering required!')\n elif f_low is None and f_high is not None:\n print('low-pass fileter')\n if f_high <= 0:\n raise(ValueError, 'Higher cutoff frquency should be positive!')\n filter_array[freqs >= f_high] = 0.\n filter_array[freqs <= -f_high] = 0.\n elif f_low is not None and f_high is None:\n print('high-pass fileter')\n if f_low < 0:\n raise (ValueError, 'Lower cutoff frquency should be non-negative!')\n filter_array[np.logical_and((freqs >= -f_low), (freqs <= f_low))] = 0.\n else:\n print('band-pass filter')\n if f_high <= 0:\n raise (ValueError, 'Higher cutoff frquency should be positive!')\n if f_low < 0:\n raise (ValueError, 'Lower cutoff frquency should be non-negative!')\n filter_array[freqs >= f_high] = 0.\n filter_array[freqs <= -f_high] = 0.\n filter_array[np.logical_and((freqs >= -f_low), (freqs <= f_low))] = 0.\n\n if mode == '1/f':\n filter_array[1:] = filter_array[1:] / abs(freqs[1:])\n filter_array[0] = 0\n filter_array = bas.array_nor(filter_array)\n elif mode == 'box':\n filter_array[0] = 0\n else:\n raise(NameError, 'Variable \"mode\" should be either \"1/f\" or \"box\"!')\n\n if is_plot:\n plot_array = zip(freqs, filter_array)\n plot_array.sort(key=lambda x: x[0])\n plot_array = zip(*plot_array)\n\n _ = plt.figure(figsize=(10, 3))\n plt.plot(plot_array[0], plot_array[1])\n plt.xlabel('frequency (Hz)')\n plt.ylim([-0.1, 1.1])\n plt.show()\n\n return freqs, filter_array", "def fir_filter(sig, sampling_freq, critical_freq, kernel_window = 'hamming', taps = 101, kind = 'band', **kwargs):\n\n kernel = make_fir_filter(sampling_freq, critical_freq, kernel_window, taps, kind, **kwargs) \n\n return np.roll(scipy.signal.lfilter(kernel, [1], sig), -taps/2+1)", "def get_frequency(frame):\n frame = clip_centre(frame)\n frame = auto_correlate(frame)\n threshold: int = SAMPLE_RATE // 500\n lag = frame[threshold:].argmax()\n frequency = SAMPLE_RATE / lag\n return frequency", "def detect(self, frame, cur_count, player):\n if cur_count % self.freq is 0:\n frame = cv.GaussianBlur(frame, (3, 3), 1)\n self.process(frame, 
cur_count, player)", "def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H", "def tconst_filter(freq, tau):\n\treturn 1/(2*np.pi*1j*freq*tau+1)", "def apply_freq_filter(self, min_freq):\n self._apply_filter(lambda ng, freq: freq < min_freq)", "def continuous_hann_sinc_filter(\n fs: int, fc: float, L: int, dtype: torch.dtype, device: torch.device\n) -> Tensor:\n assert L % 2 == 1\n assert fc < fs / 2\n hsupp = torch.linspace(-(L-1)/2, (L-1)/2, L, dtype=dtype, device=device)\n hideal = (2 * fc / fs) * torch.sinc(2 * fc * hsupp / fs)\n hann = torch.hann_window(L, dtype=dtype, device=device)\n return hideal * hann", "def _prevalent_freq(self, data, framerate):\n if not(np.std(data) == 0):\n data = (data-np.mean(data))/np.std(data)\n transform = np.fft.rfft(data)\n freqs = np.fft.rfftfreq(len(data), 1.0/framerate) \n freqs = 60*freqs\n band_pass = np.where((freqs < 40) | (freqs > 240) )[0]\n transform[band_pass] = 0\n transform = np.abs(transform)**2\n sos = scipy.signal.butter(3, 0.2, output='sos')\n transform = scipy.signal.sosfilt(sos, transform)\n powers = np.argsort(-1*transform)\n hr, power = self._respiration_rejection([freqs[powers[0]], freqs[powers[1]]],[transform[powers[0]], transform[powers[1]]])\n return hr, power", "def freq_filt(orig_img: np.ndarray, transfer_func: np.ndarray) -> np.ndarray:\n # pad and center the input image\n M, N = orig_img.shape[:2]\n padded_img = np.pad(\n orig_img,\n (\n (int(np.floor(M / 2)), int(np.ceil(M / 2))),\n (int(np.floor(N / 2)), int(np.ceil(N / 2))),\n (0, 0),\n ),\n constant_values=0,\n )\n\n # take fft of image\n f_img = np.fft.fftshift(np.fft.fft2(padded_img.astype(np.float32)))\n\n # get product of image and transfer func\n f_filtered = np.empty_like(f_img)\n for channel_idx in range(f_img.shape[-1]):\n f_filtered[:, :, channel_idx] = f_img[:, :, channel_idx] * transfer_func\n\n # get image using ifft\n filtered_img = np.real(np.fft.ifft2(np.fft.fftshift(f_filtered)))\n\n # slice to remove padding\n filtered_img = filtered_img[\n int(M / 2) : int(3 * M / 2), int(N / 2) : int(3 * N / 2), :\n ]\n\n # scale and return filtered image\n return (\n 255\n * (filtered_img - np.min(filtered_img))\n / (np.max(filtered_img) - np.min(filtered_img))\n ).astype(np.uint8)", "def temporal_bandpass_filter(video_to_filter, low, high, fps):\n fft = fftpack.fft(video_to_filter, axis=0)\n frequencies = fftpack.fftfreq(video_to_filter.shape[0], d=1.0 / fps)\n bound_low = (np.abs(frequencies - low)).argmin()\n bound_high = (np.abs(frequencies - high)).argmin()\n fft[:bound_low] = 0\n fft[bound_high:-bound_high] = 0\n fft[-bound_low:] = 0\n iff = fftpack.ifft(fft, axis=0)\n return iff", "def filters(array, sample_frequency):\n strain = TimeSeries(array, sample_rate=int(sample_frequency))\n white_data = strain.whiten(fftlength=4, fduration=4)\n bp_data = white_data.bandpass(50, 250)\n return bp_data.value", "def filter_(self,fltr:torch.tensor):\n self.container = self.container[:,fltr]\n self.count_hist = self.count_hist[fltr]", "def temporal_filter(fc, L, srt, PASS = [2,3]):\n if issubclass(type(fc), str):\n fc = float(fc)\n t = np.arange(L).astype(np.float32)/srt\n k = t*fc\n h = np.sin(2*np.pi*k) * k**2 * np.exp(-3.5*k) * fc\n\n h = h-np.mean(h)\n H0 = np.fft.fft(h, n=2*L)\n A = np.angle(H0[0:L])\n H = np.abs(H0[0:L])\n maxi = np.argmax(H)\n H = H / (H[maxi] or 1)\n\n # passband\n if 
PASS[0] == 1:\n #low pass\n H[0:maxi] = 1\n elif PASS[0] == PASS[1]:\n #high pass\n H[maxi+1:L] = 1\n\n H = H * np.exp(1j*A)\n return H", "def analysis_fourier_map(self, target=1, mode=0):\r\n\r\n \r\n\r\n print('Starting fourier analysis:')\r\n\r\n self.print_image_info()\r\n\r\n # get the average image and the average of the whole image over time\r\n\r\n avgimg = np.mean(self.imageData, axis=0) # get mean image for reference later: average across all time\r\n\r\n self.meanimagevalue = np.mean(np.mean(avgimg, axis=1), axis=0)\r\n\r\n self.stdimg = np.std(self.imageData, axis= 0) # and standard deviation\r\n\r\n\r\n\r\n width = int(self.period*self.framerate*2)\r\n\r\n print( \" Detrending:\")\r\n\r\n print( ' Median filter width: ', width)\r\n\r\n # footprint = np.ones((width, 1, 1))\r\n\r\n # self.imageData = self.imageData - scipy.ndimage.median_filter(self.imageData, footprint=footprint)\r\n\r\n print( \" Done detrending\")\r\n\r\n\r\n\r\n self.n_times = self.timebase\r\n\r\n\r\n\r\n # calculate FFT and get amplitude and phase\r\n\r\n self.DF = np.fft.fft(self.imageData, axis = 0)\r\n self.freqs = np.fft.fftfreq(self.DF.shape[0], d=1./self.framerate)\r\n\r\n # self.freqs = np.fft.fftfreq(self.DF.shape[0], d=1./self.framerate)\r\n\r\n print (' df shape: ', self.DF.shape)\r\n\r\n print (' 1/framerate: ', 1./self.framerate)\r\n\r\n self.freq_point = np.argmin(np.abs(self.freqs - 1./self.period))\r\n print ('period:', self.period)\r\n print ('frequency: ', 1./self.period)\r\n print ('freq_point: ', self.freq_point)\r\n print ('frequency value: ',self.freqs[self.freq_point])\r\n steps = np.arange(1,6,dtype=np.float)\r\n steps = (steps)+1.\r\n self.assigned_freqs=2.*np.pi*1./1.6*steps\r\n print ('assigned freqs', self.assigned_freqs)\r\n\r\n #j = j + 2 # just looking at FFT leakage...`\r\n\r\n print (' closest index/freq, period: ', self.freq_point, self.freqs[self.freq_point], 1./self.period)\r\n\r\n self.print_image_info()\r\n\r\n ampimg = np.absolute(self.DF[self.freq_point,:,:])\r\n\r\n phaseimg = np.angle(self.DF[self.freq_point,:,:])\r\n\r\n \r\n # ampimg = np.absolute(self.DF[self.freq_point,:,:])\r\n\r\n\r\n # phaseimg = np.angle(self.DF[self.freq_point,:,:])\r\n\r\n if target == 1:\r\n\r\n f = open('img_phase1.dat', 'w')\r\n\r\n pickle.dump(phaseimg, f)\r\n\r\n f.close()\r\n\r\n f = open('img_amplitude1.dat', 'w')\r\n\r\n pickle.dump(ampimg, f)\r\n\r\n f.close()\r\n\r\n self.amplitudeImage1 = ampimg\r\n\r\n self.phaseImage1 = phaseimg\r\n\r\n if target == 2:\r\n\r\n f = open('img_phase2.dat', 'w')\r\n\r\n pickle.dump(phaseimg, f)\r\n\r\n f.close()\r\n\r\n f = open('img_amplitude2.dat', 'w')\r\n\r\n pickle.dump(ampimg, f)\r\n\r\n f.close()\r\n\r\n self.amplitudeImage2 = ampimg\r\n\r\n self.phaseImage2 = phaseimg\r\n\r\n print (\" FFT calculated, data saved.\\n\")\r\n\r\n # save most recent calculation to disk\r", "def _adapt_freq(\n ds: xr.Dataset,\n *,\n dim: Sequence[str],\n thresh: float = 0,\n) -> xr.Dataset:\n # Compute the probability of finding a value <= thresh\n # This is the \"dry-day frequency\" in the precipitation case\n P0_sim = ecdf(ds.sim, thresh, dim=dim)\n P0_ref = ecdf(ds.ref, thresh, dim=dim)\n\n # The proportion of values <= thresh in sim that need to be corrected, compared to ref\n dP0 = (P0_sim - P0_ref) / P0_sim\n\n if dP0.isnull().all():\n # All NaN slice.\n pth = dP0.copy()\n sim_ad = ds.sim.copy()\n else:\n # Compute : ecdf_ref^-1( ecdf_sim( thresh ) )\n # The value in ref with the same rank as the first non-zero value in sim.\n # pth is meaningless when 
freq. adaptation is not needed\n pth = nbu.vecquantiles(ds.ref, P0_sim, dim).where(dP0 > 0)\n\n # Probabilities and quantiles computed within all dims, but correction along the first one only.\n if \"window\" in dim:\n # P0_sim was computed using the window, but only the original time series is corrected.\n # Grouper.apply does this step, but if done here it makes the code faster.\n sim = ds.sim.isel(window=(ds.sim.window.size - 1) // 2)\n else:\n sim = ds.sim\n dim = dim[0]\n\n # Get the percentile rank of each value in sim.\n rank = sim.rank(dim, pct=True)\n\n # Frequency-adapted sim\n sim_ad = sim.where(\n dP0 < 0, # dP0 < 0 means no-adaptation.\n sim.where(\n (rank < P0_ref) | (rank > P0_sim), # Preserve current values\n # Generate random numbers ~ U[T0, Pth]\n (pth.broadcast_like(sim) - thresh)\n * np.random.random_sample(size=sim.shape)\n + thresh,\n ),\n )\n\n # Tell group_apply that these will need reshaping (regrouping)\n # This is needed since if any variable comes out a `groupby` with the original group axis,\n # the whole output is broadcasted back to the original dims.\n pth.attrs[\"_group_apply_reshape\"] = True\n dP0.attrs[\"_group_apply_reshape\"] = True\n return xr.Dataset(data_vars={\"pth\": pth, \"dP0\": dP0, \"sim_ad\": sim_ad})", "def filter_ms2fits(stack, fit_data, channel=1, peakiness=4.5):\n \n fit_data = fit_data.copy()\n for t in range(0, len(fit_data)):\n frame_data = fit_data[t]\n frame_med = np.median(stack[channel, t])\n xy_width_means = np.mean(frame_data[:,5:7], axis=1)\n peak_heights = frame_data[:,3]\n spot_peakiness = np.log(peak_heights / xy_width_means)\n frame_data_filtered = frame_data[(peak_heights > frame_med) & (spot_peakiness > peakiness),:]\n fit_data[t] = frame_data_filtered\n return fit_data", "def apply_filter(data, filter_bank, sfreq): \n if data.ndim == 1:\n filtered = np.zeros((1, filter_bank.shape[0], sfreq))\n for filt in range(filter_bank.shape[0]):\n filtered[0, filt, :] = np.convolve(filter_bank[filt,:], data)[int(sfreq-sfreq/2):int(sfreq+sfreq/2)]\n elif data.ndim == 2:\n filtered = np.zeros((data.shape[0], filter_bank.shape[0], sfreq))\n for chan in range(data.shape[0]):\n for filt in range(filter_bank.shape[0]):\n filtered[chan, filt, :] = np.convolve(filter_bank[filt, :], \\\n data[chan,:])[int(sfreq-sfreq/2):int(sfreq+sfreq/2)] # mode=\"full\"\n return filtered", "def bandpass_cnt(data, low_cut_hz, high_cut_hz, fs, filt_order=3, axis=0):\n if (low_cut_hz == 0 or low_cut_hz is None) and (\n high_cut_hz == None or high_cut_hz == fs / 2.0):\n log.info(\"Not doing any bandpass, since low 0 or None and \"\n \"high None or nyquist frequency\")\n return data.copy()\n if low_cut_hz == 0 or low_cut_hz == None:\n log.info(\"Using lowpass filter since low cut hz is 0 or None\")\n return lowpass_cnt(data, high_cut_hz, fs, filt_order=filt_order, axis=axis)\n if high_cut_hz == None or high_cut_hz == (fs / 2.0):\n log.info(\n \"Using highpass filter since high cut hz is None or nyquist freq\")\n return highpass_cnt(data, low_cut_hz, fs, filt_order=filt_order, axis=axis)\n\n nyq_freq = 0.5 * fs\n low = low_cut_hz / nyq_freq\n high = high_cut_hz / nyq_freq\n b, a = scipy.signal.butter(filt_order, [low, high], btype='bandpass')\n assert filter_is_stable(a), \"Filter should be stable...\"\n data_bandpassed = scipy.signal.lfilter(b, a, data, axis=axis)\n return data_bandpassed", "def _apply_filter(self, fn=lambda ngram, freq: False):\n tmp_ngram = FreqDist()\n for ngram, freq in self.ngram_fd.items():\n if not fn(ngram, freq):\n tmp_ngram[ngram] = 
freq\n self.ngram_fd = tmp_ngram", "def freq(self, frequency: Optional[int]):", "def filter_freq(self, low_freq=None, high_freq=None, axes=None, win_fcn='boxcar'):\n axes = self._get_axes_numbers(axes)\n fdomain = self.fft(axes=axes)\n low_freq = self._cook_args(low_freq, axes)\n high_freq = self._cook_args(high_freq, axes)\n\n if low_freq is None:\n low_freq = [0]*len(axes)\n if high_freq is None:\n high_freq = [self.ts[ax]/2. for ax in axes]\n\n fupper, flower = fdomain.copy(), fdomain.copy()\n for ax in axes:\n fupper = fupper.select(lambda x: x >= 0, axis=ax)\n flower = flower.select(lambda x: x < 0, axis=ax)\n\n fupper = fupper.window(index1=low_freq, index2=high_freq, axes=axes, win_fcn=win_fcn)\n flower = flower.window(index1=-np.array(high_freq), index2=-np.array(low_freq),\n axes=axes, win_fcn=win_fcn)\n fdomain.update(fupper)\n fdomain.update(flower)\n vals = fftshift(fdomain.values, axes=axes)\n ift = ifft2(vals, axes=axes, shape=np.array(self.shape)[axes])\n return Signal2D(np.real(ift), index=self.index, columns=self.columns)", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def rcfilter_C(freq, R):\n R = _normalizevalue(R)\n C = 1/(R*freq*2*math.pi)\n return _Cap(C)", "def filter(f,fcutoff=10.,w=10.0,dt=.001):\r\n\r\n tshift=float(w)/2.\r\n \r\n fpad=padzeros(f)\r\n Fpad=np.fft.fft(fpad)\r\n fc=fcutoff\r\n \r\n t=np.arange(start=-tshift,stop=tshift,step=dt)\r\n filt=np.zeros(len(fpad))\r\n fs=2*fc*np.sinc(2*t*fc)\r\n norm=sum(fs)\r\n filt[0:len(t)]=fs/norm\r\n Filt=np.fft.fft(filt)\r\n \r\n Filtfunc=Fpad*Filt\r\n filtfunc=np.fft.ifft(Filtfunc)\r\n filtfunc=filtfunc[len(t)/2:len(f)+len(t)/2]\r\n \r\n return filtfunc", "def filtered_fourier(self):\r\n\r\n freqs = tsu.get_freqs(self.sampling_rate, self.data.shape[-1])\r\n\r\n if self.ub is None:\r\n self.ub = freqs[-1]\r\n\r\n power = fftpack.fft(self.data)\r\n idx_0 = np.hstack([np.where(freqs < self.lb)[0],\r\n np.where(freqs > self.ub)[0]])\r\n\r\n #Make sure that you keep the DC component:\r\n keep_dc = np.copy(power[..., 0])\r\n power[..., idx_0] = 0\r\n power[..., -1 * idx_0] = 0 # Take care of the negative frequencies\r\n power[..., 0] = keep_dc # And put the DC back in when you're done:\r\n\r\n data_out = fftpack.ifft(power)\r\n\r\n data_out = np.real(data_out) # In order to make sure that you are not\r\n # left with float-precision residual\r\n # complex parts\r\n\r\n return ts.TimeSeries(data=data_out,\r\n sampling_rate=self.sampling_rate,\r\n time_unit=self.time_unit)", "def filters(self, low_freq=1/7, high_freq=128, notch_freq=50):\n self.raw.filter(l_freq=low_freq, h_freq=high_freq)\n self.raw.notch_filter(range(notch_freq, high_freq, notch_freq), filter_length='auto',\n phase='zero', fir_design='firwin')", "def source_freq(self) -> int:", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and 
determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def test_3d_steam_freq():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n lowmem_write_readback(dic,data)", "def downsample_fluorescence(F, thres=20, verbose=1):\n diff_F = np.diff(F, axis=1)\n sum_F = np.sum(diff_F, axis=0)\n F = F[:,:-1]\n if verbose > 0:\n 
print(\n 'Downsampling fluorescence data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_F, thres))))\n \n return F[:, np.greater(sum_F, thres)]", "def apply_filter(self, image):\n pass", "def test_3d_steam_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/full3D.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n write_readback(dic,data)", "def fir(self):\r\n #Passband and stop-band are expressed as fraction of the Nyquist\r\n #frequency:\r\n if self.ub is not None:\r\n ub_frac = self.ub / (self.sampling_rate / 2.)\r\n else:\r\n ub_frac = 1.0\r\n\r\n lb_frac = self.lb / (self.sampling_rate / 2.)\r\n\r\n if lb_frac < 0 or ub_frac > 1:\r\n e_s = \"The lower-bound or upper bound used to filter\"\r\n e_s += \" are beyond the range 0-Nyquist. You asked for\"\r\n e_s += \" a filter between\"\r\n e_s += \"%s and %s percent of\" % (lb_frac * 100, ub_frac * 100)\r\n e_s += \"the Nyquist frequency\"\r\n raise ValueError(e_s)\r\n\r\n n_taps = self._filt_order + 1\r\n\r\n #This means the filter order you chose was too large (needs to be\r\n #shorter than a 1/3 of your time-series )\r\n if n_taps > self.data.shape[-1] * 3:\r\n e_s = \"The filter order chosen is too large for this time-series\"\r\n raise ValueError(e_s)\r\n\r\n # a is always 1:\r\n a = [1]\r\n\r\n sig = ts.TimeSeries(data=self.data, sampling_rate=self.sampling_rate)\r\n\r\n #Lowpass:\r\n if ub_frac < 1:\r\n b = signal.firwin(n_taps, ub_frac, window=self._win)\r\n sig = self.filtfilt(b, a, sig)\r\n\r\n #High-pass\r\n if lb_frac > 0:\r\n #Includes a spectral inversion:\r\n b = -1 * signal.firwin(n_taps, lb_frac, window=self._win)\r\n b[n_taps / 2] = b[n_taps / 2] + 1\r\n sig = self.filtfilt(b, a, sig)\r\n\r\n return sig", "def set_filter_fq_pab(self, threshold):\n frequency_table = self._get_existence_frequency()\n self.filter = frequency_table > threshold", "def PassFilter(xdata, ydata, fs, order=5, btype='high', freq = None, cutoff=None):\r\n if freq == None:\r\n freq = _frequency_estimation(xdata, ydata)\r\n if cutoff==None:\r\n cutoff = _cutoff(xdata, ydata, btype, fs, ff = freq)\r\n b, a = _butter_pass(cutoff, fs, order=order, btype=btype)\r\n y = signal.filtfilt(b, a, ydata, padtype='even')\r\n return y", "def filtering(self):\n from numpy import fft\n import numpy as np\n\n _image_dft = fft.fft2(self.image)\n _image_dft = fft.fftshift(_image_dft)\n # dft = DFT.DFT()\n # plt.figure(1) \n # plt.imshow(self.image)\n # plt.figure(2)\n # plt.imshow(20*np.log10(abs(_image_dft))) \n # print(_image_dft)\n # print(abs(_image_dft))\n # plt.show()\n filter = self.filter(self.image.shape, self.cutoff, self.order) \\\n if self.filter_name.startswith('butterworth') \\\n else self.filter(self.image.shape, self.cutoff)\n \n _image_dft_filtered = _image_dft * filter\n _image_filtered = abs(fft.ifft2(_image_dft_filtered))\n \n return [ self.post_process_image(_image_filtered), \\\n self.post_process_image(20*np.log10(abs(_image_dft)+.00001)), \\\n self.post_process_image(20*np.log10(abs(_image_dft_filtered)+.00001)) ]", "def _imfilter(x_data, f_data):\n return pipe(f_data, ifftshift, fftn, lambda x: x * fftn(x_data), ifftn).real", "def filter_by_freq(self, low=0.5, high=40):\n self.epochs.load_data()\n self.epochs.filter(l_freq=low, h_freq=high, picks = 'all')\n return 
self.epochs", "def frequency_watt(self, p_req = 0, p_prev = 0, ts=datetime.utcnow(), location=0, db_UF = 0.05, db_OF = 0.05): #datetime.\n f = self.grid.get_frequency(ts,location)\n \n if (f < 60 - db_UF).any():\n p_mod = 0\n elif (f > 60 + db_OF).any():\n p_mod = p_req\n else:\n p_mod = p_prev\n \n return p_mod", "def __restrict_features_freq(self, min_count=1):\n col_idx = self.X.tocsc().nonzero()[1]\n counter = np.bincount(col_idx)\n print(\"Counter:\", len(counter))\n include_cols = np.where(counter > min_count)[0]\n return include_cols", "def notch_filter_raw_plot(data, fs, fc):\n b, a = sp.iirnotch(w0=fc / fs * 2, Q=100)\n w, h = sp.freqz(b, a)\n f = w / np.pi * fs / 2\n plt.figure()\n plt.plot(f, 10 * np.log10(abs(h)))\n plt.xlabel('frequency (Hz)')\n plt.ylabel('Magnitude (dB)')\n plt.title('frequency response of notch filter at 50Hz')\n plt.grid()\n\n data1 = sp.filtfilt(b, a, data)\n return data1", "def create_frequency_feature(temp_df):\n start = time.time()\n cat_dfs = []\n for num in np.arange(1080,0,-30):\n temp_df.loc[temp_df['event_time'] > int(num), 'event_time'] = np.nan\n for col in ['event_name', 'specialty', 'plan_type']:\n cat_df = temp_df.groupby([\"id\", col],).agg({\"event_time\": 'count'}).unstack(level=col)\n cat_df.columns = ['__'.join(['frequency', col, name, str(int(num))]) for name in cat_df.columns.droplevel()]\n cat_dfs.append(cat_df)\n res_df = pd.concat(cat_dfs, axis = 1)\n res_df = res_df.fillna(0)\n end = time.time()\n print('time taken (in secs) for frequency feature creation:', end-start)\n \n res_idx, res_col = np.array(res_df.index), np.array(res_df.columns)\n res_data = get_sparse_matrix(res_df.values)\n \n del res_df\n # get data\n return res_idx, res_col, res_data", "def apply_filter(self, filt):\n new_timetraces = filt(self.timetraces)\n return self.__class__(\n new_timetraces,\n self.time,\n self.tx,\n self.rx,\n self.probe,\n self.examination_object,\n self.metadata,\n )", "def frequency_threshold(workFunction=1, units=eV):\n\n var = sy.var('W h')\n par = workFunction, units['h']\n\n y = W / h\n\n return dic_result(var,par,y)", "def _filter_frequencies(self):\n import scipy.signal as spsg\n freq_bands = ['alpha', 'beta', 'gamma']\n if len(freq_bands) != self.n_bands:\n raise ValueError('Rename frequency bands')\n freqs_ts = np.empty([0, self.total_trials, self.ms, self.n_raw_features])\n for i_band in range(self.n_bands):\n freq_band = freq_bands[i_band]\n\n if freq_band == 'alpha':\n low_f = 8./self.sampling_freq\n high_f = 15./self.sampling_freq\n elif freq_band == 'beta':\n # beta\n low_f = 15./self.sampling_freq\n high_f = 32./self.sampling_freq\n elif freq_band == 'gamma':\n # gamma\n low_f = 32./self.sampling_freq\n high_f = 80./self.sampling_freq\n else:\n raise NameError('unknown filter')\n\n b, a = spsg.iirfilter(self.band_filter_order, [low_f, high_f],\n btype='bandpass', ftype='butter', output='ba')\n # ts_data: (trials, t, n)\n filtered_ts = spsg.filtfilt(b, a, self.ts_data, axis=-2)\n freqs_ts = np.concatenate((freqs_ts, np.array([filtered_ts])))\n\n return freqs_ts", "def freq():", "def filter_spectrum(spectrum):\n # avoid division by 0\n spectrum.hs[1:] /= spectrum.fs[1:]\n spectrum.hs[0] = 0", "def mut_filter(df, rate, binary_cutoff=12):\n get_min_count = lambda s: s.value_counts().min() if len(s.unique()) > 1 else -1\n df = df[df.apply(get_min_count, axis=1) > binary_cutoff]\n cc = H.screen_feature(rate, rev_kruskal, df)\n\n fc_apply = lambda s: fc(s, rate)\n direction = df.apply(fc_apply, axis=1)\n direction.name = 
'direction'\n\n cc = cc.join(direction)\n #cc = cc[cc.direction == False]\n #return cc\n\n df = df.ix[H.true_index((cc.p > .01) | (cc.direction == True))]\n df = df.dropna(axis=1)\n return df", "def set_center_freq(self, *args):\n return _uhd_swig.usrp_source_set_center_freq(self, *args)", "def plot_carrier_frequency(self):\r\n roi = self.phase_roi\r\n phase_slice = (slice(roi[2], roi[3]), slice(roi[0], roi[1]))\r\n # calculation\r\n S = self.image[phase_slice] # signal in phase_roi\r\n t_axis = self.image.x_axis[roi[0]:roi[1]] # [ns] time axis\r\n y_axis = self.image.y_axis[roi[2]:roi[3]] # [mic] spatial scale\r\n N = S.shape[0]//2\r\n\r\n s = np.fft.fft(S, axis=0) / N # fft\r\n s_abs = np.abs(s[:N,:])\r\n f_axis = np.arange(N) / (2 * N) # spatial frequency axis\r\n\r\n s_mean = np.log10(np.mean(s_abs, axis=1))\r\n i0 = np.argmax(s_mean[3:])\r\n f0 = f_axis[3+i0] # [px^-1] fringe carrier frequency (estimate)\r\n s0 = s_mean[3+i0]\r\n sys.stdout.write(\"{} VISAR-{} fringe period = {:.1f} px\\n\".format(\r\n self.h5.shot_id[:11], self.leg, 1/f0))\r\n\r\n # plot calcs\r\n vlim_0 = dataimage.thresh_vlim(S, 0.01)\r\n vlim_1 = dataimage.thresh_vlim(np.log10(s_abs), (0.02, 0.005))\r\n tlim = (t_axis[0], t_axis[-1])\r\n ylim = (y_axis[0], y_axis[-1])\r\n flim = (0, 0.5) # [1/px]\r\n extent_0 = tlim + (0, S.shape[0]) # extent for signal\r\n# extent_0 = tlim + ylim # extent for signal\r\n extent_1 = tlim + flim # extent for fft\r\n\r\n # figure\r\n fig = plt.figure(figsize=(7,7), dpi=100)\r\n axs = []\r\n axs.append(fig.add_subplot(221, ylabel='[px]', title='signal'))\r\n axs.append(fig.add_subplot(222, sharey=axs[0], title='spatial lineout'))\r\n axs.append(fig.add_subplot(223, sharex=axs[0], title='log(fft(signal))',\r\n xlabel='time [ns]', ylabel=\"spatial frequency [px^-1]\"))\r\n axs.append(fig.add_subplot(224, sharey=axs[2], xlabel='log10(power)', title='spectral lineout'))\r\n\r\n axs[0].imshow(S, extent=extent_0,\r\n aspect='auto', vmin=vlim_0[0], vmax=vlim_0[1])\r\n axs[2].imshow(np.log10(s_abs), extent=extent_1,\r\n aspect='auto', vmin=vlim_1[0], vmax=vlim_1[1])\r\n axs[1].plot(np.mean(S, axis=1), np.arange(S.shape[0]))\r\n axs[3].plot(s_mean, f_axis)\r\n axs[0].set_ylim(*extent_0[2:])\r\n \r\n axs[3].annotate(\"fringe period\\n= {:.1f} px\".format(1/f0),\r\n (s0, f0), (0.95, 0.5), textcoords='axes fraction',\r\n arrowprops=dict(width=1, headwidth=6, facecolor='k',\r\n shrink=0.03), ha='right',)\r\n\r\n axs[3].axhline(f0*0.7, color='r', linestyle='dashed')\r\n axs[3].axhline(f0*1.4, color='r', linestyle='dashed')\r\n\r\n fig.tight_layout()\r\n fig.canvas.window().move(0,0)\r\n return fig", "def detect_freqs(self):\n n_fft_bins = self._config[\"audio_config\"][\"N_FFT_BINS\"]\n channel_avgs = []\n differences = []\n \n for i in range(n_fft_bins):\n channel_avgs.append(sum(self.freq_channels[i])/len(self.freq_channels[i]))\n differences.append(((self.freq_channels[i][0]-channel_avgs[i])*100)//channel_avgs[i])\n for i in [\"beat\", \"low\", \"mid\", \"high\"]:\n if any(differences[j] >= self.min_percent_diff[i]\\\n and self.freq_channels[j][0] >= self.min_detect_amplitude[i]\\\n for j in range(*self.detection_ranges[i]))\\\n and (time.time() - self.prev_freq_detects[i] > 0.2)\\\n and len(self.freq_channels[0]) == self.freq_channel_history:\n self.prev_freq_detects[i] = time.time()\n self.current_freq_detects[i] = True\n else:\n self.current_freq_detects[i] = False", "def set_frequency(self, new_freq):\n self.freq = new_freq\n self.ts_resample()", "def filtfilt_mmap(timestamps, finname, 
foutname, fs, fl=None, fh=None,\r\n gpass=None, gstop=None, dtype=None, ftype='cheby2',\r\n buffer_len=4194304, overlap_len=None, max_len=None,\r\n **kwargs):\r\n\r\n if overlap_len is None:\r\n overlap_len = int(fs*2)\r\n\r\n if dtype is None:\r\n dtype=np.int16\r\n\r\n if gpass is None:\r\n gpass = 0.1 # max loss in passband, dB\r\n\r\n if gstop is None:\r\n gstop = 30 # min attenuation in stopband (dB)\r\n\r\n fso2 = fs/2.0\r\n\r\n try:\r\n if np.isinf(fh):\r\n fh = None\r\n except AttributeError:\r\n pass\r\n if fl == 0:\r\n fl = None\r\n\r\n if (fl is None) and (fh is None):\r\n print('wut? nothing to filter, man!')\r\n raise ValueError('nonsensical all-pass filter requested...')\r\n elif fl is None: # lowpass\r\n wp = fh/fso2\r\n ws = 1.4*fh/fso2\r\n elif fh is None: # highpass\r\n wp = fl/fso2\r\n ws = 0.8*fl/fso2\r\n else: # bandpass\r\n wp = [fl/fso2, fh/fso2]\r\n ws = [0.8*fl/fso2,1.4*fh/fso2]\r\n\r\n sos = iirdesign(wp, ws, gpass=gpass, gstop=gstop, ftype=ftype, output='sos')\r\n\r\n y = filtfilt_within_epochs_mmap(timestamps=timestamps,\r\n finname=finname,\r\n foutname=foutname,\r\n dtype=dtype,\r\n sos=sos,\r\n buffer_len=buffer_len,\r\n overlap_len=overlap_len,\r\n max_len=max_len,\r\n **kwargs)\r\n return y", "def freq_by_time(sig, fs, f_range, hilbert_increase_n=False, remove_edges=True, **filter_kwargs):\n\n pha = phase_by_time(sig, fs, f_range, hilbert_increase_n,\n remove_edges, **filter_kwargs)\n\n phadiff = np.diff(pha)\n phadiff[phadiff < 0] = phadiff[phadiff < 0] + 2 * np.pi\n\n i_f = fs * phadiff / (2 * np.pi)\n i_f = np.insert(i_f, 0, np.nan)\n\n return i_f", "def bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs=None):\n\n Nyquist_rate = samp_freq / 2\n if freq_cutoffs is None:\n freq_cutoffs = [500, 10000]\n if rawsong.shape[-1] < 387:\n numtaps = 64\n elif rawsong.shape[-1] < 771:\n numtaps = 128\n elif rawsong.shape[-1] < 1539:\n numtaps = 256\n else:\n numtaps = 512\n\n cutoffs = np.asarray([freq_cutoffs[0] / Nyquist_rate,\n freq_cutoffs[1] / Nyquist_rate])\n # code on which this is based, bandpass_filtfilt.m, says it uses Hann(ing)\n # window to design filter, but default for matlab's fir1\n # is actually Hamming\n # note that first parameter for scipy.signal.firwin is filter *length*\n # whereas argument to matlab's fir1 is filter *order*\n # for linear FIR, filter length is filter order + 1\n b = scipy.signal.firwin(numtaps + 1, cutoffs, pass_zero=False)\n a = np.zeros((numtaps+1,))\n a[0] = 1 # make an \"all-zero filter\"\n padlen = np.max((b.shape[-1] - 1, a.shape[-1] - 1))\n filtsong = scipy.signal.filtfilt(b, a, rawsong, padlen=padlen)\n return filtsong", "def filters(data, f_interval, f_resolution=None, sampling=None, w_column=None):\n print('-------------------------- filters')\n\n # Avoid overwritting data:\n data0 = data.copy()\n \n # Avoid 0 as input as not peaks are found:\n if f_interval[0]==0:\n f_interval = [f_resolution, f_interval[1]]\n \n # Calculates power spectrum:\n Pf_power, P_comp, _, _, = tt.power(data0, f_interval, f_resolution, sampling, w_column)\n t = data0[:,0]\n f = Pf_power[:,0]\n alpha = P_comp[:,0] \n beta = P_comp[:,1]\n\n # Calculates P_filter:\n P_filter = np.zeros(len(t))\n fpicon = 2*np.pi*f # Optimization constant\n for i in range(len(t)):\n tfpicon = fpicon*t[i] # Optimization constant\n alpha_sin = alpha*np.sin(tfpicon)\n beta_cos = beta* np.cos(tfpicon)\n P_filter[i] = np.sum(alpha_sin + beta_cos)\n\n # Calculates window function:\n Pf_window = tt.window(data0, f_interval, f_resolution, sampling)\n P_window = 
Pf_window[:,1]\n \n # Bandpass/Lowpass and Highpass filter:\n S_low_band = P_filter/np.sum(P_window)\n S_high = data0[:,1]-S_low_band\n St_low_band = np.vstack([t, S_low_band]).T\n St_high = np.vstack([t, S_high]).T\n return St_low_band, St_high", "def apply_filter_sweep(\n sound: np.ndarray, event: 'sinethesizer.synth.core.Event',\n kind: str = 'absolute',\n bands: List[Tuple[Optional[float], Optional[float]]] = None,\n invert: bool = False, order: int = 25,\n frequency: float = 6, phase: float = 0.0, waveform: str = 'sine'\n) -> np.ndarray:\n bands = bands or [(None, None)]\n if len(bands) == 1:\n sound = apply_frequency_filter(sound, event, kind, bands[0][0], bands[0][1], invert, order)\n return sound\n filtered_sounds = [\n apply_frequency_filter(\n sound, event, kind, min_cutoff_frequency, max_cutoff_frequency, invert, order\n )\n for min_cutoff_frequency, max_cutoff_frequency in bands\n ]\n filtered_sounds = [x.reshape((1, x.shape[0], x.shape[1])) for x in filtered_sounds]\n filtered_sounds = np.concatenate(filtered_sounds)\n sound = oscillate_between_sounds(filtered_sounds, event.frame_rate, frequency, phase, waveform)\n return sound", "def set_frequency(self, newval):\n rest_val = str(int(round(newval * 65536.0, 1)))\n return self._setAttr(\"frequency\", rest_val)", "def process(self, frame, cur_count):\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n _, gray = cv.threshold(gray, 30, 255, cv.THRESH_BINARY)\n black_count = float(np.sum(gray)) / float(gray.size)\n # If at least 80% of the frame is true black, race has stopped\n if black_count <= 0.2:\n self.handle(frame, cur_count)", "def apply(self, *args):\n return _ida_hexrays.microcode_filter_t_apply(self, *args)", "def MyFilter(data, window_width=10, beta=2.0, draw_graph=False):\n\n #read data and change the format\n if 'time' in data.columns:\n date_list = []\n for i in data.index:\n date_parse = parse(str(data.ix[i].time))\n date_list.append(date_parse)\n data['date'] = date_list\n data_use = data\n data_use.index = data_use['date'].tolist()\n data_use = data_use.drop(['date','time'], axis=1)\n data_use.index.name = 'time'\n else:\n data_use = data\n #design filter, use the kaiser window here\n window = signal.kaiser(window_width, beta=beta)\n data_use['close_filtered'] = signal.convolve(data_use['close'], window, mode='same') / sum(window)\n data_use['high_frequency'] = data_use['close'] - data_use['close_filtered']\n\n #delete the distortion datas after filtered\n if window_width % 2 == 0:\n data_changed = data_use[window_width/2: -(window_width/2 - 1)]\n else:\n data_changed = data_use[(window_width-1)/2: -(window_width-1)/2]\n\n #draw graph\n if (draw_graph == True) :\n fig = plt.figure()\n ax1 = plt.subplot2grid((3,1), (0,0), rowspan=2)\n data_changed.loc[:,'close'].plot(style='r', label='original')\n data_changed.loc[:,'close_filtered'].plot(style='k', label='filtered')\n plt.title('Kaiser window_width = %d , const = %d' % (window_width, beta))\n plt.legend(loc='best')\n\n ax2 = plt.subplot2grid((3,1), (2,0))\n data_changed.loc[:,'high_frequency'].plot(label='high_frequency')\n ax2.set_ylim([-150, 150])\n plt.title('High Frequency')\n plt.legend(loc='best')\n plt.show()\n # print data_use\n # print data_changed\n data_out = data_changed['close_filtered']\n return np.array(data_out.tolist())", "def get_over(self, filter_dict, percentage):\n pass", "def main():\n # Create a new instance of a high pass filter, using the default constructor\n hpf = GRT.HighPassFilter()\n\n # Set the cutoff frequency of the filter to 2.0Hz\n 
hpf.setCutoffFrequency(2, 1.0 / 1000.0)\n\n # Create some variables to help generate the signal data\n num_seconds = 6 # The number of seconds of data we want to generate\n t = 0 # This keeps track of the time\n t_step = 1.0 / 1000.0 # This is how much the time will be updated at each iteration in the for loop\n\n # Add the freq rates\n # The first value is the time in seconds and the second value is the frequency that should be set at that time\n freq_rates = {0: 0.1, 1: 0.5, 2: 1, 3: 2, 4: 4, 5: 8, 6: 16}\n\n # Generate the signal and filter the data\n for i in range(num_seconds * 1000):\n # Check to see if we should update the freq rate to the next value\n # Set the new frequency value\n freq = [v for (k, v) in freq_rates.items() if k > (i / 1000)][0]\n\n # Generate the signal\n signal = math.sin(t * math.tau * freq)\n\n # Filter the signal\n filtered_value = hpf.filter(signal)\n\n # Print the signal and the filtered data\n print(\"%.3f %.3f %.3f\" % (freq, signal, filtered_value))\n\n # Update the t\n t += t_step\n\n # Save the HighPassFilter settings to a file\n hpf.save(\"HighPassFilterSettings.grt\")\n\n # We can then load the settings later if needed\n hpf.load(\"HighPassFilterSettings.grt\")", "def __init__(self, fs=250, smoothing_win=20):\n self.fs = fs\n self.threshold = .35 # Generally between 0.3125 and 0.475\n self.ns200ms = int(self.fs * .2)\n self.r_peaks_buffer = [(0., 0.)]\n self.noise_peaks_buffer = [(0., 0., 0.)]\n self.prev_samples = np.zeros(smoothing_win)\n self.prev_diff_samples = np.zeros(smoothing_win)\n self.prev_times = np.zeros(smoothing_win)\n self.prev_max_slope = 0\n\n self.bp_filter = ExGFilter(cutoff_freq=(1, 30), filter_type='bandpass', s_rate=fs, n_chan=1, order=3)\n self.hamming_window = signal.windows.hamming(smoothing_win, sym=True)\n self.hamming_window /= self.hamming_window.sum()", "def freq(self, value: int, /) -> None:", "def filter_Nofinding_imgs(ori_ann_file, filter_info_file, out_file,\n score_thr=0.08, key_name='class'):\n ori_ann_infos = mmcv.load(ori_ann_file)\n df = pd.read_csv(filter_info_file)\n\n ori_image_infos = {os.path.splitext(info['file_name'])[0]: info\n for info in ori_ann_infos['images']}\n print('before filter, there are {} images.'.format(len(ori_image_infos)))\n new_images = []\n for idx, row in df.iterrows():\n image_name = row['image_id']\n cls = row[key_name]\n if cls >= score_thr:\n new_images.append(ori_image_infos[image_name])\n print('after filter, there are {} images.'.format(len(new_images)))\n print('saving new test annotations into file')\n ori_ann_infos['images'] = new_images\n mmcv.dump(ori_ann_infos, out_file)\n print('all done!')", "def hann_sinc_low_pass(x: Tensor, N: int, fs: int, fc: float) -> Tensor:\n w = continuous_hann_sinc_filter(fs, fc, 2*N+1, x.dtype, x.device)\n w = (w / w.sum()).view(1, 1, -1)\n return torch.nn.functional.conv1d(x, w, padding=N)", "def test_filter(resp_wfm, frequency_filters=None, noise_threshold=None, excit_wfm=None, show_plots=True,\n plot_title=None, verbose=False):\n if not isinstance(resp_wfm, (np.ndarray, list)):\n raise TypeError('resp_wfm should be array-like')\n resp_wfm = np.array(resp_wfm)\n\n show_loops = False\n if excit_wfm is not None and show_plots:\n if len(resp_wfm) % len(excit_wfm) == 0:\n show_loops = True\n else:\n raise ValueError('Length of resp_wfm should be divisibe by length of excit_wfm')\n if show_loops:\n if plot_title is None:\n plot_title = 'FFT Filtering'\n else:\n assert isinstance(plot_title, (str, unicode))\n\n if frequency_filters is None and 
noise_threshold is None:\n raise ValueError('Need to specify at least some noise thresholding / frequency filter')\n\n if noise_threshold is not None:\n if noise_threshold >= 1 or noise_threshold <= 0:\n raise ValueError('Noise threshold must be within (0 1)')\n\n samp_rate = 1\n composite_filter = 1\n if frequency_filters is not None:\n if not isinstance(frequency_filters, Iterable):\n frequency_filters = [frequency_filters]\n if not are_compatible_filters(frequency_filters):\n raise ValueError('frequency filters must be a single or list of FrequencyFilter objects')\n composite_filter = build_composite_freq_filter(frequency_filters)\n samp_rate = frequency_filters[0].samp_rate\n\n resp_wfm = np.array(resp_wfm)\n num_pts = resp_wfm.size\n\n fft_pix_data = np.fft.fftshift(np.fft.fft(resp_wfm))\n\n if noise_threshold is not None:\n noise_floor = get_noise_floor(fft_pix_data, noise_threshold)[0]\n if verbose:\n print('The noise_floor is', noise_floor)\n\n fig_fft = None\n if show_plots:\n w_vec = np.linspace(-0.5 * samp_rate, 0.5 * samp_rate, num_pts) * 1E-3\n\n fig_fft, [ax_raw, ax_filt] = plt.subplots(figsize=(12, 8), nrows=2)\n axes_fft = [ax_raw, ax_filt]\n set_tick_font_size(axes_fft, 14)\n\n r_ind = num_pts\n if isinstance(composite_filter, np.ndarray):\n r_ind = np.max(np.where(composite_filter > 0)[0])\n\n x_lims = slice(len(w_vec) // 2, r_ind)\n amp = np.abs(fft_pix_data)\n ax_raw.semilogy(w_vec[x_lims], amp[x_lims], label='Raw')\n if frequency_filters is not None:\n ax_raw.semilogy(w_vec[x_lims], (composite_filter[x_lims] + np.min(amp)) * (np.max(amp) - np.min(amp)),\n linewidth=3, color='orange', label='Composite Filter')\n if noise_threshold is not None:\n ax_raw.axhline(noise_floor,\n # ax_raw.semilogy(w_vec, np.ones(r_ind - l_ind) * noise_floor,\n linewidth=2, color='r', label='Noise Threshold')\n ax_raw.legend(loc='best', fontsize=14)\n ax_raw.set_title('Raw Signal', fontsize=16)\n ax_raw.set_ylabel('Magnitude (a. u.)', fontsize=14)\n\n fft_pix_data *= composite_filter\n\n if noise_threshold is not None:\n fft_pix_data[np.abs(fft_pix_data) < noise_floor] = 1E-16 # DON'T use 0 here. ipython kernel dies\n\n if show_plots:\n ax_filt.semilogy(w_vec[x_lims], np.abs(fft_pix_data)[x_lims])\n ax_filt.set_title('Filtered Signal', fontsize=16)\n ax_filt.set_xlabel('Frequency(kHz)', fontsize=14)\n ax_filt.set_ylabel('Magnitude (a. 
u.)', fontsize=14)\n if noise_threshold is not None:\n ax_filt.set_ylim(bottom=noise_floor) # prevents the noise threshold from messing up plots\n fig_fft.tight_layout()\n\n filt_data = np.real(np.fft.ifft(np.fft.ifftshift(fft_pix_data)))\n\n if verbose:\n print('The shape of the filtered data is {}'.format(filt_data.shape))\n print('The shape of the excitation waveform is {}'.format(excit_wfm.shape))\n\n fig_loops = None\n if show_loops:\n if len(resp_wfm) == len(excit_wfm):\n # single plot:\n fig_loops, axis = plt.subplots(figsize=(5.5, 5))\n axis.plot(excit_wfm, resp_wfm, 'r', label='Raw')\n axis.plot(excit_wfm, filt_data, 'k', label='Filtered')\n axis.legend(fontsize=14)\n set_tick_font_size(axis, 14)\n axis.set_xlabel('Excitation', fontsize=16)\n axis.set_ylabel('Signal', fontsize=16)\n axis.set_title(plot_title, fontsize=16)\n fig_loops.tight_layout()\n else:\n # N loops:\n raw_pixels = np.reshape(resp_wfm, (-1, len(excit_wfm)))\n filt_pixels = np.reshape(filt_data, (-1, len(excit_wfm)))\n print(raw_pixels.shape, filt_pixels.shape)\n\n fig_loops, axes_loops = plot_curves(excit_wfm, [raw_pixels, filt_pixels], line_colors=['r', 'k'],\n dataset_names=['Raw', 'Filtered'], x_label='Excitation',\n y_label='Signal', subtitle_prefix='Col ', num_plots=16,\n title=plot_title)\n\n return filt_data, fig_fft, fig_loops", "def callback_freq_cut(val):\n global plot_mode\n global idx_freq\n last_plot_mode = plot_mode\n plot_mode = 'freq_cut'\n# print( 'scale_freq', scale_freq)\n idx_freq = freq_to_idx( val, scale_freq )\n val_freq = idx_freq * scale_freq\n# print( 'val idx_freq val_freq', val, idx_freq, val_freq )\n update_num_shadow(int(sld['neighbors'].val))\n #plot 121\n lcutfreq.set_ydata( [val_freq, val_freq])\n lcuttime.set_alpha( 0.0 )\n lcutfreq.set_alpha( alpha_hm )\n #plot 122\n if plot_mode == last_plot_mode:\n replot_flags = get_replot_flag( idx_freq )\n replot_shadow( replot_flags )\n update_shadow( ~replot_flags )\n update_light()\n else:\n replot_shadow( [True, True])\n replot_light()\n reform_axis()\n \n fig.canvas.draw_idle()", "def mask(self):\n\n mask = self.freqs >= self.minimum_threshold\n mask = mask.astype(int)\n self.freqs = self.freqs * mask\n self.sums = self.sums * mask", "def butterworth_filter(freq):\n\tf_raw = 1/(0.00000002*100*33)\n\tb = np.array([[-32092,15750],[-31238,14895]])*2.0**(-14)\n\tomega = 2*np.pi*freq/f_raw\n\te1, e2 = np.exp(-1j*omega), np.exp(-2j*omega)\n\ttmp = (1+2*e1+e2)**2/(1+b[0,0]*e1+b[0,1]*e2)/(1+b[1,0]*e1+b[1,1]*e2)\n\treturn tmp * (1+sum(b[0]))*(1+sum(b[1]))/16", "def flood_frequency(data_vector, feat_keep, feature_edges_keep):\n classes = len(feat_keep)\n flood_index = feat_keep.index('flooded')\n perm_index = feat_keep.index('GSW_perm')\n data_vector[data_vector[:, perm_index] == 1, flood_index] = 0 # Remove detected water that is perm water\n freqs = []\n for i in range(classes):\n NFi = np.sum(\n np.logical_and(data_vector[:, i] == 1, data_vector[:, flood_index] == 1)) # Num flood pixels in class\n NFt = np.sum(data_vector[:, flood_index]) # Num total flood pixels\n freq = NFi / NFt\n freqs.append(freq)\n\n freq_feats = pd.concat([pd.DataFrame(data=feat_keep, columns=['feature']),\n pd.DataFrame(data=freqs, columns=['Frequency']),\n feature_edges_keep['edge']], axis=1)\n freq_feats = freq_feats.drop([flood_index, perm_index], axis=0)\n\n return freq_feats", "def time_filter(self, width, kernel='t', bgwindow=4, show=0):\n\n print 'Applying fft time filter. 
Assumes no missing data in time.'\n\n if not isinstance(width, types.ListType):\n width = [width] * len(self.chans)\n\n # time filter by convolution. functions have different normlizations. m has central peak integral=1 and total is 0. others integrate to 1, so they don't do bg subtraction.\n kernelset = {} # optionally could make set of kernels. one per width needed. (used only by 'w' for now).\n\n if kernel == 'm':\n from scipy import signal\n print 'Applying mexican hat filter. Note that effective width is somewhat larger than equivalent tophat width.'\n for w in n.unique(width):\n kernel = signal.wavelets.ricker(len(self.data), w) # mexican hat (ricker) function can have given width and integral=0, so good for smoothing in time and doing bg-subtraction at same time! width of averaging is tied to width of bgsub though...\n kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize to have peak integral=1, thus outside integral=-1.\n elif kernel == 't':\n import math\n print 'Applying tophat filter.'\n for w in n.unique(width):\n kernel = n.zeros(len(self.data)) # tophat.\n onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(math.ceil(w/2.)))\n kernel[onrange] = 1.\n kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize to have peak integral=1, thus outside integral=-1.\n elif kernel == 'b':\n import math\n print 'Applying tophat filter with bg subtraction (square mexican hat).'\n for w in n.unique(width):\n kernel = n.zeros(len(self.data)) # tophat.\n onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(math.ceil(w/2.)))\n kernel[onrange] = 1.\n offrange = range(len(kernel)/2 - (bgwindow/2+w)+1, len(kernel)/2-w+1) + range(len(kernel)/2 + w, len(kernel)/2 + (w+bgwindow/2))\n offrange = range(len(kernel)/2 - (bgwindow+w)/2, len(kernel)/2-w/2) + range(len(kernel)/2 + int(math.ceil(w/2.)), len(kernel)/2 + int(math.ceil((w+bgwindow)/2.)))\n kernel[offrange] = -1.\n posnorm = n.where(kernel>0, kernel, 0).sum() # find normalization of positive\n negnorm = n.abs(n.where(kernel<0, kernel, 0).sum()) # find normalization of negative\n kernelset[w] = n.where(kernel>0, kernel/posnorm, kernel/negnorm) # pos and neg both sum to 1/-1, so total integral=0\n elif kernel == 'g':\n from scipy import signal\n print 'Applying gaussian filter. Note that effective width is much larger than equivalent tophat width.'\n for w in n.unique(width):\n kernel = signal.gaussian(len(self.data), w) # gaussian. 
peak not quite at 1 for widths less than 3, so it is later renormalized.\n kernelset[w] = kernel / (w * n.sqrt(2*n.pi)) # normalize to pdf, not peak of 1.\n elif kernel == 'w':\n import math\n print 'Applying tophat filter that varies with channel.'\n for w in n.unique(width):\n kernel = n.zeros(len(self.data)) # tophat.\n onrange = range(len(kernel)/2 - w/2, len(kernel)/2 + int(math.ceil(w/2.)))\n kernel[onrange] = 1.\n kernelset[w] = kernel/n.where(kernel>0, kernel, 0).sum() # normalize to have peak integral=1, thus outside integral=-1.\n\n if show:\n for kernel in kernelset.values():\n p.plot(kernel,'.')\n p.title('Time filter kernel')\n p.show()\n\n # take ffts (in time)\n datafft = n.fft.fft(self.data, axis=0)\n kernelsetfft = {}\n for w in n.unique(width):\n kernelsetfft[w] = n.fft.fft(n.roll(kernelset[w], len(self.data)/2)) # seemingly need to shift kernel to have peak centered near first bin if convolving complex array (but not for real array?)\n\n # filter by product in fourier space\n for i in range(self.nbl): # **can't find matrix product I need, so iterating over nbl, chans, npol**\n for j in range(len(self.chans)):\n for k in range(self.npol):\n datafft[:,i,j,k] = datafft[:,i,j,k]*kernelsetfft[width[j]] # index fft kernel by twidth\n\n # ifft to restore time series\n self.data = n.ma.masked_array(n.fft.ifft(datafft, axis=0), self.flags[:self.nints,:, self.chans,:] == 0)\n self.dataph = (self.data.mean(axis=3).mean(axis=1)).real", "def _get_fir_filter(passband, fs, order=183, weights=[5.75, 1., 5.75], mask=[0, 1, 0]):\n # return remez(order, passband, mask, weights, Hz=fs), 1.\n return remez(order, passband, mask, Hz=fs), 1.", "def freq(self, freq=None):\n raise NotImplementedError()", "def computeFreq(self):\n for x in self.data:\n i = 0\n for interval in self.classesInterval:\n if interval[0] <= x <= interval[1]:\n self.frequencies[i] += 1\n break\n i += 1\n\n self.minFreq = self.frequencies[0]\n self.maxFreq = self.frequencies[0]\n for f in self.frequencies:\n if f < self.minFreq:\n self.minFreq = f\n elif f > self.maxFreq:\n self.maxFreq = f", "def filter(self, record):\n if super(FrequencyFilter, self).filter(record) == 0:\n return 0\n\n # distinguish this error log\n params = [\n record.module,\n record.filename,\n record.funcName,\n str(record.lineno),\n record.levelname\n ]\n if self._prefix:\n params.append(self._prefix)\n key = ','.join(params)\n\n # get redis value of this key\n cache = FrequencyCache.p_col.find_one_and_update(\n {'_id': key},\n {\n '$inc': {FrequencyCache.Field.data: 1},\n '$setOnInsert': {\n '_id': key,\n FrequencyCache.Field.time: datetime.utcnow()\n }\n },\n return_document=ReturnDocument.AFTER,\n upsert=True\n )\n v = cache[FrequencyCache.Field.data]\n\n if v <= self._repeat_count + 1:\n # will be handled\n return 1\n else:\n return 0", "def LP_filt(filterLength, x):\n b=np.ones(filterLength,)/(filterLength) #Finite Impulse Response (FIR) Moving Average (MA) filter with one second filter length\n a=1\n y = signal.filtfilt(b, a, x)\n return y", "def bandpass_filter_audio(audio, f_low=400, f_high=450):\n filtered_audio = core.sinc_filter(audio, f_low, window_size=256, high_pass=True)\n filtered_audio = core.sinc_filter(filtered_audio, f_high, window_size=256, high_pass=False)\n return tf.squeeze(filtered_audio)", "def bandpass_filtfilt(rawsong, samp_freq, freq_cutoffs=(500, 10000)):\n if freq_cutoffs[0] <= 0:\n raise ValueError('Low frequency cutoff {} is invalid, '\n 'must be greater than zero.'\n .format(freq_cutoffs[0]))\n\n Nyquist_rate = 
samp_freq / 2\n if freq_cutoffs[1] >= Nyquist_rate:\n raise ValueError('High frequency cutoff {} is invalid, '\n 'must be less than Nyquist rate, {}.'\n .format(freq_cutoffs[1], Nyquist_rate))\n\n if rawsong.shape[-1] < 387:\n numtaps = 64\n elif rawsong.shape[-1] < 771:\n numtaps = 128\n elif rawsong.shape[-1] < 1539:\n numtaps = 256\n else:\n numtaps = 512\n\n cutoffs = np.asarray([freq_cutoffs[0] / Nyquist_rate,\n freq_cutoffs[1] / Nyquist_rate])\n # code on which this is based, bandpass_filtfilt.m, says it uses Hann(ing)\n # window to design filter, but default for matlab's fir1\n # is actually Hamming\n # note that first parameter for scipy.signal.firwin is filter *length*\n # whereas argument to matlab's fir1 is filter *order*\n # for linear FIR, filter length is filter order + 1\n b = scipy.signal.firwin(numtaps + 1, cutoffs, pass_zero=False)\n a = np.zeros((numtaps+1,))\n a[0] = 1 # make an \"all-zero filter\"\n padlen = np.max((b.shape[-1] - 1, a.shape[-1] - 1))\n filtsong = scipy.signal.filtfilt(b, a, rawsong, padlen=padlen)\n return filtsong", "def test_3d_freq_lowmem():\n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n lowmem_write_readback_3D(dic,data)", "def plot_feature_correlation(self, N=20, save=None):\n makedir(self.plotdir)\n if save is None:\n save = '/feature_correlation.png'.format(self.plotdir)\n \n # compile feature frequencies\n feats = []\n fls = glob('{:s}/*.fts'.format(self.modeldir))\n for i,fl in enumerate(fls):\n with open(fl) as fp:\n lns = fp.readlines()\n feats += [' '.join(ln.rstrip().split()[1:]) for ln in lns] \n labels = list(set(feats) - set(['']))\n freqs = [feats.count(label) for label in labels]\n\n labels = [label for _,label in sorted(zip(freqs,labels))][::-1]\n freqs = sorted(freqs)[::-1]\n fts = copy(labels)\n \n N = np.min([N, len(freqs)])\n labels = ['\\n'.join(wrap(' '.join(l.split('_')), 30)) for l in labels ][:N]\n \n f = plt.figure(figsize=(8, 8))\n\n fM,ys = self._extract_features(self.ti_forecast, self.tf_forecast)\n filt_df = np.log10(fM[fts[:N]]).replace([np.inf, -np.inf], np.nan).dropna()\n low, high = [0.005, 0.995]\n quant_df = filt_df.quantile([low, high])\n filt_df.index = range(filt_df.shape[0])\n filt_df = filt_df.apply(lambda x: x[(x>quant_df.loc[low,x.name]) & (x < quant_df.loc[high,x.name])], axis=0)\n fM[fts[:N]]\n\n inds = []\n for te in self.data.tes:\n inds.append(np.where((ys['label']>0)&(abs((te-ys.index).total_seconds()/(3600*24))<5.)))\n cols = ['b','g','r','m','c']\n truths = []\n for i, ind, col in zip(range(-2,3), inds, cols):\n truths.append(np.log10(fM[fts[:N]].iloc[ind]).replace([np.inf, -np.inf], np.nan).dropna())\n #te = self.data.tes[i+2]\n \n fig = corner(filt_df.dropna(), labels=labels[:N])\n axes = np.array(fig.axes).reshape((N, N))\n for yi in range(N):\n for xi in range(yi):\n ax = axes[yi,xi]\n for col,truth in zip(cols,truths):\n ax.scatter(truth[fts[xi]].values, truth[fts[yi]].values, np.arange(1,5)*6, col)\n \n plt.savefig(save,dpi=300)\n plt.close(fig)", "def set_center_freq(self, *args):\n return _uhd_swig.usrp_sink_set_center_freq(self, *args)", "def test_inspect_freq_filter(tmp_path, l_freq, h_freq):\n pytest.importorskip(\"matplotlib\")\n bids_root = 
setup_bids_test_dir(tmp_path)\n bids_path = _bids_path.copy().update(root=bids_root)\n inspect_dataset(bids_path, l_freq=l_freq, h_freq=h_freq, find_flat=False)", "def filtered_families(seq_fam, minimum_count = 500, draw_histogram=False):\n \n families = Counter(fam for seq, fam in seq_fam.items())\n print('Number of families before filter: {}'.format(len(families)))\n \n filtered_fam = {fam : count for fam, count in families.items() if count >= minimum_count }\n ff_counts = np.array([*filtered_fam.values()])\n\n if draw_histogram:\n # Draw histogram\n fig, ax = plt.subplots()\n ax.hist(ff_counts)\n ax.set_ylabel('Count')\n ax.set_xlabel('Examples / Family')\n\n print('Num Examples: {} | Families: {} \\n'\n 'Mean: {:.2f} | Variance: {:.2f} \\n'\n 'Min: {} | Max: {}'.format(\n np.sum(ff_counts),\n len(filtered_fam),\n np.mean(ff_counts),\n np.var(ff_counts),\n np.min(ff_counts),\n np.max(ff_counts)))\n \n return filtered_fam", "def ftm_analyze(dat, flim=(1,-1)):\r\n # TODO: f from inverse window to inverse pixels \r\n \r\n #-- see P. M. Celliers et al, RSI, (2004), Section V --\r\n S = dat # (eq. 10), signal\r\n s = np.fft.fft(S, axis=0) # (eq. 11), 1-D fft of columns\r\n\r\n fmin, fmax = flim # frequency filter\r\n if fmax is None:\r\n fmax = -s.shape[0]//2 # clip out all negative freqs\r\n d = np.zeros_like(s)\r\n d[fmin:fmax, :] = s[fmin:fmax, :] # filtered s\r\n D = np.fft.ifft(d, axis=0) # (eq. 12) back transform\r\n\r\n W = np.angle(D) # (eq. 13) \"Wrapped\" phase angle...\r\n A = np.abs(D) # fringe modulation amplitude\r\n\r\n ratio = D[:,1:]/D[:,:-1]\r\n dphi = np.zeros_like(D)\r\n dphi[:,1:] = np.arctan(ratio.imag/np.abs(ratio)) # phase differences\r\n dphi2 = np.zeros_like(W)\r\n dphi2[:,1:] = np.angle(ratio) # angle differences\r\n\r\n U = -np.cumsum(dphi.real, 1) / TAU # unwrapped fringes\r\n U2 = -np.cumsum(dphi2, 1) / TAU # unwrapped fringes alt\r\n\r\n return A, W, dphi2, U2", "def bandpass_filter(data, lowcut, highcut, fs=2000, numtaps=255):\n nyq = fs / 2\n\n # design filter\n fe1 = lowcut / nyq\n fe2 = highcut / nyq\n b = firwin(numtaps, (fe1, fe2), pass_zero=False)\n\n filtered = lfilter(b, 1, data)\n\n return filtered", "def freq(self, freq: Optional[int] = None) -> Optional[int]:\n ...", "def noise(self, freq: int, /) -> None:", "def butter_highpass_filter(data, cutoff, fs, order=1):\n b, a = butter_highpass(cutoff, fs, order=order)\n y = filtfilt(b, a, data)\n return y", "def retina(fx, fy, ft, df=.07, sigma=.5):\n # removing high frequencies in the corners\n fr = frequency_radius(fx, fy, ft, ft_0=ft_0)\n env = (1-np.exp((fr-.5)/(.5*df)))*(fr<.5)\n # removing low frequencies\n # N_X, N_Y, N_frame = fx.shape[0], fy.shape[1], ft.shape[2]\n # env *= 1-np.exp(-.5*(fr**2)/((sigma/N_X)**2))\n return env", "def _real_2_frequency(self, kernel):\n return self.basis._fftn(np.fft.ifftshift(kernel,\n axes=self.basis._axes))", "def test_3d_freq():\n dic,data = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n sdic,sdata = ng.pipe.read(\"common_data/3d_pipe/ft/test001.ft3\")\n\n assert data.shape == (128, 128, 4096)\n assert data.dtype == 'float32'\n assert round(data[0,1,2],2) == 25980.13\n assert round(data[10,22,5],2) == 1561.09\n check_ppm_limits(dic,data,0,[78.10, 34.24])\n check_ppm_limits(dic,data,1,[147.42, 93.01])\n check_ppm_limits(dic,data,2,[254.92, -142.83])\n\n # and the first slice\n assert sdata.shape == (128, 4096)\n assert sdata.dtype == 'float32'\n assert round(sdata[1,2],2) == 25980.13\n assert round(sdata[22,5],2) == -8336.05\n 
check_ppm_limits(sdic,sdata,0,[147.42, 93.01])\n check_ppm_limits(sdic,sdata,1,[254.92, -142.83])\n\n # slice/data matching\n assert_array_equal(data[0],sdata)\n\n write_readback_3D(dic,data)", "def setFrequencyThreshold(self, v):\n return self._set(frequencyThreshold=v)", "def highpass1(y, dt, fc=3) :\r\n tau=1/(2*np.pi*fc)\r\n alpha=tau/(tau+dt)\r\n y_filt=np.zeros(y.shape)\r\n y_filt[0]=0\r\n for i in np.arange(1,len(y)):\r\n y_filt[i]=alpha*y_filt[i-1] + alpha*(y[i]-y[i-1])\r\n m0=np.mean(y)\r\n m1=np.mean(y_filt)\r\n y_filt+=m0-m1\r\n return y_filt", "def GetFrequency(self):\n ..." ]
[ "0.60981953", "0.56942517", "0.5577544", "0.5543279", "0.55281085", "0.5500619", "0.5476223", "0.5473143", "0.54327935", "0.5337489", "0.5303522", "0.5286453", "0.52189946", "0.521232", "0.5156549", "0.51182044", "0.51102227", "0.50913894", "0.5088179", "0.5085512", "0.50194836", "0.5018264", "0.4991428", "0.49619165", "0.49598998", "0.49554652", "0.494272", "0.4938644", "0.49384308", "0.49173498", "0.49096298", "0.4909597", "0.4900421", "0.48940834", "0.4890292", "0.48801318", "0.4848724", "0.48462465", "0.4827993", "0.4825212", "0.4824245", "0.4820693", "0.48050672", "0.48032966", "0.4798281", "0.47978908", "0.47860983", "0.47784993", "0.4768915", "0.47524926", "0.47502747", "0.47300014", "0.4711414", "0.4707285", "0.47025916", "0.4702472", "0.4701557", "0.4695369", "0.4691258", "0.467382", "0.4673144", "0.46712044", "0.46596992", "0.46555126", "0.46551815", "0.4642137", "0.46421105", "0.46369982", "0.46356416", "0.4633554", "0.4631268", "0.46250173", "0.46216938", "0.4619962", "0.46150538", "0.46137786", "0.4605834", "0.46037835", "0.46034658", "0.4597724", "0.4596765", "0.45923948", "0.4588244", "0.45755166", "0.45716962", "0.45716724", "0.4569424", "0.45633462", "0.4562826", "0.45587158", "0.45562106", "0.45526323", "0.45500582", "0.45474926", "0.45427215", "0.45419577", "0.45369294", "0.45337912", "0.45320353", "0.4529439" ]
0.69544667
0
Function to apply a probability filter to land cover probabilities in each image of imageCollection. The user defines which classes will be filtered and how to filter them in the params list. The params list is a list of dictionaries, one for each class the user wants to filter.
def applyProbabilityCutoffs(imageCollection, params): #Define function to map across imageCollection def probabilityFilter(image): #Get the classifications from the class with the highest probability classifications = npv.probabilityToClassification(image) #Loop through parameters for param in params: #Load parameter values class_name = param.get('class_name') class_value = param.get('class_value') filter_name = param.get('filter') threshold = param.get('threshold') if filter_name=='gt': #Find where the class_name is greater than threshold prob_mask = image.select(class_name).gt(ee.Image.constant(threshold)) #Replace those pixels with the class value classifications = classifications.where(prob_mask,class_value) elif filter_name=='gte': #Find where the class_name is greater than or equal to threshold prob_mask = image.select(class_name).gte(ee.Image.constant(threshold)) #Replace those pixels with the class value classifications = classifications.where(prob_mask,class_value) elif filter_name == 'lte': #Find where the class_name is less than or equal to threshold prob_mask = image.select(class_name).lte(ee.Image.constant(threshold)) #Find where classifications are equal to class value class_mask = classifications.eq(class_value) #We only want to replace pixels where the class probability<=threshold AND classification==class_value reclass_mask = prob_mask.bitwiseAnd(class_mask) #Define square kernel of surrounding pixels kernel = ee.Kernel.square(1) #Convert to a multiband image, one band for each neighbor neighs = classifications.neighborhoodToBands(kernel) #Reduce to find the majority class in neighborhood majority = neighs.reduce(ee.Reducer.mode()) #Replace pixels where the class probability<=threshold AND classification==class_value with the neighborhood majority class classifications = classifications.where(reclass_mask,majority) else: #Find where the class_name is less than or equal to threshold prob_mask = image.select(class_name).lt(ee.Image.constant(threshold)) #Find where classifications are equal to class value class_mask = classifications.eq(class_value) #We only want to replace pixels where the class probability<=threshold AND classification==class_value reclass_mask = prob_mask.bitwiseAnd(class_mask) #Define square kernel of surrounding pixels kernel = ee.Kernel.square(1) #Convert to a multiband image, one band for each neighbor neighs = classifications.neighborhoodToBands(kernel) #Reduce to find the majority class in neighborhood majority = neighs.reduce(ee.Reducer.mode()) #Replace pixels where the class probability<=threshold AND classification==class_value with the neighborhood majority class classifications = classifications.where(reclass_mask,majority) return ee.Image(classifications) return ee.ImageCollection(imageCollection.map(probabilityFilter))
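For reference, a minimal sketch of the params argument that the applyProbabilityCutoffs function above expects. Only the dictionary keys ('class_name', 'class_value', 'filter', 'threshold') and the filter codes ('gt', 'gte', 'lte', with anything else treated as 'lt') are taken from the code above; the specific class names, class values, and thresholds are illustrative assumptions.

# Hypothetical params list: one dict per land cover class to filter.
params = [
    # Reclassify any pixel whose 'water' probability is greater than 0.90 to class value 1.
    {'class_name': 'water', 'class_value': 1, 'filter': 'gt', 'threshold': 0.90},
    # Replace 'forest' pixels (class value 2) whose probability is <= 0.50 with the neighborhood majority class.
    {'class_name': 'forest', 'class_value': 2, 'filter': 'lte', 'threshold': 0.50},
]
# filtered = applyProbabilityCutoffs(imageCollection, params)  # imageCollection: an ee.ImageCollection of class-probability images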
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyFrequencyFilter(image, yearBandNames, classDictionary, filterParams): \n #Grab land cover classes as a list of strings\n lc_classes = classDictionary.keys().getInfo()\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Get the frequency of each class through the years by reducing the image collection to an image using the sum reducer\n class_frequency = binary_class_images.reduce(ee.Reducer.sum().unweighted()).rename(lc_classes)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Define an image to add bands with frequency filter applied\n out_img = ee.Image()\n \n #Loop through years\n for yearBand in yearBandNames:\n #Select the target year from the image\n yearImage = image.select(yearBand)\n \n #Loop through land cover classes in filterParams\n for lc_class in lc_classes:\n #Get the minimum occurance allowed in that land cover class\n min_occurance = filterParams.get(lc_class)\n \n #Find if the land cover class had less than the number of min_occurances in each pixel\n change_class = class_frequency.select(lc_class).lt(ee.Image.constant(min_occurance))\n \n #If change_class==1, then replace that pixel with the mode of all the years in that pixel\n #This filter is only applied to pixels of this land cover class\n #First mask yearImage to pixels of this land cover class, then get the union of pixels where change_class==1,\n #if both conditions are true, then the pixel is replaced with the mode\n yearImage = yearImage.where(yearImage.eq(ee.Number(classDictionary.get(lc_class))).bitwiseAnd(change_class),mode_image)\n #Rename yearImage to bandName\n yearImage = yearImage.rename(yearBand)\n #Append to output image\n out_img = out_img.addBands(yearImage)\n \n return out_img", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def filtro_probs(prediccion,p_min):\n clases = []\n for probabilidad in prediccion:\n if probabilidad[1]>=p_min:\n clases.append(probabilidad)\n else:\n clases.append(\"-\")\n return clases", "def filters(im, filter_list=[\"MedianFilter\"]):\n out = im\n for filter_name in filter_list:\n out = out.filter(getattr(ImageFilter, filter_name))\n return out", "def filter_classes(class_ints, class_list, class_filt):\n class_names = [class_list[int(c)] for c in class_ints]\n filter = [name in class_filt for name in class_names]\n return np.array(filter)", "def compute(self,filter_name):\n self.result = []\n for img in self.imgs:\n r = filters_dict[filter_name](img)\n if \"threshold\" in filter_name:\n r = img>r\n r = 1.0*r.copy()\n self.result.append(r)", "def filterp(th,ProbClass1):\n y=np.zeros(ProbClass1.shape[0])\n for i,v in enumerate(ProbClass1):\n if ProbClass1[i]>th:\n y[i]=1\n return y", "def apply_filter(self, image):\n pass", "def filter_detections(detections, arg_to_class, conf_thresh=0.5):\n num_classes = detections.shape[0]\n filtered_detections = []\n for class_arg in range(1, num_classes):\n class_detections = 
detections[class_arg, :]\n confidence_mask = np.squeeze(class_detections[:, -1] >= conf_thresh)\n confident_class_detections = class_detections[confidence_mask]\n if len(confident_class_detections) == 0:\n continue\n class_name = arg_to_class[class_arg]\n for confident_class_detection in confident_class_detections:\n coordinates = confident_class_detection[:4]\n score = confident_class_detection[4]\n detection = Box2D(coordinates, score, class_name)\n filtered_detections.append(detection)\n return filtered_detections", "def filterp(th, ProbClass1):\n y = np.zeros(ProbClass1.shape[0])\n for i, v in enumerate(ProbClass1):\n if ProbClass1[i] > th:\n y[i] = 1\n return y", "def filters(im, detail=False, sharpen=False, **kwargs):\n filters = []\n if detail:\n filters.append(('detail', True))\n if sharpen:\n filters.append(('sharpen', True))\n return im", "def compute_classifications(depc, gid_list, config=None):\n logger.info('[ibs] Process Image Classifications')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n depc = ibs.depc_image\n if config['classifier_algo'] in ['cnn']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (192, 192),\n }\n thumbnail_list = depc.get_property('thumbnails', gid_list, 'img', config=config_)\n result_list = ibs.generate_thumbnail_class_list(thumbnail_list, **config)\n elif config['classifier_algo'] in ['svm']:\n from wbia.algo.detect.svm import classify\n\n config_ = {'algo': 'resnet'}\n vector_list = depc.get_property('features', gid_list, 'vector', config=config_)\n classifier_weight_filepath = config['classifier_weight_filepath']\n result_list = classify(vector_list, weight_filepath=classifier_weight_filepath)\n elif config['classifier_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails', gid_list, 'img', config=config_, read_extern=False, ensure=True\n )\n result_list = densenet.test(thumbpath_list, ibs=ibs, gid_list=gid_list, **config)\n elif config['classifier_algo'] in ['tile_aggregation', 'tile_aggregation_quick']:\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(';')\n\n assert len(classifier_weight_filepath) == 2\n classifier_algo_, model_tag_ = classifier_weight_filepath\n\n include_grid2 = config['classifier_algo'] in ['tile_aggregation']\n tid_list = ibs.scout_get_valid_tile_rowids(\n gid_list=gid_list, include_grid2=include_grid2\n )\n ancestor_gid_list = ibs.get_tile_ancestor_gids(tid_list)\n confidence_list = ibs.scout_wic_test(\n tid_list, classifier_algo=classifier_algo_, model_tag=model_tag_\n )\n\n gid_dict = {}\n for ancestor_gid, tid, confidence in zip(\n ancestor_gid_list, tid_list, confidence_list\n ):\n if ancestor_gid not in gid_dict:\n gid_dict[ancestor_gid] = []\n gid_dict[ancestor_gid].append(confidence)\n\n result_list = []\n for gid in tqdm.tqdm(gid_list):\n gid_confidence_list = gid_dict.get(gid, None)\n assert gid_confidence_list is not None\n best_score = np.max(gid_confidence_list)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['densenet+neighbors']:\n raise NotImplementedError\n # ut.embed()\n # classifier_weight_filepath = 
config['classifier_weight_filepath']\n\n # all_bbox_list = ibs.get_image_bboxes(gid_list)\n # wic_confidence_list = ibs.scout_wic_test(gid_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # ancestor_gid_list = list(set(ibs.get_tile_ancestor_gids(gid_list)))\n # all_tile_list = list(set(ibs.scout_get_valid_tile_rowids(gid_list=ancestor_gid_list)))\n # all_bbox_list = ibs.get_image_bboxes(all_tile_list)\n # all_confidence_list = ibs.scout_wic_test(all_tile_list, classifier_algo='densenet',\n # model_tag=classifier_weight_filepath)\n #\n # TODO: USE THRESHOLDED AVERAGE, NOT MAX\n # result_list = []\n # for gid, wic_confidence in zip(gid_list, wic_confidence_list):\n # best_score = wic_confidence\n # for aid in aid_list:\n # wic_confidence_ = aid_conf_dict.get(aid, None)\n # assert wic_confidence_ is not None\n # best_score = max(best_score, wic_confidence_)\n #\n # if wic_confidence < 0.5:\n # best_key = 'negative'\n # best_score = 1.0 - best_score\n # else:\n # best_key = 'positive'\n # if best_score > wic_confidence:\n # recovered += 1\n # result = (best_score, best_key, )\n # result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet']:\n import json\n\n json_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n assert exists(json_filepath)\n with open(json_filepath, 'r') as json_file:\n values = json.load(json_file)\n annotations = values.get('annotations', {})\n\n gpath_list = ibs.get_image_paths(gid_list)\n gname_list = [split(gpath)[1] for gpath in gpath_list]\n\n result_list = []\n for gname in gname_list:\n annotation = annotations.get(gname, None)\n assert annotation is not None\n\n best_score = 1.0\n if len(annotation) == 0:\n best_key = 'negative'\n else:\n best_key = 'positive'\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in ['scout_detectnet_csv', 'scout_faster_rcnn_csv']:\n uuid_str_list = list(map(str, ibs.get_image_uuids(gid_list)))\n\n manifest_filepath = join(ibs.dbdir, 'WIC_manifest_output.csv')\n csv_filepath = join(ibs.dbdir, config['classifier_weight_filepath'])\n\n assert exists(manifest_filepath)\n assert exists(csv_filepath)\n\n manifest_dict = {}\n with open(manifest_filepath, 'r') as manifest_file:\n manifest_file.readline() # Discard column header row\n manifest_line_list = manifest_file.readlines()\n for manifest_line in manifest_line_list:\n manifest = manifest_line.strip().split(',')\n assert len(manifest) == 2\n manifest_filename, manifest_uuid = manifest\n manifest_dict[manifest_filename] = manifest_uuid\n\n csv_dict = {}\n with open(csv_filepath, 'r') as csv_file:\n csv_file.readline() # Discard column header row\n csv_line_list = csv_file.readlines()\n for csv_line in csv_line_list:\n csv = csv_line.strip().split(',')\n assert len(csv) == 2\n csv_filename, csv_score = csv\n csv_uuid = manifest_dict.get(csv_filename, None)\n assert (\n csv_uuid is not None\n ), 'Test image {!r} is not in the manifest'.format(\n csv,\n )\n csv_dict[csv_uuid] = csv_score\n\n result_list = []\n for uuid_str in uuid_str_list:\n best_score = csv_dict.get(uuid_str, None)\n assert best_score is not None\n\n if config['classifier_algo'] in ['scout_detectnet_csv']:\n assert best_score in ['yes', 'no']\n best_key = 'positive' if best_score == 'yes' else 'negative'\n best_score = 1.0\n elif config['classifier_algo'] in ['scout_faster_rcnn_csv']:\n best_score = float(best_score)\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n 
best_score = 1.0 - best_score\n else:\n raise ValueError\n\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n elif config['classifier_algo'] in [\n 'lightnet',\n 'densenet+lightnet',\n 'densenet+lightnet!',\n ]:\n min_area = 10\n\n classifier_weight_filepath = config['classifier_weight_filepath']\n classifier_weight_filepath = classifier_weight_filepath.strip().split(',')\n\n if config['classifier_algo'] in ['lightnet']:\n assert len(classifier_weight_filepath) == 2\n weight_filepath, nms_thresh = classifier_weight_filepath\n wic_thresh = 0.0\n nms_thresh = float(nms_thresh)\n wic_confidence_list = [np.inf] * len(gid_list)\n wic_filter = False\n elif config['classifier_algo'] in ['densenet+lightnet', 'densenet+lightnet!']:\n assert len(classifier_weight_filepath) == 4\n (\n wic_model_tag,\n wic_thresh,\n weight_filepath,\n nms_thresh,\n ) = classifier_weight_filepath\n wic_thresh = float(wic_thresh)\n nms_thresh = float(nms_thresh)\n wic_confidence_list = ibs.scout_wic_test(\n gid_list, classifier_algo='densenet', model_tag=wic_model_tag\n )\n wic_filter = config['classifier_algo'] in ['densenet+lightnet']\n else:\n raise ValueError\n\n flag_list = [\n wic_confidence >= wic_thresh for wic_confidence in wic_confidence_list\n ]\n if wic_filter:\n gid_list_ = ut.compress(gid_list, flag_list)\n else:\n gid_list_ = gid_list[:]\n config = {\n 'grid': False,\n 'algo': 'lightnet',\n 'config_filepath': weight_filepath,\n 'weight_filepath': weight_filepath,\n 'nms': True,\n 'nms_thresh': nms_thresh,\n 'sensitivity': 0.0,\n }\n prediction_list = depc.get_property(\n 'localizations', gid_list_, None, config=config\n )\n prediction_dict = dict(zip(gid_list_, prediction_list))\n\n result_list = []\n for gid, wic_confidence, flag in zip(gid_list, wic_confidence_list, flag_list):\n if not flag:\n best_key = 'negative'\n best_score = 1.0 - wic_confidence\n else:\n prediction = prediction_dict.get(gid, None)\n assert prediction is not None\n\n best_score = 0.0\n if prediction is not None:\n score, bboxes, thetas, confs, classes = prediction\n for bbox, conf in zip(bboxes, confs):\n xtl, ytl, w, h = bbox\n area = w * h\n if area >= min_area:\n best_score = max(best_score, conf)\n\n if best_score >= 0.5:\n best_key = 'positive'\n else:\n best_key = 'negative'\n best_score = 1.0 - best_score\n result = (\n best_score,\n best_key,\n )\n result_list.append(result)\n else:\n raise ValueError(\n 'specified classifier algo is not supported in config = {!r}'.format(config)\n )\n\n # yield detections\n for result in result_list:\n yield result", "def class_imgs(list_img):\n numberimg = len(list_img)\n resize(net, numberimg, cursize)\n i = 0\n for img in list_img:\n image = caffe.io.load_image(img)\n transformed_image = transformer.preprocess('data', image)\n net.blobs['data'].data[i] = transformed_image\n i = i + 1\n\n output = net.forward()\n\n results = []\n for n in range(0, numberimg):\n themax = output['prob'][n].argmax()\n results.append({'filename':list_img[n], 'class': themax, 'prob': output['prob'][n].tolist()})\n\n return results", "def run(self, images):\n\n # Apply filtering\n if len(self.preprocessing) > 0: \n print('Applying', len(self.preprocessing), 'filter(s) to input images')\n for filter in self.preprocessing:\n for i in range(len(images)):\n images[i] = filter(images[i])\n\n # Apply feature extraction\n if len(self.features) > 0:\n print('Extracting', len(self.features), 'feature(s) from input images')\n scaler = MinMaxScaler(feature_range=(0, 1))\n for i in 
range(len(images)):\n features = []\n for feature in self.features:\n features.append(feature(images[i]))\n images[i] = np.hstack(features)\n images = scaler.fit_transform(images)\n else:\n # Flatten images (not necessary when using feature extraction)\n train_data = np.array(train_data).reshape((len(train_data), -1))\n\n # Run predictions\n print('Predicting presence of parasites in', len(images), 'images\\n')\n return self.classifier.predict(images)", "def filter(self, filters):", "def selection_profiles_by_chance(true, compare):\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def filter_prediction(boxes, probs, cls_idx): \n if cfg.TOP_N_DETECTION < len(probs) and cfg.TOP_N_DETECTION > 0:\n order = probs.argsort()[:-cfg.TOP_N_DETECTION-1:-1]\n probs = probs[order]\n boxes = boxes[order]\n cls_idx = cls_idx[order]\n else:\n filtered_idx = np.nonzero(probs > cfg.PROB_THRESHOLD)[0]\n probs = probs[filtered_idx]\n boxes = boxes[filtered_idx]\n cls_idx = cls_idx[filtered_idx]\n\n final_boxes = []\n final_probs = []\n final_cls_idx = []\n\n for c in range(cfg.NUM_CLASSES):\n idx_per_class = [i for i in range(len(probs)) if cls_idx[i] == c]\n keep = nms(boxes[idx_per_class], probs[idx_per_class], cfg.NMS_THRESHOLD)\n for i in range(len(keep)):\n if keep[i]:\n final_boxes.append(boxes[idx_per_class[i]])\n final_probs.append(probs[idx_per_class[i]])\n final_cls_idx.append(c)\n return final_boxes, final_probs, final_cls_idx", "def classify_images():\n\n # Load the desired image\n img_path = 'dataset/colorize_images/n02085782_919.jpg'\n img = image.load_img(img_path, target_size=(299, 299))\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n model = InceptionV3(weights=\"imagenet\")\n preds = model.predict(x)\n # decode the results into a list of tuples (class, description, probability)\n # (one such list for each sample in the batch)\n print('Predicted:', decode_predictions(preds, top=3)[0])", "def classify_image_proba(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return [np.amax(model.predict(np.array(images_list)))]", "def selection_profiles_by_chance(fits_path, dataset1, dataset2):\n fits = h5py.File(fits_path, 'r')\n true = np.median(fits[dataset1]['coupling_coefs'][:], axis=0)\n compare = np.median(fits[dataset2]['coupling_coefs'][:], axis=0)\n\n n_neurons, M = true.shape\n probabilities = np.zeros(n_neurons)\n\n for neuron in range(n_neurons):\n n = np.count_nonzero(true[neuron])\n N = np.count_nonzero(compare[neuron])\n rv = hypergeom(M=M, n=n, N=N)\n\n overlap = np.count_nonzero(true[neuron] * compare[neuron])\n probabilities[neuron] = 1 - rv.cdf(x=overlap)\n\n return probabilities", "def OF1_CalculateThresholdValues(param_list, classNum):\n thresholdValues = [(-1., -1.) 
for _ in range(classNum-1)] # np.arange(classNum - 1)\n #numRow = sp.math.factorial(classNum-1)\n #numCol = classNum-1\n #thresholdValues = np.arange(numCol*numRow).reshape(numRow, numCol)\n indexOrder = np.argsort(param_list[classNum:classNum * 2])\n\n P = [param_list[indexOrder[i]] for i in range(classNum)]\n my = np.sort(param_list[classNum:classNum * 2])\n sigma = [param_list[classNum * 2 + indexOrder[i]] for i in range(classNum)]\n\n for i in range(classNum - 1):\n a = sigma[i] ** 2 - sigma[i + 1] ** 2\n b = 2 * ( my[i] * ( sigma[i + 1] ** 2 ) - my[i + 1] * ( sigma[i] ** 2 ) )\n c = ( sigma[i] * my[i + 1] ) ** 2 - ( sigma[i + 1] * my[i] ) ** 2 + 2 * ( ( sigma[i] * sigma[i + 1] ) ** 2 ) * math.log(( ( sigma[i + 1] * P[i] ) / ( sigma[i] * P[i + 1] ) ))\n\n p = np.poly1d([a, b, c], False, \"T\")\n p_roots = np.roots(p)\n\n if p_roots.size == 1:\n thresholdValues[i] = (np.real(p_roots[0]), -1)\n else:\n r1 = np.real(p_roots[0])\n r2 = np.real(p_roots[1])\n if (r1 == r2) or (r2 < 0.) or (r2 > 255.):\n thresholdValues[i] = (r1, -1)\n elif (r1 < 0) or (r1 > 255):\n thresholdValues[i] = (r2, -1)\n else:\n thresholdValues[i] = (r1, r2)\n #r1 = np.amin(p_roots)\n #r2 = np.amax(p_roots)\n #if i > 0:\n #if r1 >= thresholdValues[i-1]:\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n #else:\n #if (r1 >= my[i]) and (r1 < my[i+1]):\n #thresholdValues[i] = r1\n #else:\n #thresholdValues[i] = r2\n\n return thresholdValues", "def apply_filter(image: np.ndarray) -> np.ndarray:\n # choose filters to apply\n return clahe(image)", "def classify(priors, likelihoods, testData, classes):\r\n results = []\r\n for document in testData:\r\n bestClass = None\r\n bestProb = None\r\n currentProb = 0.0\r\n for cls in classes:\r\n prior = priors[cls]\r\n currentProb = log(prior)\r\n lhoods = likelihoods[cls]\r\n for (word, count) in document:\r\n if word in lhoods:\r\n currentProb += log(lhoods[word])\r\n else:\r\n currentProb += log(lhoods[None])\r\n if currentProb > bestProb or bestClass == None:\r\n bestProb = currentProb\r\n bestClass = cls\r\n results.append(bestClass)\r\n return results", "def classify_image_probavec(image, model, image_box=None):\n images_list = []\n image = image.resize((IMAGE_WIDTH, IMAGE_HEIGHT), box=image_box)\n # box argument clips image to (x1, y1, x2, y2)\n image = np.array(image)\n images_list.append(image)\n \n return model.predict(np.array(images_list))", "def inference(self, img, probe_roi=None, threshold=0.75):\n device = self.cls_score.weight.device\n processed_img, scale = img_preprocessing(img)\n # [C, H, W] -> [N, C, H, W]\n processed_img = torch.from_numpy(processed_img).unsqueeze(0).to(device)\n # img_info: (height, width, scale)\n img_info = torch.Tensor([processed_img.shape[2], processed_img.shape[3], scale]).to(device)\n if probe_roi is not None:\n probe_roi = torch.from_numpy(probe_roi).float().view(1, 4)\n probe_roi *= scale\n # Add an extra 0, which means the probe_roi is from the first image in the batch\n probe_roi = torch.cat((torch.zeros(1, 1), probe_roi.float()), dim=1).to(device)\n\n with torch.no_grad():\n proposals, probs, proposal_deltas, features, _, _, _, _, _ = self.forward(\n processed_img, img_info, None, probe_roi\n )\n\n if probe_roi is not None:\n return features\n\n # Unscale proposals back to raw image space\n proposals = proposals[:, 1:5] / scale\n # Unnormalize proposal deltas\n num_classes = proposal_deltas.shape[1] // 4\n stds = torch.Tensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).repeat(num_classes).to(device)\n means = 
torch.Tensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).repeat(num_classes).to(device)\n proposal_deltas = proposal_deltas * stds + means\n # Apply proposal regression deltas\n boxes = bbox_transform_inv(proposals, proposal_deltas)\n boxes = clip_boxes(boxes, img.shape)\n\n # Remove those boxes with scores below the threshold\n j = 1 # Only consider foreground class\n keep = torch.nonzero(probs[:, j] > threshold, as_tuple=False)[:, 0]\n boxes = boxes[keep, j * 4 : (j + 1) * 4]\n probs = probs[keep, j]\n features = features[keep]\n\n # Remove redundant boxes with NMS\n detections = torch.cat((boxes, probs.unsqueeze(1)), dim=1)\n keep = nms(boxes, probs, cfg.TEST.NMS)\n detections = detections[keep]\n features = features[keep]\n\n return detections, features", "def adjusted_classes(pred_prob, threshold):\n return [1 if y >= threshold else 0 for y in pred_prob]", "def detect_objects(image, threshold, classes_incl=None):\n set_input_tensor(image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(0)\n classes = get_output_tensor(1)\n scores = get_output_tensor(2)\n count = int(get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n if not classes_incl:\n results.append(result)\n elif classes[i] in classes_incl:\n results.append(result)\n return results", "def on_image(image):\n objects = [obj for obj in coco.classify(image) if obj.confidence > config.OBJECT_CONFIDENCE_THRESHOLD]\n queue.put((image, objects))", "def apply_2_class_filterV4(pred_csv, out_csv, filter_info_file, thr=0.08):\n df_pred = pd.read_csv(pred_csv)\n df_filter = pd.read_csv(filter_info_file)\n pred_strs = df_pred['PredictionString'].tolist()\n img_ids = df_pred['image_id'].tolist()\n\n num_normal = 0\n for idx in tqdm(range(len(pred_strs))):\n im_id = img_ids[idx]\n cls_score = df_filter[df_filter['image_id'] == im_id]['target'].tolist()[0]\n if cls_score < thr: # No finding\n pred_strs[idx] = '14 1 0 0 1 1'\n num_normal += 1\n print('number of No finding images: ', num_normal)\n\n df_save = pd.DataFrame()\n df_save['image_id'] = img_ids\n df_save['PredictionString'] = pred_strs\n df_save.to_csv(out_csv, index=False)\n print('all done!')", "def detect_objects(interpreter, image):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n count = int(get_output_tensor(interpreter, 3))\n\n results = []\n for i in range(count):\n # Only check for people that meet the threshold\n if classes[i] == 0.0 and scores[i] >= THRESHOLD:\n result = {\n \"bounding_box\": boxes[i],\n \"class_id\": classes[i],\n \"score\": scores[i],\n }\n results.append(result)\n return results", "def vis_detections(im, class_name, dets, thresh=0.8):\n\n dict = {'HolderA': 'Holder', 'WheelA': 'WheelA', 'WheelB': 'WheelB', 'BrakeA': 'Brake', 'SpringA': 'Spring',\n 'BuckleA': 'BuckleA', 'BuckleB': 'BuckleB', 'TubeA': 'Tube', 'NutA': 'NutA', 'ScrewA': 'ScrewA',\n 'NutB': 'NutB', 'ScrewB': 'ScrewB',\n 'WireA': 'Wire', 'PlateA': 'PlateA', 'PlateB': 'PlateB', 'PlateD': 'PlateC', 'PlateE': 'PlateD',\n 'BoltA': 'Bolt', 'LoopB': 'Loop', 'JointA': 'JointA', 'JointB': 'JointB', 'FixatorA': 'Fixator',\n 'BearingA': 'Bearing', 'PlugA': 'Plug'}\n\n for i in range(np.minimum(10, dets.shape[0])):\n bbox = tuple(int(np.round(x)) for x in dets[i, :4])\n 
score = dets[i, -1]\n if score > thresh:\n # Color site: http://www.wahart.com.hk/rgb.htm\n if class_name == 'HolderA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'WheelA':\n color = (212, 255, 127) # Aquamarina\n elif class_name == 'WheelB':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'BrakeA':\n color = (99, 99, 238) # IndianRed2\n elif class_name == 'SpringA':\n color = (180, 130, 70) # SteelBlue\n elif class_name == 'BuckleA':\n color = (205, 0, 0) # MediumBlue\n elif class_name == 'BuckleB':\n color = (170, 205, 102) # MediumAquamarine\n elif class_name == 'BuckleC':\n color = (0, 252, 124) # LawnGreen\n elif class_name == 'BuckleD':\n color = (50, 205, 50) # LimeGreen\n elif class_name == 'TubeA':\n color = (147, 112, 219) # PaleVioletRed\n elif class_name == 'ScrewA':\n color = (240, 32, 160) # Purple\n elif class_name == 'ScrewB':\n color = (0, 165, 255) # Orange1\n elif class_name == 'ScrewC':\n color = (48, 48, 255) # Firebrick1\n elif class_name == 'NutA':\n color = (0, 255, 255) # Yellow\n elif class_name == 'NutB':\n color = (255, 144, 30) # DodgerBlue\n elif class_name == 'NutC':\n color = (180, 238, 180) # DarkSeaGreen2\n elif class_name == 'WireA':\n color = (255, 255, 255) # White\n elif class_name == 'PlateA':\n color = (0, 69, 255) # OrangeRed\n elif class_name == 'PlateB':\n color = (102, 205, 0) # SpringGreen3\n elif class_name == 'PlateD':\n color = (0, 255, 0) # Green\n elif class_name == 'PlateE':\n color = (0, 140, 250) # DarkOrange\n elif class_name == 'BoltA':\n color = (255, 255, 0) # Cyan\n elif class_name == 'LoopB':\n color = (180, 105, 255) # HotPink\n elif class_name == 'JointA':\n color = (105, 140, 255) # Salmon1\n elif class_name == 'JointB':\n color = (255, 0, 255) # Magenta3\n elif class_name == 'FixatorA':\n color = (0, 205, 102) # Chartreuse3\n elif class_name == 'BearingA':\n color = (185, 218, 255) # PeachPuff\n elif class_name == 'PlugA':\n color = (193, 193, 255) # RosyBrown1\n else:\n color = (139, 0, 139) # DarkMagenta\n cv2.rectangle(im, bbox[0:2], bbox[2:4], color, 2)\n # cv2.putText(im, '%s: %.3f' % (class_name, score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n # 0.5, color, thickness=1)\n cv2.putText(im, '%s: %.3f' % (dict[class_name], score), (bbox[0], bbox[1] + 15), cv2.FONT_HERSHEY_COMPLEX,\n 0.5, color, thickness=1)\n return im", "def preds_proba_to_preds_class(preds_proba,threshold):\n return [True if pred > threshold else False for pred in preds_proba]", "def eval_pascal_one_class(pascal, detections, c):\n gts = {} \n num_objs = 0 \n for img_name in pascal:\n gts[img_name] = []\n for obj in pascal[img_name]:\n if obj['class_id'] == c and obj['difficult']==0:\n gts[img_name] += [{'bbox':obj['bbox'], 'detected': False}]\n num_objs += 1\n\n dts = []\n scores = []\n num_dets = 0\n for img_name in detections:\n for dt in detections[img_name]:\n if dt['class_id'] == c:\n dts.append([img_name, dt['bbox'], dt['score']])\n scores.append(dt['score'])\n num_dets += 1\n \n # Sort the detections based on their scores\n scores = np.array(scores, np.float32)\n sorted_idx = np.argsort(scores)[::-1]\n\n tp = np.zeros((num_dets))\n fp = np.zeros((num_dets))\n\n for i in tqdm(list(range(num_dets))):\n idx = sorted_idx[i]\n img_name = dts[idx][0]\n bbox = dts[idx][1] \n gt_bboxes = np.array([obj['bbox'] for obj in gts[img_name]], np.float32) \n\n # Compute the max IoU of current detection with the ground truths\n max_iou = 0.0\n if gt_bboxes.size > 0:\n ixmin = np.maximum(gt_bboxes[:, 0], bbox[0])\n iymin = 
np.maximum(gt_bboxes[:, 1], bbox[1])\n ixmax = np.minimum(gt_bboxes[:, 2], bbox[2])\n iymax = np.minimum(gt_bboxes[:, 3], bbox[3])\n\n iw = np.maximum(ixmax - ixmin + 1.0, 0.0)\n ih = np.maximum(iymax - iymin + 1.0, 0.0)\n\n area_intersect = iw * ih\n area_union = (bbox[2] - bbox[0] + 1.0) * (bbox[3] - bbox[1] + 1.0) + (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1.0) * (gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1.0) - area_intersect\n\n ious = area_intersect / area_union\n max_iou = np.max(ious, axis=0)\n j = np.argmax(ious)\n\n # Determine if the current detection is a true or false positive\n if max_iou > 0.5:\n if not gts[img_name][j]['detected']:\n tp[i] = 1.0\n gts[img_name][j]['detected'] = True\n else:\n fp[i] = 1.0\n else:\n fp[i] = 1.0\n\n # Accumulate the numbers of true and false positives\n tp = np.cumsum(tp)\n fp = np.cumsum(fp)\n\n # Compute the average precision based on these data\n rec = tp * 1.0 / num_objs\n prec = tp * 1.0 / np.maximum((tp + fp), np.finfo(np.float64).eps)\n\n mrec = np.concatenate(([0.], rec, [1.]))\n mpre = np.concatenate(([0.], prec, [0.]))\n\n for i in range(mpre.size - 1, 0, -1):\n mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])\n\n i = np.where(mrec[1:] != mrec[:-1])[0]\n\n ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n print('average precision for class %s = %f' %(pascal_class_names[c], ap))\n\n return ap", "def process_images(pool, func, images, entries):\n start = time.perf_counter()\n images = pool.map(func, images)\n logger.info(\"Erased white background from %i images:\", len(images))\n util.pprint_log([x.name for x in entries], logger.info)\n logger.info(util.elapsed(start))\n logger.info(\"\\n\")\n return images", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def compute_classifications2(depc, gid_list, config=None):\n logger.info('[ibs] Process Image Classifications2')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n depc = ibs.depc_image\n if config['classifier_two_algo'] in ['cnn']:\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (192, 192),\n }\n # depc.delete_property('thumbnails', gid_list, config=config_)\n thumbnail_list = depc.get_property('thumbnails', gid_list, 'img', config=config_)\n result_list = ibs.generate_thumbnail_class2_list(thumbnail_list, **config)\n elif config['classifier_two_algo'] in ['rf']:\n from wbia.algo.detect.rf import classify\n\n config_ = {'algo': 'resnet'}\n vector_list = depc.get_property('features', gid_list, 'vector', config=config_)\n classifier_weight_filepath = config['classifier_weight_filepath']\n result_list = classify(vector_list, weight_filepath=classifier_weight_filepath)\n elif config['classifier_two_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n config_ = {\n 'draw_annots': False,\n 'thumbsize': (densenet.INPUT_SIZE, densenet.INPUT_SIZE),\n }\n thumbpath_list = ibs.depc_image.get(\n 'thumbnails', gid_list, 'img', config=config_, read_extern=False, ensure=True\n )\n config_ = {\n 'classifier_weight_filepath': config['classifier_two_weight_filepath'],\n }\n result_list = densenet.test(\n thumbpath_list,\n ibs=ibs,\n gid_list=gid_list,\n return_dict=True,\n multiclass=True,\n **config_,\n )\n result_list = list(result_list)\n for index in range(len(result_list)):\n best_score, best_key, 
scores = result_list[index]\n classes = [best_key]\n result_list[index] = (\n scores,\n classes,\n )\n else:\n raise ValueError(\n 'specified classifier_two algo is not supported in config = {!r}'.format(\n config\n )\n )\n\n # yield detections\n for result in result_list:\n yield result", "def filter_Nofinding_imgs(ori_ann_file, filter_info_file, out_file,\n score_thr=0.08, key_name='class'):\n ori_ann_infos = mmcv.load(ori_ann_file)\n df = pd.read_csv(filter_info_file)\n\n ori_image_infos = {os.path.splitext(info['file_name'])[0]: info\n for info in ori_ann_infos['images']}\n print('before filter, there are {} images.'.format(len(ori_image_infos)))\n new_images = []\n for idx, row in df.iterrows():\n image_name = row['image_id']\n cls = row[key_name]\n if cls >= score_thr:\n new_images.append(ori_image_infos[image_name])\n print('after filter, there are {} images.'.format(len(new_images)))\n print('saving new test annotations into file')\n ori_ann_infos['images'] = new_images\n mmcv.dump(ori_ann_infos, out_file)\n print('all done!')", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def threshold_probs(probs):\n classes = np.ones(len(probs),)\n classes[probs < 0.5] = 0\n return classes", "def class_probabilities(image, dict, show_debug=False):\n probabilities = {}\n for label, compare_img in dict.items():\n # equalize size\n ch = compare_img.shape[0]\n cw = compare_img.shape[1]\n img = resize(image, (ch, cw), preserve_range=True)\n\n # take diff\n diff = np.abs(img - compare_img)\n histo, _ = np.histogram(diff, bins=20)\n histo = histo[1:]\n probabilities[label] = sum(histo)\n\n # show images/diff\n if show_debug:\n plt.subplot(321)\n plt.title('orig')\n plt.imshow(image, cmap='gray')\n plt.subplot(322)\n plt.title('compare_img')\n plt.imshow(compare_img, cmap='gray')\n plt.subplot(323)\n plt.title('diff')\n plt.imshow(diff, cmap='gray')\n plt.show()\n\n return probabilities", "def predict(self, images, batch_size=1):\n predictions = []\n \n for image in images.astype(\"float\"):\n filtered_image = self.apply_filter(image)\n _, pred = cv2.threshold(filtered_image.astype('uint8'), 0, 1, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n predictions.append(pred)\n \n return np.reshape(predictions, images.shape)", "def grid_search(self, params):\n train_X, train_y, dev_X, dev_y = self.extract_train_dev_data()\n clf = self.classifiers[0]\n pred_y = clf.grid_search(params, train_X, train_y, dev_X)\n logger.info(classification_report(dev_y, pred_y))", "def thresh(self, thresh=25, total_ratings=False):\n before = self.item_count()\n\n if total_ratings: self.filter(self.n_per_item() >= thresh)\n else: self.filter(np.all(self.lam() >= thresh, axis=0))\n\n after = self.item_count()\n thresh_type = 'on each item total' if total_ratings else 'by each group' \n with msg(f'Applying threshold of {thresh} ratings {thresh_type} : {after} of {before}', done=False, enabled=self.output):pass", "def postprocessing(bbox, image_path, side=416, threshold=0.3): # pylint: disable=R0914\n\n bounds = bbox[:, 0: 4]\n 
confidence = bbox[:, 4]\n probability = bbox[:, 5:]\n\n image = Image.open(image_path)\n width, height = image.size\n scale = side / max([width, height])\n width_scaled = int(width * scale)\n height_scaled = int(height * scale)\n width_offset = (side - width_scaled) // 2\n height_offset = (side - height_scaled) // 2\n bounds[:, (0, 2)] = (bounds[:, (0, 2)] - width_offset) / scale\n bounds[:, [1, 3]] = (bounds[:, [1, 3]] - height_offset) / scale\n bounds = bounds.astype(np.int32)\n\n bounds[np.where(bounds < 0)] = 0\n bounds[np.where(bounds[:, 2] > width), 2] = width - 1\n bounds[np.where(bounds[:, 3] > height), 3] = height - 1\n mask = np.ones(bounds.shape, dtype=bool)\n mask[:, 2] = (bounds[:, 2] - bounds[:, 0]) > 0\n mask[:, 3] = (bounds[:, 3] - bounds[:, 1]) > 0\n mask = np.logical_and.reduce(mask, axis=1)\n classes = np.argmax(probability, axis=1)\n scores = confidence * probability[np.arange(classes.size), classes]\n mask = mask & (scores > threshold)\n bounds = bounds[mask]\n classes = classes[mask]\n scores = scores[mask]\n return nms(bounds, classes, scores)", "def highpass_filter(display):\r\n for trainOrTest in trainTest:\r\n resultPath = os.path.join('hpf_data', trainOrTest)\r\n originalPath = 'original_data'\r\n for pokemon in pokemons:\r\n pokeData = os.path.join(originalPath, trainOrTest, pokemon)\r\n files = os.listdir(pokeData)\r\n for picture in files:\r\n # Setting path\r\n path = os.path.join(pokeData, picture)\r\n\r\n # Reading image\r\n Img = dip.im_to_float(cv2.imread(path, 1))\r\n\r\n # Splitting the image into blue, green, red portions\r\n b, g, r = cv2.split(Img)\r\n\r\n # Splitting image, taking mean\r\n avg = np.mean([np.mean(b.flatten()), np.mean(g.flatten()), np.mean(r.flatten())])\r\n\r\n # Finding acceptable frequency\r\n precision = 0.002\r\n target = avg / 12\r\n _, j = hpf(b, target, precision)\r\n\r\n # Running hpf\r\n b_out, _ = hpf(b, target, precision, j)\r\n g_out, _ = hpf(g, target, precision, j)\r\n r_out, _ = hpf(r, target, precision, j)\r\n\r\n # Normalizing mean to 1\r\n b_out = b_out * (1 / np.max(b_out))\r\n g_out = g_out * (1 / np.max(g_out))\r\n r_out = r_out * (1 / np.max(r_out))\r\n\r\n # Combiner (Logic)\r\n std = 100 # how many standard deviations above mean for rgb parts\r\n sigmas = [np.var(b_out) ** 0.5, np.var(g_out) ** 0.5, np.var(r_out) ** 0.5]\r\n means = [np.mean(b_out), np.mean(g_out), np.mean(r_out)]\r\n output = combiner(b_out, g_out, r_out, means + sigmas * std)\r\n\r\n output = dip.float_to_im(output)\r\n\r\n if display:\r\n plt.subplot(1, 2, 1)\r\n plt.title('Original Image')\r\n plt.imshow(Img)\r\n plt.subplot(1, 2, 2)\r\n plt.title(\"High pass filter result\")\r\n plt.imshow(output)\r\n\r\n resultPic = os.path.join(resultPath, pokemon, picture)\r\n # Saving resultant image\r\n dip.im_write(output, resultPic)", "def compute_detections(depc, gid_list, config=None):\n logger.info('[ibs] Preprocess Detections')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n ibs.assert_valid_gids(gid_list)\n\n USE_CLASSIFIER = False\n\n if USE_CLASSIFIER:\n classifier_config = {\n 'classifier_weight_filepath': config['classifier_weight_filepath'],\n }\n # Filter the gids by annotations\n prediction_list = depc.get_property(\n 'classifier', gid_list, 'class', config=classifier_config\n )\n confidence_list = depc.get_property(\n 'classifier', gid_list, 'score', config=classifier_config\n )\n confidence_list = [\n confidence if prediction == 'positive' else 1.0 - confidence\n for prediction, confidence 
in zip(prediction_list, confidence_list)\n ]\n gid_list_ = [\n gid\n for gid, confidence in zip(gid_list, confidence_list)\n if confidence >= config['classifier_sensitivity']\n ]\n else:\n classifier_config = {\n 'classifier_two_weight_filepath': config['classifier_weight_filepath'],\n }\n # Filter the gids by annotations\n predictions_list = depc.get_property(\n 'classifier_two', gid_list, 'classes', config=classifier_config\n )\n gid_list_ = [\n gid\n for gid, prediction_list in zip(gid_list, predictions_list)\n if len(prediction_list) > 0\n ]\n\n gid_set_ = set(gid_list_)\n # Get the localizations for the good gids and add formal annotations\n localizer_config = {\n 'algo': config['localizer_algo'],\n 'config_filepath': config['localizer_config_filepath'],\n 'weight_filepath': config['localizer_weight_filepath'],\n 'grid': config['localizer_grid'],\n }\n bboxes_list = depc.get_property(\n 'localizations', gid_list_, 'bboxes', config=localizer_config\n )\n thetas_list = depc.get_property(\n 'localizations', gid_list_, 'thetas', config=localizer_config\n )\n confses_list = depc.get_property(\n 'localizations', gid_list_, 'confs', config=localizer_config\n )\n\n # Get the corrected species and viewpoints\n labeler_config = {\n 'labeler_weight_filepath': config['labeler_weight_filepath'],\n }\n # depc.delete_property('localizations_labeler', gid_list_, config=labeler_config)\n specieses_list = depc.get_property(\n 'localizations_labeler', gid_list_, 'species', config=labeler_config\n )\n viewpoints_list = depc.get_property(\n 'localizations_labeler', gid_list_, 'viewpoint', config=labeler_config\n )\n scores_list = depc.get_property(\n 'localizations_labeler', gid_list_, 'score', config=labeler_config\n )\n\n # Collect the detections, filtering by the localization confidence\n empty_list = [\n 0.0,\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n ]\n detect_dict = {}\n for index, gid in enumerate(gid_list_):\n bbox_list = bboxes_list[index]\n theta_list = thetas_list[index]\n species_list = specieses_list[index]\n # species_dict = {}\n # for species in species_list:\n # if species not in species_dict:\n # species_dict[species] = 0\n # species_dict[species] += 1\n # for tup in species_dict.iteritems():\n # logger.info('\\t%r' % (tup, ))\n # logger.info('----')\n viewpoint_list = viewpoints_list[index]\n conf_list = confses_list[index]\n score_list = scores_list[index]\n zipped = list(\n zip(\n bbox_list,\n theta_list,\n species_list,\n viewpoint_list,\n conf_list,\n score_list,\n )\n )\n zipped_ = []\n for bbox, theta, species, viewpoint, conf, score in zipped:\n if (\n conf >= config['localizer_sensitivity']\n and score >= config['labeler_sensitivity']\n ):\n zipped_.append([bbox, theta, species, viewpoint, conf * score])\n else:\n logger.info(\n 'Localizer {:0.02f} {:0.02f}'.format(\n conf, config['localizer_sensitivity']\n )\n )\n logger.info(\n 'Labeler {:0.02f} {:0.02f}'.format(\n score, config['labeler_sensitivity']\n )\n )\n if len(zipped_) == 0:\n detect_list = list(empty_list)\n else:\n detect_list = [0.0] + [np.array(_) for _ in zip(*zipped_)]\n detect_dict[gid] = detect_list\n\n # Filter the annotations by the localizer operating point\n for gid in gid_list:\n if gid not in gid_set_:\n assert gid not in detect_dict\n result = list(empty_list)\n else:\n assert gid in detect_dict\n result = detect_dict[gid]\n # logger.info(result)\n # raw_input()\n # logger.info('')\n # image = ibs.get_images(gid)\n # image = vt.resize(image, (500, 500))\n # 
cv2.imshow('', image)\n # cv2.waitKey(0)\n yield tuple(result)", "def do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture,sigma_min,sigma_max, downsample_value):\n if np.ndim(img)==3:\n features = extract_features(\n img,\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n else:\n features = extract_features(\n np.dstack((img,img,img)),\n n_sigmas,\n multichannel=multichannel,\n intensity=intensity,\n edges=edges,\n texture=texture,\n sigma_min=sigma_min,\n sigma_max=sigma_max,\n )\n\n if mask is None:\n raise ValueError(\"If no classifier clf is passed, you must specify a mask.\")\n training_data = features[:, mask > 0].T\n\n training_data = memmap_feats(training_data)\n\n training_labels = mask[mask > 0].ravel()\n\n training_data = training_data[::downsample_value]\n training_labels = training_labels[::downsample_value]\n\n lim_samples = 100000 #200000\n\n if training_data.shape[0]>lim_samples:\n logging.info('Number of samples exceeds %i'% lim_samples)\n ind = np.round(np.linspace(0,training_data.shape[0]-1,lim_samples)).astype('int')\n training_data = training_data[ind,:]\n training_labels = training_labels[ind]\n logging.info('Samples have been subsampled')\n logging.info('Number of samples in training data: %i' % (training_data.shape[0]))\n print(training_data.shape)\n\n clf = make_pipeline(\n StandardScaler(),\n MLPClassifier(\n solver='adam', alpha=1, random_state=1, max_iter=2000,\n early_stopping=True, hidden_layer_sizes=[100, 60],\n ))\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Initializing MLP model')\n\n clf.fit(training_data, training_labels)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('MLP model fit to data')\n\n del training_data, training_labels\n\n logging.info('Create and memory map model input data')\n\n data = features[:, mask == 0].T\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n data = memmap_feats(data)\n logging.info('Memory mapped model input data')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n labels = clf.predict(data)\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Model used on data to estimate labels')\n\n if mask is None:\n result = labels.reshape(img.shape[:2])\n result2 = result.copy()\n else:\n result = np.copy(mask)#+1\n result[mask == 0] = labels\n del labels, mask\n result2 = result.copy()\n del result\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF feature extraction and model fitting complete')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2", "def test_lots_of_probability_thresholds(self):\n data = np.array(\n [\n [[2.9, 2.9, 2.9], [2.9, 2.9, 2.9], [2.9, 2.9, 2.9]],\n [[14.5, 14.5, 14.5], [14.5, 14.5, 14.5], [14.5, 14.5, 14.5]],\n [\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n [26.099998, 26.099998, 26.099998],\n ],\n ],\n dtype=np.float32,\n )\n\n input_probs = np.tile(np.linspace(1, 0, 30), (3, 3, 1)).T\n cube = set_up_probability_cube(\n input_probs.astype(np.float32),\n np.arange(30).astype(np.float32),\n threshold_units=\"degC\",\n )\n\n result = Plugin()._probabilities_to_percentiles(cube, self.percentiles)\n\n self.assertArrayAlmostEqual(result.data, data)", "def test_image_at_levels(image_name, percentages, blur=False, blur_amount=10):\n # img = 
skimage.io.imread(os.path.join(IMAGE_DIR, image_name[:-3]+'JPEG'))\n img = cv2.imread(os.path.join(IMAGE_DIR, image_name[:-3]+'JPEG'))\n # mask_img = skimage.io.imread(os.path.join(MASK_DIR, image_name))\n mask_img = cv2.imread(os.path.join(MASK_DIR, image_name))\n results = []\n level_list = get_ntiles_for_img(mask_img, percentages)\n print(level_list)\n for level in level_list:\n masked_image = make_masked_image(img, mask_img, level, blur, blur_amount)\n cv2.imshow('img',masked_image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n # Transform image for VGG\n masked_image = cv2.resize(masked_image, (224,224)).astype(np.float32)\n masked_image[:,:,0] -= 103.939\n masked_image[:,:,1] -= 116.779\n masked_image[:,:,2] -= 123.68\n masked_image = masked_image.transpose((1,0,2))\n masked_image = np.expand_dims(masked_image, axis=0)\n out = model.predict(masked_image)\n ordered_idx = np.argsort(-out)\n print(out.max(), ordered_idx[0][0])\n result = (CallResult.lines[int(ordered_idx[0][0])], out[0][ordered_idx[0]][0])\n results.append(result)\n\n return results", "def runClassifier(interpreter, image, threshold):\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n count = int(get_output_tensor(interpreter, 3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n \"bounding_box\": boxes[i],\n \"class_id\": classes[i],\n \"score\": scores[i],\n }\n results.append(result)\n return results", "def get_prob_l_can_see_x_flickr(self, obj_type):\n ret_probs = []\n\n for i in range(len(self.obj_locations[0])):\n #get the visibility here\n vtags, itags_t = self.obj_to_visibility[i]\n\n itags = []\n for elt_t in itags_t:\n if(not elt_t in vtags and not elt_t in itags):\n itags.append(elt_t)\n\n\n myprob = self.p_can_see_tag(obj_type, vtags, itags)\n if obj_type in vtags:\n myprob = 1.0\n \n ret_probs.append(myprob)\n \n return ret_probs", "def scan(self, img: Array3D, det_prob_threshold: float = None) -> List[ScannedFace]:\n raise NotImplementedError", "def postprocess(image: np.ndarray, results_list: list, threshold_confidence: float, threshold_nms: float) -> list:\n frameHeight = image.shape[0]\n frameWidth = image.shape[1]\n\n # Scan through all the bounding boxes output from the network and..\n # 1. keep only the ones with high confidence scores.\n # 2. assign the box class label as the class with the highest score.\n # 3. 
construct a list of bounding boxes, class labels and confidence scores\n\n classIds = []\n confidences = []\n boxes = []\n for result in results_list:\n for detection in result:\n scores = detection[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > threshold_confidence:\n center_x = int(detection[0] * frameWidth)\n center_y = int(detection[1] * frameHeight)\n width = int(detection[2] * frameWidth)\n height = int(detection[3] * frameHeight)\n left = max(0, int(center_x - width / 2))\n top = max(0, int(center_y - height / 2))\n classIds.append(classId)\n confidences.append(float(confidence))\n boxes.append([left, top, width, height])\n\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\n # lower confidences\n list_of_tuples = []\n\n indices = cv2.dnn.NMSBoxes(boxes, confidences, threshold_confidence, threshold_nms)\n for i in indices:\n i = i[0]\n list_of_tuples.append((classIds[i], confidences[i], boxes[i]))\n # return post processed lists of classIds, confidences and bounding boxes\n return list_of_tuples", "def threshold_filter(cohort, threshold=0.02):\n\n # For each trajectory in each participant of cohort\n # set filter attribute to True if VAF reaches threshold, otherwise False.\n for part in cohort:\n for traj in part.trajectories:\n if max(traj.data.AF) >= threshold:\n traj.filter = True\n else:\n traj.filter = False\n\n # Create a model_cohort class using filter attribute of trajectories.\n model = model_cohort(cohort)\n # Figure with threshold filter results.\n fig = plot.CHIP_plot_inset(cohort)\n # Bar plot of genes selected through filteringfigure.\n genes = plot.gene_bar(model)\n\n # Create a filter class object\n filter_class = filter(model=model,\n gradient_plot=fig,\n gene_bar=genes[0],\n gene_dict=genes[1])\n\n return filter_class", "def filter(self):\n self.filter_means = [self.m_0]\n self.filter_covs = [self.P_0]\n self.marginal_covs = []\n for t in range(self.data.shape[0]):\n m_bar, P_bar = self.one_step_prediction(self.filter_means[-1], self.filter_covs[-1])\n\n # Update step\n y = self.data[t]\n if not np.isnan(y).any():\n v = y[:, None] - self.observation_matrix @ m_bar\n S = self.observation_matrix @ P_bar @ self.observation_matrix.T + self.observation_cov\n K = P_bar @ self.observation_matrix.T @ np.linalg.inv(S)\n\n m_bar = m_bar + K @ v\n P_bar = P_bar - K @ S @ K.T\n\n self.marginal_covs.append(S)\n\n self.filter_means.append(m_bar)\n self.filter_covs.append(P_bar)\n self.filter_means = self.filter_means[1:]\n self.filter_covs = self.filter_covs[1:]", "def generate_images_thresholded(self, n_images, threshold, modifier):\n self.build_gan()\n\n list_images = []\n for index_current_image in range(n_images):\n # Default score.\n score = [0]\n while not (threshold[0] < score[0] < threshold[1]):\n img = self.generate_images(1)\n score = self.discriminator.predict(img)\n print(\"Image found: \", score[0])\n list_images.append(img)\n\n list_images = np.asarray(list_images).squeeze()\n\n # ???????? 
Intensity adjustment?\n list_images = 0.5 * list_images + 0.5\n\n print(list_images.shape)\n\n # Save all images.\n for index_current_image, np_array_current_image in enumerate(list_images):\n path = f\"{self.output_directory}/{unique_name()}_generated_{threshold[0]}_{threshold[1]}\"\n if not os.path.exists(path):\n os.makedirs(path)\n imsave(\n path + f\"/{modifier}_{index_current_image}.png\", np_array_current_image\n )", "def forward(image, label):\n # We transform the image from [0, 255] to [-0.5, 0.5] to make it easier\n # to work with. This is standard practice.\n\n # num_filters hard coded as = 3\n filter_values = []\n for i in range(20): # population\n # firstly, generate 20 different filters\n filter_values.append(np.random.randn(8, 3, 3) / 9)\n\n out = []\n loss = 100\n acc = 0\n\n for generation in range(100): # generation size = 100\n for j, filter_value in enumerate(filter_values): # population size\n out = conv.forward((image / 255) - 0.5, filter_value)\n out = pool.forward(out)\n out = softmax.forward(out)\n # Calculate cross-entropy loss and accuracy. np.log() is the natural log.\n new_loss = -np.log(out[label])\n if new_loss < loss:\n loss = new_loss\n acc = 1 if np.argmax(out) == label else 0\n # else:\n # filter_values[j] = np.random.randn(8, 3, 3) / 9\n\n # mutation\n for k, filter_value in enumerate(filter_values):\n mutation_probability = random.uniform(0, 1)\n # if larger than 0.5 then mutate\n if mutation_probability > 0.5:\n # random number of elements to change\n # because it is 3x3 filter,\n # 8 x (3x3) = 72\n # so, we don't want to change to many element\n number_of_elements = random.randint(1, 20) # TODO: optimize the param\n\n # the elements that have been already changed\n has_changed_list = []\n for h in range(number_of_elements):\n row = random.randint(0, 2)\n col = random.randint(0, 2)\n # filter_size = 8 x (3x3),\n # so randomly change one filter\n the_number = random.randint(0, 7)\n key_value_pair = the_number + row + col\n\n if key_value_pair not in has_changed_list:\n element = filter_value[the_number, row, col]\n # TODO: find a better way of mutating the filter weight\n filter_value[the_number, row, col] = mutation(element)\n has_changed_list.append(key_value_pair)\n\n return out, loss, acc", "def _filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = box_confidences * box_class_probs\n box_classes = np.argmax(box_scores, axis=-1)\n box_class_scores = np.max(box_scores, axis=-1)\n pos = np.where(box_class_scores >= self.object_threshold)\n\n boxes = boxes[pos]\n classes = box_classes[pos]\n scores = box_class_scores[pos]\n\n return boxes, classes, scores", "def custom_filter(image: Image) -> Image:\n image = image.filter(ImageFilter.Kernel(\n size=(3, 3), kernel=(1, 0, 1, 0, 0, 0, 1, 0, 1)))\n return image", "def result_filter_routine(self, args):\n file_types = set(['fb2', 'epub'])\n page_url, page_weight, weighted_links, thresholds, result_class = args\n page_threshold, link_threshold, book_link_cnt = thresholds\n\n ret_links = {}\n for link, weight in weighted_links:\n link_href = link.get('href', None)\n if not link_href:\n continue\n link_tgt_type = link_href.split('.')[-1]\n if link_tgt_type in file_types:\n page_weight += page_threshold / book_link_cnt\n link_url = extract_url_from_link(link_href,page_url)\n result_class.submit_book_link(link_url,page_url)\n continue\n if link.get('rel', None) == 'nofollow':\n continue\n if weight >= link_threshold:\n link_url = extract_url_from_link(link_href,page_url)\n if is_html_url( 
link_url ):\n old_weight = ret_links.get(link_url, 0)\n ret_links[link_url] = max(old_weight,weight)\n if page_weight >= page_threshold:\n result_class.submit_site(page_url)\n self.result = [page_url, page_weight, ret_links.items()]\n self.complete = True", "def filter_boxes(self, boxes, box_confidences, box_class_probs):\n box_scores = [x * y for x, y in zip(box_confidences, box_class_probs)]\n box_class_scores = [np.max(x, axis=-1).reshape(-1) for x in box_scores]\n box_class_scores = np.concatenate(box_class_scores)\n box_classes = [np.argmax(x, axis=-1).reshape(-1) for x in box_scores]\n box_classes = np.concatenate(box_classes)\n filtering_mask = box_class_scores >= self.class_t\n list = [np.reshape(x, (-1, 4)) for x in boxes]\n boxes = np.concatenate(list)\n boxes = boxes[filtering_mask]\n scores = box_class_scores[filtering_mask]\n classes = box_classes[filtering_mask]\n return (boxes, classes, scores)", "def forward(sess, net, img, CONF_THRESH=0.8, NMS_THRESH=0.7):\n\n results = {'face': [],\n 'lp': []}\n\n # Detect all object classes and regress object bounds\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, img)\n timer.toc()\n\n for cls in ('face', 'lp') :# enumerate(CLASSES[1:]):\n cls_ind = CLASSES.index(cls) # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n results[cls] = [\n [x0, y0, x1-x0, y1-y0] for (x0, y0, x1, y1, score) in dets[keep, :]]\n\n return results, timer.total_time", "def _filter_and_extract(\n imgs,\n extraction_function,\n parameters,\n memory_level=0,\n memory=Memory(location=None),\n verbose=0,\n confounds=None,\n sample_mask=None,\n copy=True,\n dtype=None,\n):\n # Since the calling class can be any *Nifti*Masker, we look for exact type\n if verbose > 0:\n class_name = enclosing_scope_name(stack_level=10)\n\n # If we have a string (filename), we won't need to copy, as\n # there will be no side effect\n imgs = stringify_path(imgs)\n if isinstance(imgs, str):\n copy = False\n\n if verbose > 0:\n print(\n f\"[{class_name}] Loading data \"\n f\"from {_utils._repr_niimgs(imgs, shorten=False)}\"\n )\n\n # Convert input to niimg to check shape.\n # This must be repeated after the shape check because check_niimg will\n # coerce 5D data to 4D, which we don't want.\n temp_imgs = _utils.check_niimg(imgs)\n\n # Raise warning if a 3D niimg is provided.\n if temp_imgs.ndim == 3:\n warnings.warn(\n \"Starting in version 0.12, 3D images will be transformed to \"\n \"1D arrays. 
\"\n \"Until then, 3D images will be coerced to 2D arrays, with a \"\n \"singleton first dimension representing time.\",\n DeprecationWarning,\n )\n\n imgs = _utils.check_niimg(\n imgs, atleast_4d=True, ensure_ndim=4, dtype=dtype\n )\n\n target_shape = parameters.get(\"target_shape\")\n target_affine = parameters.get(\"target_affine\")\n if target_shape is not None or target_affine is not None:\n if verbose > 0:\n print(f\"[{class_name}] Resampling images\")\n imgs = cache(\n image.resample_img,\n memory,\n func_memory_level=2,\n memory_level=memory_level,\n ignore=[\"copy\"],\n )(\n imgs,\n interpolation=\"continuous\",\n target_shape=target_shape,\n target_affine=target_affine,\n copy=copy,\n )\n\n smoothing_fwhm = parameters.get(\"smoothing_fwhm\")\n if smoothing_fwhm is not None:\n if verbose > 0:\n print(f\"[{class_name}] Smoothing images\")\n imgs = cache(\n image.smooth_img,\n memory,\n func_memory_level=2,\n memory_level=memory_level,\n )(imgs, parameters[\"smoothing_fwhm\"])\n\n if verbose > 0:\n print(f\"[{class_name}] Extracting region signals\")\n region_signals, aux = cache(\n extraction_function,\n memory,\n func_memory_level=2,\n memory_level=memory_level,\n )(imgs)\n\n # Temporal\n # --------\n # Detrending (optional)\n # Filtering\n # Confounds removing (from csv file or numpy array)\n # Normalizing\n if verbose > 0:\n print(f\"[{class_name}] Cleaning extracted signals\")\n runs = parameters.get(\"runs\", None)\n region_signals = cache(\n signal.clean,\n memory=memory,\n func_memory_level=2,\n memory_level=memory_level,\n )(\n region_signals,\n detrend=parameters[\"detrend\"],\n standardize=parameters[\"standardize\"],\n standardize_confounds=parameters[\"standardize_confounds\"],\n t_r=parameters[\"t_r\"],\n low_pass=parameters[\"low_pass\"],\n high_pass=parameters[\"high_pass\"],\n confounds=confounds,\n sample_mask=sample_mask,\n runs=runs,\n **parameters[\"clean_kwargs\"],\n )\n\n return region_signals, aux", "def huang_threshold_image_filter(*args, **kwargs):\n import itk\n instance = itk.HuangThresholdImageFilter.New(*args, **kwargs)\n return instance.__internal_call__()", "def _apply_image_filters(self, image, filters=[]):\n derivative = image\n for filter in filters:\n derivative = filter(derivative)\n return derivative", "def Predict_Class_Probabilities(array, model, step = 100, normalize = False, \n filename = None, path = ''):\n \n img_height, img_width = model.input_shape[1:3]\n num_class = model.output_shape[3]\n image_orig = array.astype('float')\n if normalize:\n image_orig = Normalize_Image(image_orig)\n ## Create \"batch\" dimension for compatibility with CNN \n image_orig = np.expand_dims(image_orig, axis=0)\n shp = np.array(array.shape)\n shp[2] = num_class\n predicted_image = 0.0*np.zeros(shp).astype('float')\n count_image = 0.0*predicted_image\n for i in range(array.shape[0]//step + 1):\n starti = step*i\n endi = starti + img_height\n if(endi > array.shape[0]):\n starti = array.shape[0] - img_height\n endi = array.shape[0]\n for j in range(array.shape[1]//step + 1):\n startj = step*j\n endj = startj + img_width\n if(endj > array.shape[1]):\n startj = array.shape[1] - img_width\n endj = array.shape[1]\n subimage = image_orig[:,starti:endi, startj:endj, :] \n predicted_subimage = model.predict(subimage)\n predicted_image[starti:endi, startj:endj,:] = predicted_image[starti:endi, startj:endj,:] + \\\n predicted_subimage[0,:,:,:]\n count_image[starti:endi, startj:endj,:] = count_image[starti:endi, startj:endj,:] + 1.0\n ## Average predictions\n 
predicted_image = np.divide(predicted_image,count_image); \n predicted_class = predicted_image.argmax(axis = 2) \n if filename is not None:\n try:\n if path == '':\n path = 'Predictions'\n os.makedirs(path)\n except OSError as error: \n print('') \n fig, ax = plt.subplots(figsize=(18, 20))\n ax.imshow(predicted_class)\n plt.tight_layout()\n plt.savefig(path + '/' + filename + '_Predict_Mask.png', bbox_inches='tight')\n plt.close(fig)\n return predicted_image, predicted_class", "def get_noise_thresholds(size_of_class=45, fakes='./data/CASIA1_fakes', originals='./data/CASIA1_originals', \n fakes_ela='./data/CASIA1_fakes_ela'):\n fakes_list = os.listdir(fakes)\n\n fakes = load_fakes(fakes_list, fakes, originals)\n\n noises = []\n for i, item in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, item.path.split('\\\\')[-1]))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n noises.append(estimate_noise(image))\n\n fakes = np.array(fakes)\n noises = np.array(noises)\n idxs = noises.argsort()\n sorted_by_noise = fakes[idxs]\n\n for i, item in enumerate(sorted(noises)):\n if (i+1) % size_of_class == 0:\n print(\"####\", i+1, item)\n else:\n print(i+1, item)", "def make_iterator_extract_scores_from_images_batched(dataloader, net, logger, image_batch_size, is_cuda,\n num_random_pyramid_scales=0, num_random_negative_labels=-1,\n class_image_augmentation=\"\"):\n\n logger.info(\"Extracting scores from all images\")\n # get images of all classes\n class_images, class_aspect_ratios, class_ids = dataloader.get_all_class_images()\n num_classes = len(class_images)\n assert len(class_aspect_ratios) == num_classes\n assert len(class_ids) == num_classes\n query_img_sizes = [FeatureMapSize(img=img) for img in class_images]\n \n # the current code works only with class batch == 1, this in inefficient in some place, but good in others\n # is there a better way?\n class_batch_size = 1\n\n # extract all class convolutions from batched class images\n class_conv_layer_batched = []\n logger.info(\"Extracting weights from {0} classes{1}\".format(num_classes,\n f\" with {class_image_augmentation} augmentation\" if class_image_augmentation else \"\"))\n for i in range(0, num_classes, class_batch_size):\n batch_class_ids = class_ids[i : i + class_batch_size]\n\n batch_class_images = []\n for i_label in range(len(batch_class_ids)):\n im = class_images[i + i_label].squeeze(0)\n if is_cuda:\n im = im.cuda()\n batch_class_images.append(im)\n if not class_image_augmentation:\n num_class_views = 1\n elif class_image_augmentation == \"rotation90\":\n im90 = im.rot90(1, [1, 2])\n im180 = im90.rot90(1, [1, 2])\n im270 = im180.rot90(1, [1, 2])\n batch_class_images.append(im90)\n batch_class_images.append(im180)\n batch_class_images.append(im270)\n num_class_views = 4\n elif class_image_augmentation == \"horflip\":\n im_flipped = im.flip(2)\n batch_class_images.append(im_flipped)\n num_class_views = 2\n elif class_image_augmentation == \"horflip_rotation90\":\n im90 = im.rot90(1, [1, 2])\n im180 = im90.rot90(1, [1, 2])\n im270 = im180.rot90(1, [1, 2])\n im_flipped = im.flip(2)\n im90_flipped = im90.flip(2)\n im180_flipped = im180.flip(2)\n im270_flipped = im270.flip(2)\n\n for new_im in [im90, im180, im270, im_flipped, im90_flipped, im180_flipped, im270_flipped]:\n batch_class_images.append(new_im)\n\n num_class_views = len(batch_class_images)\n else:\n raise RuntimeError(f\"Unknown value of 
class_image_augmentation: {class_image_augmentation}\")\n\n for b_im in batch_class_images:\n class_feature_maps = net.net_label_features([b_im])\n class_conv_layer = net.os2d_head_creator.create_os2d_head(class_feature_maps)\n class_conv_layer_batched.append(class_conv_layer)\n \n # loop over all images\n iterator_batches = dataloader.make_iterator_for_all_images(image_batch_size, num_random_pyramid_scales=num_random_pyramid_scales)\n for batch_ids, pyramids_batch, box_transforms_batch, initial_img_size_batch in iterator_batches:\n t_start_batch = time.time()\n # select labels to use for search at this batch\n if num_random_negative_labels >= 0 :\n # randomly shuffle labels\n neg_labels = torch.randperm(len(class_conv_layer_batched))\n neg_labels = neg_labels[:num_random_negative_labels]\n # add positive labels\n pos_labels = dataloader.get_class_ids_for_image_ids(batch_ids)\n pos_labels = dataloader.convert_label_ids_global_to_local(pos_labels, class_ids)\n batch_labels_local = torch.cat([neg_labels, pos_labels], 0).unique()\n else:\n # take all the labels - needed for evaluation\n batch_labels_local = torch.arange(len(class_conv_layer_batched))\n \n batch_class_ids = [class_ids[l // num_class_views] for l in batch_labels_local]\n batch_query_img_sizes = [query_img_sizes[l // num_class_views] for l in batch_labels_local]\n\n # extract features at all pyramid levels\n batch_images_pyramid = []\n loc_scores = []\n class_scores = []\n fm_sizes = []\n transform_corners = []\n num_pyramid_levels = len(pyramids_batch)\n \n t_cum_features = 0.0\n t_cum_labels = 0.0\n for batch_images in pyramids_batch:\n if is_cuda:\n batch_images = batch_images.cuda()\n \n t_start_features = time.time()\n feature_maps = net.net_feature_maps(batch_images)\n torch.cuda.synchronize()\n t_cum_features += time.time() - t_start_features\n\n # batch class images\n loc_scores.append([])\n class_scores.append([])\n fm_sizes.append([])\n transform_corners.append([])\n t_start_labels = time.time()\n assert class_batch_size == 1, \"the iterator on images works only with labels batches of size 1\"\n\n for i_class_batch in batch_labels_local:\n # apply net at this pyramid level\n loc_s_p, class_s_p, _, fm_sizes_p, transform_corners_p = \\\n net(class_head=class_conv_layer_batched[i_class_batch],\n feature_maps=feature_maps)\n loc_scores[-1].append(loc_s_p)\n class_scores[-1].append(class_s_p)\n fm_sizes[-1].append(fm_sizes_p)\n transform_corners[-1].append(transform_corners_p)\n torch.cuda.synchronize()\n t_cum_labels += time.time() - t_start_labels\n\n if not feature_maps.requires_grad:\n # explicitly remove a possibly large chunk of GPU memory\n del feature_maps\n\n batch_images_pyramid.append(batch_images)\n\n timing_str = \"Feature time: {0}, Label time: {1}, \".format(time_for_printing(t_cum_features, mode=\"s\"),\n time_for_printing(t_cum_labels, mode=\"s\"))\n\n # loc_scores, class_scores: pyramid_level x class_batch x image_in_batch x\n for i_image_in_batch, image_id in enumerate(batch_ids):\n # get scores from all pyramid levels\n image_loc_scores_p, image_class_scores_p, image_fm_sizes_p = [], [], []\n transform_corners_p = []\n for i_p in range(num_pyramid_levels):\n if loc_scores is not None and loc_scores[0] is not None and loc_scores[0][0] is not None:\n image_loc_scores_p.append(torch.cat([s[i_image_in_batch] for s in loc_scores[i_p]], 0))\n else:\n image_loc_scores_p.append(None)\n image_class_scores_p.append(torch.cat([s[i_image_in_batch] for s in class_scores[i_p]], 0))\n\n if transform_corners is not None 
and transform_corners[0] is not None and transform_corners[0][0] is not None:\n transform_corners_p.append(torch.cat([s[i_image_in_batch] for s in transform_corners[i_p]], 0))\n else:\n transform_corners_p.append(None)\n\n image_fm_sizes_p.append(fm_sizes[i_p][0])\n\n # get a pyramid of one image[i_p]\n one_image_pyramid = [p[i_image_in_batch] for p in batch_images_pyramid]\n\n # extract the box transformations\n box_reverse_transforms = box_transforms_batch[i_image_in_batch]\n\n logger.info(timing_str + \"Net time: {0}\".format(time_since(t_start_batch)))\n yield image_id, image_loc_scores_p, image_class_scores_p, one_image_pyramid,\\\n batch_query_img_sizes, batch_class_ids, box_reverse_transforms, image_fm_sizes_p, transform_corners_p", "def get_over(self, filter_dict, percentage):\n pass", "def detect_class_onpic(boxes, allowed_classes):\n object_class = \"all\"\n highest_prob = 0\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] in allowed_classes and box_prob > highest_prob:\n highest_prob = box_prob\n object_class = box[0]\n return object_class, highest_prob", "def classify(self, projections):\n res = []\n for proj in projections:\n n = self.root\n while len(n.children):\n found = False\n for i, b in enumerate(n.branch):\n if proj[n.axis] <= b:\n n = n.children[i]\n found = True\n if not found:\n if len(n.branch)+1==len(n.children):\n n = n.children[-1]\n else:\n break\n res.append(n.probabilities)\n return np.array(res)", "def filter(self, *args, **kwargs):", "def anoise(this, *args, **kargs):\n\t\t\n\t\t# Arguments\n\t\tif not args: args = [50]\n\t\t\n\t\t# Kernel's retrieval\n\t\tanoisek = this._ANOISEK\n\t\tif anoisek is None: return None\n\t\t\n\t\t# More magic\n\t\tbin = this._BINARY\n\t\tfor thresh in args:\n\t\t\tbin[:,:] = (cv2.filter2D(bin, -1, anoisek) / 2.55 > thresh) * 255\n\t\treturn True", "def __init__(self, filter1x1):\n super(poolproj, self).__init__()\n self.max = layers.MaxPooling2D(pool_size=3, strides=1, padding=\"same\")\n self.conv = layers.Conv2D(\n filter1x1, kernel_size=1, padding=\"same\", activation=\"relu\"\n )", "def classify(self):\n infer = self.model.signatures['serving_default']\n for i, original_image in enumerate(self.images):\n image = original_image.copy()\n image = cv.cvtColor(image, cv.COLOR_BGR2RGB)\n image = cv.resize(image, (self.image_size, self.image_size))\n image = image / 255.\n\n image = [image]\n image = np.asarray(image).astype(np.float32)\n batch_data = tf.constant(image)\n pred_bbox = infer(batch_data)\n for key, value in pred_bbox.items():\n boxes = value[:, :, 0:4]\n pred_conf = value[:, :, 4:]\n\n boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(\n boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),\n scores=tf.reshape(\n pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),\n max_output_size_per_class=10,\n max_total_size=10,\n iou_threshold=FLAGS.iou,\n score_threshold=FLAGS.score\n )\n\n height, width, _ = original_image.shape\n\n print(scores)\n classes = classes[0]\n print(classes)\n\n bbox = boxes[0][0].numpy()\n bbox[0] = int(bbox[0] * height)\n bbox[2] = int(bbox[2] * height)\n bbox[1] = int(bbox[1] * width)\n bbox[3] = int(bbox[3] * width)\n\n if BIRD_CLASS in classes:\n idx = np.where(classes == BIRD_CLASS)\n bbox = bbox.astype(np.int)\n x = int((bbox[1] + bbox[3]) / 2)\n y = int((bbox[0] + bbox[2]) / 2)\n self.thumbnail_center.append((x, y))\n cropped_img = original_image[bbox[0]:bbox[2], bbox[1]: bbox[3]]\n 
self.bird_images.append(cropped_img)\n self.confidence_arr.append(scores[idx[0][0]][0])\n\n self.generate_thumbnail(size=150)", "def grid_search(title, class_names, background_images, option_threshold_curve):\n # prepare title\n if title == DEFAULT_TITLE:\n title = f\"class{'es' if len(class_names) > 1 else ''} {', '.join(class_names)}\"\n\n # calculate true positive confidences\n true_positive_confidences = []\n num_samples = 0\n for class_name in class_names:\n images = load_base64(\n class_name,\n os.path.join(TEST_DIR, class_name),\n desc=f\"[{class_name}] loading\"\n )\n num_samples = num_samples + len(images)\n true_positive_confidences.extend(_calculate_tp_confidences(\n images,\n class_name\n ))\n\n # calculate false positive confidences\n false_positive_confidences = _calculate_fp_confidences(\n background_images,\n class_names\n )\n\n print(\"Grid searching...\")\n tvals = np.linspace(0, 1, int(1 / STEP_SIZE) + 1)\n # discard thresholds below the 1/num_classes\n tvals = [t for t in tvals if t >= 1 / NUM_CLASSES]\n deltas, tp_percentages, fp_percentages = calculate_deltas(\n tvals,\n true_positive_confidences,\n false_positive_confidences,\n num_samples\n )\n best_t_index = np.argmin(deltas)\n best_t = tvals[best_t_index]\n print(f\"Best threshold: {best_t}\")\n\n plt.figure()\n plt.plot(tvals, deltas)\n plt.xlim([tvals[0], tvals[-1]])\n plt.xlabel(\"Threshold\")\n plt.ylabel(\"$\\delta$\")\n plt.title(TITLE_DELTA % title)\n plt.grid()\n\n if option_threshold_curve:\n plt.figure()\n plt.axes().set_aspect('equal')\n plt.plot(\n tp_percentages,\n fp_percentages,\n label=\"Threshold values\"\n )\n optimal_tp = len(true_positive_confidences) / num_samples\n plt.scatter([optimal_tp], [0], color=\"orange\", label=\"Optimal point\")\n nearest_point = (\n tp_percentages[best_t_index],\n fp_percentages[best_t_index]\n )\n plt.plot(\n [optimal_tp, nearest_point[0]],\n [0, nearest_point[1]],\n color=\"gray\",\n linestyle=\":\",\n label=\"Nearest point\"\n )\n plt.text(nearest_point[0], nearest_point[1], f\"$t$ = {best_t:.2f}\")\n left, right = plt.xlim()\n bottom, top = plt.ylim()\n plt.xlim(min(left, bottom), max(right, top))\n plt.ylim(min(left, bottom), max(right, top))\n plt.xlabel(\"True positive percentage\")\n plt.ylabel(\"False positive percentage\")\n plt.title(TITLE_THRESHOLD_CURVE % title)\n plt.legend()\n plt.grid()", "def applyIncidenceFilter(image, bandNames, classDictionary, numChangesCutoff = 8, connectedPixelCutoff=6):\n #Calculate the number of times a pixel changes throughout the time series and determine if it is over the numChangesCutoff\n num_changes = calculateNumberOfChanges(image, bandNames)\n too_many_changes = num_changes.gt(numChangesCutoff)\n \n #Get binary images of the land cover classifications for the current year\n binary_class_images = npv.convertClassificationsToBinaryImages(image, classDictionary)\n \n #Calculate the number of connected pixels for each land cover class and year, reduce to a single band image representing the number\n #of connected pixels of the same land cover class as the central pixel, and determine if it is over the connectedPixelCutoff\n connected_pixel_count = ee.ImageCollection(binary_class_images.map(lambda x: x.mask(x).connectedPixelCount(100,False).reduce(ee.Reducer.sum()).lt(connectedPixelCutoff)))\n \n #Get a bitwiseAnd determination if the number of connected pixels <= connectedPixelCutoff and the number of changes > numChangesCutoff \n incidence_filter = ee.ImageCollection(connected_pixel_count.map(lambda x: 
x.bitwiseAnd(too_many_changes))).toBands().rename(bandNames)\n \n #Get an image that represents the mode of the land cover classes in each pixel\n mode_image = image.reduce(ee.Reducer.mode())\n \n #Replace pixels of image where incidence_filter is True with mode_image\n incidence_filtered = image.where(incidence_filter, mode_image)\n \n return incidence_filtered", "def _classify_from_probs(predicts_proba):\n def find_majority(dict_probs):\n \"\"\"Find the majority class\"\"\"\n # if there is no majority class, pick the first from the sorted\n max_val = max(dict_probs.values())\n max_keys = [key for key in dict_probs.keys()\n if dict_probs[key] == max_val]\n return sorted(max_keys)[0]\n\n predicts = [find_majority(dict_probs) for dict_probs in predicts_proba]\n return predicts", "def get_candidate_objects(output, img_size, classes, anchors, threshold):\n\n #threshold = 0.8\n iou_threshold = 0.4\n\n boxes, probs = parse_yolo_output_v2(output, img_size, len(classes), anchors)\n filter_mat_probs = (probs >= threshold)\n filter_mat_boxes = np.nonzero(filter_mat_probs)[0:3]\n boxes_filtered = boxes[filter_mat_boxes]\n probs_filtered = probs[filter_mat_probs]\n classes_num_filtered = np.argmax(probs, axis=3)[filter_mat_boxes]\n\n idx = np.argsort(probs_filtered)[::-1]\n boxes_filtered = boxes_filtered[idx]\n probs_filtered = probs_filtered[idx]\n classes_num_filtered = classes_num_filtered[idx]\n\n # too many detections - exit\n if len(boxes_filtered) > 1e3:\n print(\"Too many detections, maybe an error? : {}\".format(\n len(boxes_filtered)))\n return []\n\n probs_filtered = non_maxima_suppression(boxes_filtered, probs_filtered,\n classes_num_filtered, iou_threshold)\n\n filter_iou = (probs_filtered > 0.0)\n boxes_filtered = boxes_filtered[filter_iou]\n probs_filtered = probs_filtered[filter_iou]\n classes_num_filtered = classes_num_filtered[filter_iou]\n\n result = []\n for class_id, box, prob in zip(classes_num_filtered, boxes_filtered, probs_filtered):\n result.append([classes[class_id], box[0], box[1], box[2], box[3], prob])\n\n return result", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._adjust_color_img(results, self.factor)\n return results", "def filter(self, filter_dict):\n pass", "def conditional_probability(data, attr, cp_table):\n # gets class names for dataframe manipulation\n classes = attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # number of instances beloning to each class\n nclass0 = cp_table.loc[0, class0].sum()\n nclass1 = cp_table.loc[0, class1].sum()\n total = nclass0 + nclass1\n # all probabilities include a laplace est of 1\n prior0 = (nclass0 + 1) / (total + 2)\n prior1 = (nclass1 + 1) / (total + 2)\n list0 = []\n list1 = []\n for index, row in cp_table.iterrows():\n numattr = len(attr.loc[index, 'vars'])\n numer0 = row[class0] + 1\n numer1 = row[class1] + 1\n denom0 = nclass0 + (1 * numattr)\n denom1 = nclass1 + (1 * numattr)\n cp0 = numer0 / denom0\n cp1 = numer1 / denom1\n list0.append(cp0)\n list1.append(cp1)\n # replacing columns in previous table with cond probs\n del cp_table[class0]\n del cp_table[class1]\n cp_table[class0] = list0\n cp_table[class1] = list1\n \n return cp_table, prior0, prior1", "def boxes_filter(dets, bbox_id=1, class_name='None', color=(255, 255, 255), scale=1.0, thresh=0.5, min_size=(2, 2)):\n _objs = []\n inds = np.where(dets[:, -1] >= thresh)[0]\n if len(inds) == 0:\n return _objs\n\n for i in inds:\n bbox = dets[i, :4] 
/ scale\n bbox_confidence = dets[i, -1]\n if bbox[3] - bbox[1] <= min_size[0] or bbox[2] - bbox[0] <= min_size[1]:\n continue\n attribute = dict(class_name=class_name, color=color)\n _objs.append(dict(bbox=bbox, bbox_id=bbox_id, bbox_confidence=bbox_confidence, keypoints=[],\n attribute=attribute, person_id=-1, person_confidence=-1, segment=[]))\n\n return _objs", "def detect_objects(self, image, threshold):\n self.set_input_tensor(image)\n self.interpreter.invoke()\n\n # Get all output details\n boxes = self.get_output_tensor(0)\n classes = self.get_output_tensor(1)\n scores = self.get_output_tensor(2)\n count = int(self.get_output_tensor(3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': classes[i],\n 'score': scores[i]\n }\n results.append(result)\n return results", "def classify(self, image, nclasses, gamma):\n\n nclasses = nclasses + 1 # One extra class for the background\n energy_sum = [1e-05]\n\n com = ConstantObservationModel()\n icm = IteratedConditionalModes()\n\n if image.max() > 1:\n image = np.interp(image, [0, image.max()], [0.0, 1.0])\n\n mu, sigma = com.initialize_param_uniform(image, nclasses)\n p = np.argsort(mu)\n mu = mu[p]\n sigma = sigma[p]\n sigmasq = sigma ** 2\n\n neglogl = com.negloglikelihood(image, mu, sigmasq, nclasses)\n seg_init = icm.initialize_maximum_likelihood(neglogl)\n\n mu, sigma = com.seg_stats(image, seg_init, nclasses)\n sigmasq = sigma ** 2\n\n zero = np.zeros_like(image) + 0.001\n zero_noise = add_noise(zero, 10000, 1, noise_type='gaussian')\n image_gauss = np.where(image == 0, zero_noise, image)\n\n final_segmentation = np.empty_like(image)\n initial_segmentation = seg_init.copy()\n\n max_iter = 100\n tolerance = 1e-05\n\n for i in range(max_iter):\n\n if self.verbose:\n print('>> Iteration: ' + str(i))\n\n PLN = icm.prob_neighborhood(seg_init, beta, nclasses)\n PVE = com.prob_image(image_gauss, nclasses, mu, sigmasq, PLN)\n\n mu_upd, sigmasq_upd = com.update_param(image_gauss,\n PVE, mu, nclasses)\n ind = np.argsort(mu_upd)\n mu_upd = mu_upd[ind]\n sigmasq_upd = sigmasq_upd[ind]\n\n negll = com.negloglikelihood(image_gauss, mu_upd, sigmasq_upd, nclasses)\n final_segmentation, energy = icm.icm_ising(negll, beta, seg_init)\n energy_sum.append(energy[energy > -np.inf].sum())\n\n if self.save_history:\n self.segmentations.append(final_segmentation)\n self.pves.append(PVE)\n self.energies.append(energy)\n self.energies_sum.append(energy[energy > -np.inf].sum())\n\n if i % 10 == 0 and i != 0:\n\n tol = tolerance * (np.amax(energy_sum) - np.amin(energy_sum))\n\n test_dist = np.absolute(np.amax(energy_sum[np.size(energy_sum) - 5: i]) -\n np.amin(energy_sum[np.size(energy_sum) - 5: i]))\n\n if test_dist < tol:\n\n break\n\n seg_init = final_segmentation.copy()\n mu = mu_upd.copy()\n sigmasq = sigmasq_upd.copy()\n\n PVE = PVE[..., 1:]\n\n return initial_segmentation, final_segmentation, PVE", "def swarpfilter(d, dir, directory, images, keys, filter, lamp, camera, done, output, type):\n filt = images.files_filtered(FWINAME=filter, FLSPECTR=lamp, CAMNAME=camera, HISTORY=done)\n files = [d + x for x in filt.tolist()]\n print(files)\n if files:\n swarp(files, output=directory + '/' + output + '.fits', celestial_type=type)", "def GlobalThresholding(image, kernel_sigma, N_levels, N_classes, step = 1): \n \n if kernel_sigma >= 1:\n image = Denoising(image, kernel_sigma);\n \n pixel_count, pixel_count_normalized = CountPixels(image, N_levels);\n mean_g = image.mean(); # global mean\n\n 
if N_classes == 2: \n interclass_var = np.zeros((N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 1, step): \n\n threshold = ii;\n \n mask_1 = range_array <= threshold;\n mask_2 = range_array > threshold;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = 1 - p_1; # probability of class 2\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2;\n interclass_var[ii] = np.nan_to_num(temp);\n \n threshold = np.argmax(interclass_var);\n mask_1 = image <= threshold;\n mask_2 = image > threshold;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n return mask;\n elif N_classes == 3:\n interclass_var = np.zeros((N_levels, N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 2, step): \n for jj in range(ii + 1, N_levels - 1, step):\n\n threshold1 = ii;\n threshold2 = jj;\n \n mask_1 = range_array <= threshold1;\n mask_2 = (range_array > threshold1) * (range_array <= threshold2);\n mask_3 = range_array > threshold2;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = pixel_count_normalized[mask_2].sum(); # probability of class 2\n p_3 = 1 - (p_1 + p_2); # probability of class 3\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n mean_3 = 1 / p_3 * np.sum(range_array[mask_3] * pixel_count_normalized[mask_3]); # mean of class 3\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2 + p_3 * (mean_3 - mean_g) ** 2;\n interclass_var[ii, jj] = np.nan_to_num(temp);\n \n threshold = np.unravel_index(np.argmax(interclass_var, axis=None), interclass_var.shape);\n threshold1 = threshold[0];\n threshold2 = threshold[1];\n \n mask_1 = image <= threshold1;\n mask_2 = (image > threshold1) * (image <= threshold2);\n mask_3 = image > threshold2;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n mask[mask_3] = 2;\n return mask;\n elif N_classes == 4:\n interclass_var = np.zeros((N_levels, N_levels, N_levels)); # inter-class variance\n range_array = np.arange(0, N_levels, 1).reshape(N_levels, 1);\n for ii in range(0, N_levels - 3, step): \n for jj in range(ii + 1, N_levels - 2, step):\n for kk in range(jj + 1, N_levels - 1, step): \n \n threshold1 = ii;\n threshold2 = jj;\n threshold3 = kk;\n \n mask_1 = range_array <= threshold1;\n mask_2 = (range_array > threshold1) * (range_array <= threshold2);\n mask_3 = (range_array > threshold2) * (range_array <= threshold3); \n mask_4 = range_array > threshold3;\n \n p_1 = pixel_count_normalized[mask_1].sum(); # probability of class 1\n p_2 = pixel_count_normalized[mask_2].sum(); # probability of class 2\n p_3 = pixel_count_normalized[mask_3].sum(); # probability of class 3\n p_4 = 1 - (p_1 + p_2 + p_3); # probability of class 4\n \n mean_1 = 1 / p_1 * np.sum(range_array[mask_1] * pixel_count_normalized[mask_1]); # mean of class 1\n mean_2 = 1 / p_2 * np.sum(range_array[mask_2] * pixel_count_normalized[mask_2]); # mean of class 2\n mean_3 = 1 / p_3 * np.sum(range_array[mask_3] * pixel_count_normalized[mask_3]); # mean of class 
3\n mean_4 = 1 / p_4 * np.sum(range_array[mask_4] * pixel_count_normalized[mask_4]); # mean of class 4\n \n temp = p_1 * (mean_1 - mean_g) ** 2 + p_2 * (mean_2 - mean_g) ** 2 + \\\n p_3 * (mean_3 - mean_g) ** 2 + p_4 * (mean_4 - mean_g) ** 2;\n interclass_var[ii, jj, kk] = np.nan_to_num(temp);\n \n threshold = np.unravel_index(np.argmax(interclass_var, axis=None), interclass_var.shape);\n threshold1 = threshold[0];\n threshold2 = threshold[1];\n threshold3 = threshold[2];\n \n mask_1 = image <= threshold1;\n mask_2 = (image > threshold1) * (image <= threshold2);\n mask_3 = (image > threshold2) * (image <= threshold3);\n mask_4 = image > threshold3;\n mask = np.zeros(image.shape);\n mask[mask_1] = 0;\n mask[mask_2] = 1;\n mask[mask_3] = 2;\n mask[mask_4] = 3;\n return mask;\n else:\n print('max supported N_class == 4. Abort..\\n')\n return None;", "def evaluate_probabilities(self, batches):\n total_batches = batches.batches_per_epoch()\n catprobs = []\n for batch in range(total_batches):\n X_batch, y_batch = batches.get_batch()\n feed_dict = {\n self.x: X_batch,\n self.y: y_batch,\n self.keep_prob: 1.0}\n fetch_dict = {\n \"catprobs\": self.categorical_probabilities}\n result = self.session.run(fetch_dict, feed_dict)\n catprobs.append(result[\"catprobs\"])\n catprobs = np.concatenate(catprobs)\n return catprobs", "def metrics(img_gt, img_pred, voxel_size):\n\n if img_gt.ndim != img_pred.ndim:\n raise ValueError(\"The arrays 'img_gt' and 'img_pred' should have the \"\n \"same dimension, {} against {}\".format(img_gt.ndim,\n img_pred.ndim))\n\n res = []\n # Loop on each classes of the input images\n for c in [3, 1, 2]:\n # Copy the gt image to not alterate the input\n gt_c_i = np.copy(img_gt)\n gt_c_i[gt_c_i != c] = 0\n\n # Copy the pred image to not alterate the input\n pred_c_i = np.copy(img_pred)\n pred_c_i[pred_c_i != c] = 0\n\n # Clip the value to compute the volumes\n gt_c_i = np.clip(gt_c_i, 0, 1)\n pred_c_i = np.clip(pred_c_i, 0, 1)\n\n # Compute the Dice\n dice = dc(gt_c_i, pred_c_i)\n\n # Compute volume\n volpred = pred_c_i.sum() * np.prod(voxel_size) / 1000.\n volgt = gt_c_i.sum() * np.prod(voxel_size) / 1000.\n\n res += [dice, volpred, volpred-volgt]\n\n return res", "def apply_activation(self, mlvl_preds, remove_background_channel_if_any):\n mlvl_activated_preds = []\n for lvl_idx, preds in enumerate(mlvl_preds):\n cls_pred = apply_class_activation(preds[0], self.class_activation)\n if self.with_background_channel and remove_background_channel_if_any:\n cls_pred = cls_pred[..., 1:]\n mlvl_activated_preds.append((cls_pred, *preds[1:]))\n return mlvl_activated_preds", "def test_11(self):\n for _ in range(1000):\n num_types = np.random.randint(1, 10)\n edu_start = np.random.randint(10, 100)\n type_shares = np.random.normal(0, 1, size=num_types * 2)\n\n args = [type_shares, np.array([edu_start])]\n\n py = get_conditional_probabilities(*args)\n fort = fort_debug.wrapper_get_conditional_probabilities(*args + [num_types])\n\n assert_almost_equal(np.sum(py), 1.0)\n assert_almost_equal(py, fort)", "def limit_pics(X, y, classes=None, nim=None) :\r\n \r\n n, d = X.shape\r\n if classes is None : classes = np.unique(y)\r\n if nim is None : nim = n\r\n \r\n num_classes = len(classes)\r\n X1 = np.zeros((num_classes*nim, d), dtype=float)\r\n y1 = np.zeros(num_classes*nim, dtype=int)\r\n \r\n index = 0\r\n for ni, i in enumerate(classes) : # for each class\r\n count = 0 # count how many samples in class so far\r\n for j in range(n): # look over data\r\n if count < nim and y[j] == i : # element of 
class\r\n X1[index] = X[j]\r\n y1[index] = ni\r\n index += 1\r\n count += 1\r\n \r\n X1 = X1[:index,:]\r\n y1 = y1[:index]\r\n return X1, y1", "def show_filters(self):\n weight_mat = self.sess.run(self.W_fc_out)\n\n # Loop channels\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n for cl in range(weight_mat.shape[1]):\n # Get filters of this output class\n w_list = ia.vec2image( lin_image=weight_mat[:,cl],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n\n # Show channels\n for ch,w in enumerate(w_list):\n colormax = np.abs(w).max()\n ax = plt.subplot2grid( (self.n_output_classes,\n self.n_input_channels), (cl,ch) )\n ax.imshow( w, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n colormax = np.abs(w).max()\n\n if self.n_output_classes == 2:\n plt.figure(figsize=(12,5), facecolor='w', edgecolor='w')\n with sns.axes_style(\"white\"):\n # Get filters of this output class\n w_list0 = ia.vec2image( lin_image=weight_mat[:,0],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n w_list1 = ia.vec2image( lin_image=weight_mat[:,1],\n n_channels=self.n_input_channels,\n image_size=(self.y_res,self.x_res) )\n for ch in range(len(w_list)):\n w_both = w_list1[ch]-w_list0[ch]\n\n colormax = np.abs(w_both).max()\n ax = plt.subplot2grid( (1,\n self.n_input_channels), (0,ch) )\n ax.imshow( w_both, interpolation='nearest',\n cmap=plt.get_cmap('seismic'),\n clim=(-1*colormax,colormax) )\n ax.set_title(\"Class {}, Channel {}\".format(cl,ch))\n plt.axis('tight')\n plt.axis('off')\n plt.tight_layout()", "def apply(filter_fn, img):\n width, height = img.size\n newimg = Image.new(\"RGB\", (width, height))\n for j in range(1, height - 1):\n for i in range(1, width - 1):\n newimg.putpixel((i, j), filter_fn(img, i, j))\n return newimg", "def component_filter_by_color(components, img):\n new_component = []\n for component in components:\n component_left_neighbor = img[component[0].start:component[0].stop,\n max(component[1].start - 10, 0):component[1].start]\n component_right_neighbor = img[component[0].start:component[0].stop,\n component[1].stop:min(component[1].stop + 10, img.shape[1])]\n component_up_neighbor = img[max(component[0].start - 10, 0):component[0].start,\n component[1].start:component[1].stop]\n component_low_neighbor = img[component[0].stop:min(component[0].stop + 10, img.shape[0]),\n component[1].start:component[1].stop]\n left_white_ratio = np.sum(component_right_neighbor > 240) / (\n component_right_neighbor.shape[0] * component_right_neighbor.shape[1])\n right_white_ratio = np.sum(component_left_neighbor > 240) / (\n component_left_neighbor.shape[0] * component_left_neighbor.shape[1])\n up_white_ratio = np.sum(component_up_neighbor > 240) / (\n component_up_neighbor.shape[0] * component_up_neighbor.shape[1])\n low_white_ratio = np.sum(component_low_neighbor > 240) / (\n component_low_neighbor.shape[0] * component_low_neighbor.shape[1])\n if np.sum([left_white_ratio > 0.9, right_white_ratio > 0.9, up_white_ratio > 0.9, low_white_ratio > 0.9]) > 2:\n new_component.append(component)\n return new_component", "def get_preds(img_path):\n # load image\n img = Image.open(img_path).convert(\"RGB\")\n # process it\n x = t(img)\n # get in in the right format\n x = Variable(x).unsqueeze(0)\n # predictions\n output = model(x)\n # decode\n output = 
decode(output.cpu().data.numpy()[0])\n\n # filter\n # return pred, proba\n return output", "def filtering(image):\n output = np.array(image)\n for x in xrange(0,1):\n bilateralFilter_img = cv2.bilateralFilter(output,5, 75, 75)\n\n return bilateralFilter_img", "def classify_all_images(cc):\n print 'Classify images'\n images = cc.d.images\n for img_idx in range(comm_rank, len(images), comm_size): # PARALLEL\n print 'classify image %d/%d at %d'%(img_idx/comm_size, len(images)/comm_size, comm_rank)\n img = images[img_idx]\n scores = classify_image(cc, img_idx)\n savefile = config.get_classifier_score_name(img, cc.L)\n cPickle.dump(scores, open(savefile,'w'))" ]
[ "0.6045485", "0.5797399", "0.5739651", "0.57369685", "0.5731208", "0.56458217", "0.55604017", "0.55141634", "0.5474806", "0.5470038", "0.5451431", "0.5425758", "0.53987706", "0.53475237", "0.5286839", "0.52788055", "0.52607375", "0.5203152", "0.5157591", "0.5144034", "0.5125432", "0.51221365", "0.511948", "0.51067436", "0.5079235", "0.5066152", "0.5060088", "0.5052882", "0.50246495", "0.49993414", "0.49745783", "0.49688816", "0.49536464", "0.49427727", "0.49385816", "0.49377343", "0.49362165", "0.49343073", "0.49339786", "0.49296868", "0.49295992", "0.491658", "0.48909074", "0.48865166", "0.48785678", "0.4875008", "0.48729435", "0.48642415", "0.48614562", "0.48484787", "0.4835538", "0.4835297", "0.48305732", "0.4828707", "0.4815093", "0.48074", "0.48072845", "0.4805458", "0.48009202", "0.4800301", "0.4800165", "0.47793996", "0.47782922", "0.47711316", "0.47674817", "0.47537297", "0.47509694", "0.47490284", "0.47486648", "0.4748213", "0.47473827", "0.47466964", "0.47417584", "0.47410488", "0.47262067", "0.47260776", "0.4708431", "0.47051692", "0.4704174", "0.47005075", "0.46996146", "0.46892804", "0.46882457", "0.46874538", "0.4686739", "0.4681687", "0.4674702", "0.46736228", "0.46728626", "0.467051", "0.4662624", "0.46529233", "0.46479958", "0.4645475", "0.4645233", "0.4645119", "0.4640932", "0.46316767", "0.46285322", "0.46271434" ]
0.79373574
0
Return a logger with a default ColoredFormatter.
def setup_logger(): formatter = ColoredFormatter( ( '%(log_color)s%(levelname)-5s%(reset)s ' '%(yellow)s[%(asctime)s]%(reset)s' '%(green)s %(name)s %(purple)s %(filename)s %(purple)s %(funcName)s %(purple)s:%(lineno)d%(reset)s ' '%(bold_blue)s%(message)s%(reset)s' ), datefmt='%y-%m-%d %H;%M:%S', log_colors={ 'DEBUG': 'blue', 'INFO': 'yellow', 'WARNING': 'red', 'ERROR': 'blue,bg_bold_red', 'CRITICAL': 'red,bg_white', } ) logger = logging.getLogger('shen-yue-is-beautiful') handler = logging.StreamHandler() handler.setFormatter(formatter) logger.addHandler(handler) logger.setLevel(logging.DEBUG) return logger
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def get_logger(name: str, level: str = LOG_LEVEL) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(level)\n coloredlogs.install(\n level=level, logger=logger, fmt='%(asctime)s %(name)s: %(lineno)s %(levelname)s: %(message)s', field_styles=FIELD_STYLES\n )\n return logger", "def init_logger():\n LOG_LEVEL = logging.INFO\n LOGFORMAT = \"%(log_color)s%(levelname)-1s: %(log_color)s%(message)s\"\n logging.root.setLevel(LOG_LEVEL)\n formatter = ColoredFormatter(LOGFORMAT)\n stream = logging.StreamHandler()\n stream.setLevel(LOG_LEVEL)\n stream.setFormatter(formatter)\n log = logging.getLogger('pythonConfig')\n log.setLevel(LOG_LEVEL)\n log.addHandler(stream)\n return log", "def setup_logger():\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'red',\n }\n )\n\n logger = logging.getLogger('cedulas')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def init_logger_color():\n if os.environ.get('COLOREDLOGS_LOG_LEVEL') is None:\n os.environ['COLOREDLOGS_LOG_LEVEL'] = 'INFO'\n if os.environ.get('COLOREDLOGS_LOG_FORMAT') is None:\n os.environ['COLOREDLOGS_LOG_FORMAT'] = '%(asctime)s '\n '[%(levelname)s] %(message)s'\n if os.environ.get('COLOREDLOGS_DATE_FORMAT') is None:\n os.environ['COLOREDLOGS_LOG_DATE_FORMAT'] = '%Y-%m-%d %H:%M:%S'\n coloredlogs.install()", "def setup_logger(log_file_path =\"\"):\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'purple',\n }\n )\n logging.basicConfig(handlers=[logging.FileHandler(log_file_path, 'w', 'utf-8')],\n format=\"%(message)s\"\n )\n logger = logging.getLogger('')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def default_logger_creator(config):\n return UnifiedLogger(config, logdir, loggers=None)", "def colored_console_logger(init_context: \"InitLoggerContext\") -> logging.Logger:\n return create_console_logger(\n name=init_context.logger_config[\"name\"],\n level=coerce_valid_log_level(init_context.logger_config[\"log_level\"]),\n )", "def init_logger(name, path=None):\n import logging.handlers\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = 0\n _nf = ['[%(asctime)s]',\n '[%(name)s]',\n '[%(filename)20s:%(funcName)15s:%(lineno)5d]',\n '[%(levelname)s]',\n ' %(message)s']\n _cf = ['$GREEN[%(asctime)s]$RESET',\n '[%(name)s]',\n 
'$BLUE[%(filename)20s:%(funcName)15s:%(lineno)5d]$RESET',\n '[%(levelname)s]',\n ' $CYAN%(message)s$RESET']\n nformatter = logging.Formatter('-'.join(_nf))\n cformatter = ColoredFormatter('-'.join(_cf))\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(cformatter)\n\n if path:\n path += '/' + name + '.log'\n else:\n path = get_path('log') + '/' + name + '.log'\n rf = logging.handlers.RotatingFileHandler(path, maxBytes=5 * 1024 * 1024, backupCount=5)\n rf.setLevel(logging.DEBUG)\n rf.setFormatter(nformatter)\n\n logger.addHandler(ch)\n logger.addHandler(rf)\n return logger", "def setup_custom_logger(name):\n formatter = logging.Formatter(fmt=FORMAT, datefmt=DATEFMT)\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(LEVEL)\n logger.addHandler(handler)\n\n return logger", "def default_logger_creator(config):\n cfg = config[\"logger_config\"].copy()\n cls = cfg.pop(\"type\")\n # Provide default for logdir, in case the user does\n # not specify this in the \"logger_config\" dict.\n logdir_ = cfg.pop(\"logdir\", logdir)\n return from_config(cls=cls, _args=[cfg], logdir=logdir_)", "def initialize_formatter(config):\n if config.json: # pylint: disable=R1705\n return formatters.JsonFormatter()\n elif config.severity: # pylint: disable=R1705\n return formatters.SeverityFormatter(config.colored)\n return formatters.Formatter(config.colored)", "def get_logger(name='default.log', level=logging.DEBUG):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n hdlr = logging.StreamHandler()\n hdlr.setLevel(level)\n fmt = PrettyFormatter()\n hdlr.setFormatter(fmt)\n logger.addHandler(hdlr)\n return logger", "def get_logger(name):\n log = logging.getLogger(name)\n # we don't set the logger's level to inherit from the parent logger.\n if log.handlers:\n return log\n fmt = logging.Formatter(LOG_FMT)\n shdlr = logging.StreamHandler()\n shdlr.setFormatter(fmt)\n log.addHandler(shdlr)\n log.propagate = False\n return log", "def get_logger(name):\n return StyleAdapter(logging.getLogger(name))", "def get_logger(name: str):\n # setup logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(__lvl__)\n ch = logging.StreamHandler()\n ch.setLevel(__lvl__)\n preformat = f'[{logger.name}]'\n # [%(threadName)s/%(levelname)s] = [MainThread/INFO]\n ch.setFormatter(logging.Formatter(fmt=preformat + ' %(levelname)s [%(asctime)s] %(message)s',\n datefmt='%H:%M:%S'))\n logger.addHandler(ch)\n return logger", "def _get_logger():\n logger = logging.getLogger(__name__)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n ch.setFormatter(logging.Formatter(\"%(asctime)s [%(levelname)8s] %(message)s\"))\n\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def _logger(self):\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger", "def logger_initiate():\n 
logger.setLevel(logging.DEBUG)\n return logging.basicConfig(\n format=(\n '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s %(message)s'),\n datefmt='%Y-%m-%d %H:%M:%S')", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger", "def setup_logging(log_level=logging.DEBUG):\n logging.basicConfig(level=log_level)\n fmt = \"%(asctime)s %(levelname)s (%(threadName)s) \" \"[%(name)s] %(message)s\"\n colorfmt = \"%(log_color)s{}%(reset)s\".format(fmt)\n datefmt = \"%Y-%m-%d %H:%M:%S\"\n\n try:\n from colorlog import ColoredFormatter\n\n logging.getLogger().handlers[0].setFormatter(\n ColoredFormatter(\n colorfmt,\n datefmt=datefmt,\n reset=True,\n log_colors={\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red\",\n },\n )\n )\n except ImportError:\n pass\n\n logger = logging.getLogger(\"\")\n logger.setLevel(log_level)", "def get_logger(name: str, level=None, propagate=False, handlers=DEFAULT_HANDLERS, args=[[DEFAULT_STREAM]]):\n\n logger = logging.getLogger(name)\n if level is None:\n logger.setLevel(Logging.DEFAULT_LEVEL)\n else:\n logger.setLevel(level)\n\n for func, arg in zip(handlers, args):\n logger.addHandler(func(*arg))\n\n for handler in logger.handlers:\n handler.setFormatter(FORMATTER)\n\n logger.propagate = propagate\n return logger", "def get_logger(level: Optional[int] = None) -> logging.Logger:\n logger = logging.getLogger(LOGGER_NAME)\n if level is not None:\n logger.setLevel(level)\n\n if not logger.handlers:\n formatter = logging.Formatter(fmt=\"%(levelname)-8s %(message)s\", datefmt=\"%H:%M:%S\")\n handler = logging.StreamHandler()\n if level is not None:\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n return logger", "def get_logger():\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(fmt=\"%(asctime)s %(levelname)s %(name)s: %(message)s\",\n datefmt=\"%Y-%m-%d - %H:%M:%S\")\n if logger.hasHandlers():\n logger.handlers.clear()\n\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(logging.INFO)\n console.setFormatter(formatter)\n\n logger.addHandler(console)\n\n return logger", "def get_logger(name=\"unknown_logger\"):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(FORMATTER)\n logger.addHandler(handler)\n logger.propagate = False # to avoid printing the same logs multiple times\n return logger", "def get_logger(name, level=None):\n if not level:\n level = os.environ.get('LOGGER_LEVEL', 'INFO')\n\n logger = logging.getLogger(name)\n\n set_formatter(logger)\n\n try:\n logger.setLevel(level.upper())\n except (TypeError, ValueError) as err:\n logger.setLevel('INFO')\n logger.error('Defaulting to INFO logging: %s', str(err))\n\n return logger", "def init_logging(log_format: str='default', level: str='INFO') -> Union[DefaultFormatter, DebugFormatter]:\n stream_handler = logging.StreamHandler()\n if log_format == 'default':\n formatter = DefaultFormatter\n elif log_format == 'human':\n formatter = DebugFormatter\n else:\n raise ValueError('Unrecognized Format: {}'.format(log_format))\n stream_handler.setFormatter(formatter())\n 
ROOT_LOGGER.addHandler(stream_handler)\n ROOT_LOGGER.setLevel(level)\n return formatter", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def get_logger(name):\n logger = logging.getLogger(name)\n if not logger.handlers:\n logger.propagate = 1 # propagate to parent\n console = logging.StreamHandler()\n logger.addHandler(console)\n formatter = logging.Formatter(\n '%(name)s - [%(levelname)s] - %(message)s')\n console.setFormatter(formatter)\n return logger", "def getLogger(self, *args, **kwargs):\r\n return loggers.getLogger(*args, **kwargs)", "def getLogger(name):\n log = logging.getLogger(name)\n log.setLevel(logging.DEBUG)\n hnd2 = logging.StreamHandler(sys.stdout)\n fmt2 = logging.Formatter(fmt='%(name)-20s %(levelname)-8s %(message)s')\n hnd2.setLevel(logging.NOTSET)\n hnd2.addFilter(FilterLevel(True, [logging.INFO]))\n hnd2.setFormatter(fmt2)\n log.addHandler(hnd2)\n hnd1 = logging.StreamHandler(sys.stdout)\n fmt1 = logging.Formatter(fmt=('%(name)-20s %(levelname)-8s' +\n '%(filename)s:%(lineno)s %(message)s'))\n hnd1.setLevel(logging.NOTSET)\n hnd1.addFilter(FilterLevel(False, [logging.INFO]))\n hnd1.setFormatter(fmt1)\n log.addHandler(hnd1)\n return log", "def _setup_default_logger(self):\n #print(f\"setup default logger is called by {self}\")\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(\n '%(process)d-%(levelname)s-%(asctime)s.%(msecs)02d-%(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S'))\n self.logger.addHandler(stream_handler)\n self.logger.propagate = True # don't propagate to the root logger! ", "def get_logger(name=\"LazySusan\"):\n level = get_level()\n _configure(level)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n # Console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n\n logger.addHandler(ch)\n\n return logger", "def get_standard_logger():\n standard_logger = logging.getLogger(\"instana\")\n\n ch = logging.StreamHandler()\n f = logging.Formatter('%(asctime)s: %(process)d %(levelname)s %(name)s: %(message)s')\n ch.setFormatter(f)\n standard_logger.addHandler(ch)\n standard_logger.setLevel(logging.DEBUG)\n return standard_logger", "def _get_logger(verbose: bool = False) -> logging:\n logger = logging.getLogger() # root logger\n if verbose:\n logger.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(module)s:%(funcName)-20s - %(message)s'\n else:\n logger.setLevel(logging.INFO)\n format_str = '%(message)s'\n\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n color_format = '%(log_color)s' + format_str\n colors = {'DEBUG': 'green',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(color_format, date_format, log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def get_main_logger():\n\n # Use verbose debug logging for now.\n console_loglevel = VERBOSITY_LEVELS[2]\n file_loglevel = 
VERBOSITY_LEVELS[2]\n\n console_fmt = logging.Formatter(\n '%(name)s: %(levelname)s %(message)s')\n file_fmt = logging.Formatter(\n '%(asctime)s - %(name)s: %(levelname)s %(message)s')\n\n log = logging.getLogger('toggledarkly')\n\n console_log = logging.StreamHandler()\n console_log.setFormatter(console_fmt)\n console_log.setLevel(console_loglevel)\n log.addHandler(console_log)\n\n file_log = handlers.RotatingFileHandler(\n LOG_FILE_PATH, maxBytes=(1048576*5), backupCount=5\n )\n file_log.setFormatter(file_fmt)\n file_log.setLevel(file_loglevel)\n log.addHandler(file_log)\n\n if SYSTEMD_SUPPORT:\n journald_log = JournalHandler()\n journald_log.setLevel(file_loglevel)\n journald_log.setFormatter(console_fmt)\n log.addHandler(journald_log)\n \n log.setLevel(VERBOSITY_LEVELS[2])\n\n return log", "def init_logger(self):\n\n if self.args.log_level:\n log_level = getattr(logging, self.args.log_level)\n if coloredlogs:\n coloredlogs.install(level=log_level, fmt=LOG_FMT)\n else:\n logging.basicConfig(level=log_level)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(LOG_FMT)\n ch.setFormatter(formatter)\n elif coloredlogs:\n coloredlogs.install(level='INFO', fmt=LOG_FMT)\n\n if coloredlogs:\n effective_level = coloredlogs.get_level()\n else:\n effective_level = logger.getEffectiveLevel()\n\n # make sure warning and error display at any effective level\n if effective_level > logging.WARNING:\n self.warning = logger.critical\n else:\n self.warning = logger.warning\n\n if effective_level > logging.ERROR:\n self.error = logger.critical\n else:\n self.error = logger.error\n\n self.info = logger.info\n self.debug = logger.debug\n self.exception = logger.exception\n self.critical = logger.critical", "def get_logger(name: str) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.propagate = False\n logger.setLevel(logging.DEBUG)\n if not logger.handlers:\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(\"[%(asctime)s] %(message)s\"))\n logger.addHandler(handler)\n return logger", "def __init__(self, default_level=logging.WARNING):\n # All loggers are an attr of self for tab completion in iPython\n # (with . 
replaced with _)\n self._loggerdict = logging.Logger.manager.loggerDict\n for name, logger in self._loggerdict.iteritems():\n attr = name.replace('.', '_')\n setattr(self, attr, logger)\n\n if len(logging.root.handlers) == 0:\n # The default level is INFO\n fmt='%(levelname)-7s | %(asctime)-23s | %(name)-8s | %(message)s'\n logging.basicConfig(format=fmt, level=default_level)\n logging.StreamHandler.emit = self._emit_wrap", "def logger(self) -> Logger:\n logger = getLogger(\"WatchTheDoor\")\n logger.setLevel(INFO)\n return logger", "def get_logger(name: str, formatter: logging.Formatter, debug_file: str, warning_file: str):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Handler 1 (Debug)\n filehandler_debug = logging.FileHandler(debug_file)\n filehandler_debug.setLevel(logging.DEBUG)\n # Handler 2 (Wichtig)\n filehandler_important = logging.FileHandler(warning_file)\n filehandler_important.setLevel(logging.WARNING)\n\n # Initialisiere Formatter\n filehandler_debug.setFormatter(formatter)\n filehandler_important.setFormatter(formatter)\n\n while logger.handlers: # verhindert doppelte Logs bei mehrfacher Erstellung eines Loggers\n logger.removeHandler(logger.handlers[0])\n\n logger.addHandler(filehandler_debug)\n logger.addHandler(filehandler_important)\n\n return logger", "def logger() -> logging.Logger:\n return logging.getLogger(__name__)", "def logger(self):\n return logging", "def get_logger(set_info=False):\n\n logging.basicConfig(format=\"%(message)s\", stream=sys.stdout)\n logger = logging.getLogger(\"pythonanywhere\")\n if set_info:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARNING)\n return logger", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger", "def config_logger(name: str = '', level: int = logging.INFO,\n format_str: str = '%(levelname)s - [%(processName)s(%(process)d)] [%(asctime)s] - %(filename)s:%(lineno)d: %(message)s',\n handler: Type[Handler] = logging.StreamHandler, propagate: bool = True):\n _handler = handler()\n _handler.setFormatter(logging.Formatter(format_str))\n _logger = logging.getLogger(name)\n _logger.addHandler(_handler)\n _logger.setLevel(level)\n _logger.propagate = propagate\n return _logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'GNN-{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def get_logger():\r\n global logger\r\n \r\n if logger:\r\n return logger\r\n else:\r\n return create_logger()", "def logger():\n 
return logging.getLogger(__name__)", "def get_logger(name, conf):\n\n try:\n # try absolute path\n lfile = conf['log_file']\n except KeyError:\n print('config warning: log file is not configured, logging to default.log')\n lfile = 'default.log'\n except:\n print('config error: log file directory does not exist')\n lfile = 'default.log'\n\n try:\n timezone = conf['time_zone']\n except KeyError:\n timezone = 'America/Chicago'\n\n tz = pytz.timezone(timezone)\n\n class Formatter(logging.Formatter):\n def converter(self, timestamp):\n return datetime.datetime.fromtimestamp(timestamp, tz)\n\n def formatTime(self, record, datefmt=None):\n dt = self.converter(record.created)\n if datefmt:\n s = dt.strftime(datefmt)\n else:\n t = dt.strftime(self.default_time_format)\n s = self.default_msec_format % (t, record.msecs)\n return s\n\n logger = logging.getLogger(name)\n handler = logging.FileHandler(lfile)\n handler.setFormatter(Formatter(\"%(asctime)s: %(levelname)s: %(name)s: %(message)s\", \"%Y-%m-%dT%H:%M:%S%z\"))\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n return logger", "def create_logger(job_name, log_file=None, debug=True):\n logging.basicConfig(level=5,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M')\n logging.root.handlers = []\n if debug:\n chosen_level = 5\n else:\n chosen_level = logging.INFO\n logger = logging.getLogger(job_name)\n formatter = logging.Formatter(fmt='%(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S')\n if log_file is not None:\n log_dir = osp.dirname(log_file)\n if log_dir:\n if not osp.exists(log_dir):\n os.makedirs(log_dir)\n # cerate file handler\n fh = logging.FileHandler(log_file)\n fh.setLevel(chosen_level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # Colored stream handler\n sh = ColorStreamHandler()\n sh.setLevel(chosen_level)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n return logger", "def test_default():\n logger = logging.getLogger(__name__)\n log_all_levels(logger)\n log_all_levels_decorated(logger)\n log_all_levels_loop(logger)\n return logger", "def get_console_logger(name=None):\n if name is None:\n name = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n logger = logging.getLogger(name)\n\n # reset handlers\n logger.handlers = []\n sh = logging.StreamHandler()\n fmt = logging.Formatter(LOG_FMT)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n logger.setLevel(logging.INFO)\n\n return logger", "def init_logger(level, printout=True):\n root_logger = logging.getLogger(\"optimus\")\n root_logger.setLevel(level)\n\n # Redirect outputs to the void space, mostly for usage within unittests\n if not printout:\n from io import StringIO\n\n dummystream = StringIO()\n handler = logging.StreamHandler(dummystream)\n # Standard output with colored messages\n else:\n handler = logging.StreamHandler()\n handler.setFormatter(\n colorlog.ColoredFormatter(\n \"%(asctime)s - %(log_color)s%(message)s\", datefmt=\"%H:%M:%S\"\n )\n )\n\n root_logger.addHandler(handler)\n\n return root_logger", "def get_logger(self, name=\"amulet-logger\", level=logging.DEBUG):\n log = logging\n logger = log.getLogger(name)\n fmt = log.Formatter(\"%(asctime)s %(funcName)s \"\n \"%(levelname)s: %(message)s\")\n\n handler = log.StreamHandler(stream=sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(fmt)\n\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def get_logger(name):\n #### Configure Logger ####\n # Log to stdout\n logger = logging.getLogger(name)\n 
logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(message)s',\n '%m/%d/%Y %H:%M:%S')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n return logger", "def __init__(self, fmt, datefmt=None):\n logging.Formatter.__init__(self, fmt, datefmt)", "def loggerSetup(logLevel=logging.INFO):\n logger = logging.getLogger(__name__)\n outHandler = logging.StreamHandler(sys.stdout)\n outHandler.setFormatter(logging.Formatter(\"%(asctime)s:%(levelname)s:%(module)s: %(message)s\"))\n outHandler.setLevel(logLevel)\n logger.addHandler(outHandler)\n logger.setLevel(logLevel)\n return logger", "def getLogger():\n return GlobalLogger.logger", "def get_logger(name=None, level=\"warn\"):\n logger_name = str(uuid.uuid4())[:8] if name is None else name\n logger = logging.getLogger(logger_name)\n level = os.environ.get(\"LOG_LEVEL\", level)\n\n msg_formats = {\n \"debug\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n \"info\": \"%(asctime)s %(message)s [at %(filename)s:%(lineno)d]\",\n \"warn\": \"%(asctime)s %(message)s\",\n \"warning\": \"%(asctime)s %(message)s\",\n \"error\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n \"critical\": \"%(asctime)s [%(levelname)s] %(message)s [at %(filename)s:%(lineno)d]\",\n }\n level_mapping = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warn\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n }\n\n date_format = \"%Y-%m-%d %H:%M:%S\"\n formatter = logging.Formatter(fmt=msg_formats[level.lower()], datefmt=date_format)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n if len(logger.handlers) > 0:\n rm_idx = [idx for idx, handler in enumerate(logger.handlers) if isinstance(handler, logging.StreamHandler)]\n for idx in rm_idx:\n del logger.handlers[idx]\n logger.addHandler(handler)\n logger.setLevel(level_mapping[level.lower()])\n return logger", "def _get_logger():\n return logging.Logger(__name__)", "def setup_logger(name=None, level=None):\r\n from .config import Config\r\n\r\n logger = logging.getLogger(name)\r\n logger.handlers = []\r\n level = level or Config[\"logging.level\"].upper() or logging.ERROR\r\n if Config[\"logging.std\"]:\r\n handler = logging.StreamHandler()\r\n handler.setLevel(level)\r\n fmt = logging.Formatter(Config[\"logging.std_format\"])\r\n handler.setFormatter(fmt)\r\n logger.addHandler(handler)\r\n if Config[\"logging.file\"]:\r\n handler = logging.FileHandler(Config[\"logging.file\"])\r\n handler.setLevel(level)\r\n fmt = logging.Formatter(Config[\"logging.file_format\"])\r\n handler.setFormatter(fmt)\r\n logger.addHandler(handler)\r\n return logger", "def logger(name=None):\r\n\r\n log = logging.getLogger(name or 'logging')\r\n if HANDLER and HANDLER not in log.handlers:\r\n log.addHandler(HANDLER)\r\n\r\n return log", "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def setup_logger(level):\n logger = loguru.logger\n logger.remove()\n\n # Hearth logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Hearth,\n format=LoggerFormats.Hearth\n )\n\n # Stethoscope logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Stethoscope,\n format=LoggerFormats.Stethoscope\n )\n\n return logger", "def 
get_logger(level=logging.INFO, quite=False, debug=False, to_file=''):\n assert level in [logging.DEBUG, logging.INFO, logging.WARNING, logging.CRITICAL]\n logger = logging.getLogger('main')\n formatter = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')\n if debug:\n level = logging.DEBUG\n logger.setLevel(level=level)\n if not quite:\n if to_file:\n fh = logging.FileHandler(to_file)\n fh.setLevel(level=level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level=level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def get_logger(logger_name='default'):\n log = logging.getLogger(logger_name)\n log.setLevel(logging.DEBUG)\n log_format = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(log_format)\n if log.hasHandlers():\n log.handlers.clear()\n log.addHandler(ch)\n\n return log", "def getLogger():\n return logging.getLogger(__name__)", "def ResetLogger(self):\n self._formatter = ColoredFormatter(self.format)\n self._console_handler = logging.StreamHandler()\n self._console_handler.setFormatter(self._formatter)\n self.addHandler(self._console_handler)", "def logger(self, name):\n logger, _ = get_stdout_logger(name, verbosity=self.verbosity)\n return logger", "def get_logger():\n return logging.getLogger(__name__)", "def get_logger(name, fluentd_host='localhost', fluentd_port=24224):\n logger = logging.getLogger(name)\n fluent_handler = handler.FluentHandler(\n 'mole.logs',\n host=fluentd_host,\n port=fluentd_port,\n buffer_overflow_handler=overflow_handler\n )\n formatter = handler.FluentRecordFormatter(\n custom_format,\n format_json=False\n )\n fluent_handler.setFormatter(formatter)\n logger.addHandler(fluent_handler)\n return logger", "def _get_logger(name=None, level=None):\n\n logger = logging.getLogger(name)\n if level is not None:\n logger.setLevel(level)\n\n return logger", "def create_logger():\n logger = logging.getLogger(\"punctuation_logger\")\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.NOTSET) # Set Logger's level to NOTSET, default is WARNING\n\n # create the logging file handler\n if options.log_file is not None:\n fh = logging.FileHandler(options.log_file)\n \n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n fh.setLevel(logging.NOTSET)\n # add handler to logger object\n logger.addHandler(fh)\n return logger", "def getLogger(self):\n logger = logging.getLogger(self.name)\n logger.setLevel(self.level)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n # add a rotating handler\n if not logger.handlers:\n handler = RotatingFileHandler(self.path, self.maxBytes, self.backupCount)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n # Log to stream for debugging\n streamHandler = logging.StreamHandler(sys.stdout)\n streamHandler.setFormatter(formatter)\n logger.addHandler(streamHandler)\n\n return logger", "def get_logger(level):\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"[%(name)s|%(asctime)s] %(message)s\")\n ch.setFormatter(formatter)\n\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level)\n logger.addHandler(ch)\n return logger", "def get_logger():\n logging.config.dictConfig(LOGGING_APPLICATION_CONF)\n logger = logging.getLogger(__name__)\n\n if not 
logger.handlers:\n logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"%(asctime)s— %(levelname)s —\\\n %(funcName)s:%(lineno)d — %(message)s\")\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n return logger", "def get_logger(name):\n logger = logging.getLogger(name)\n # clear handlers if they were created in other runs\n if (logger.hasHandlers()):\n logger.handlers.clear()\n logger.setLevel(logging.DEBUG)\n # create formatter\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n # create console handler add add to logger\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n # create file handler add add to logger when name is not None\n if name is not None:\n fh = logging.FileHandler(f'{name}.log')\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n return logger", "def _log_format_onecolor(record):\n\n return LEVEL_COLORS.get(record.levelname)", "def make_default_logger(file_path=LOG_FILENAME):\n logger = logging.getLogger(\"Logger\")\n if not len(logger.handlers):\n logger.setLevel(logging.DEBUG)\n # Create a handler and attach it to the logger\n try:\n handler = logging.handlers.RotatingFileHandler(\n file_path, maxBytes=5120000, backupCount=7\n )\n except OSError as e:\n if e.errno == 2:\n errprint(\n \"\\nWarning: %s: %s. \"\n \"Have you created the directory for the log?\"\n % (\n e.strerror,\n file_path,\n )\n )\n elif e.errno == 13:\n errprint(\n \"\\nWarning: %s: %s. \"\n \"Cannot access file as user: %s\"\n % (\n e.strerror,\n file_path,\n getpass.getuser(),\n )\n )\n else:\n errprint(\n \"\\nIOError [%s]: %s\\n%s\"\n % (e.errno, e.strerror, traceback.format_exc())\n )\n errprint(\n \"Juriscraper will continue to run, and all logs will be \"\n \"sent to stderr.\"\n )\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s: %(message)s\")\n )\n logger.addHandler(handler)\n return logger", "def json_console_logger(init_context: \"InitLoggerContext\") -> logging.Logger:\n level = coerce_valid_log_level(init_context.logger_config[\"log_level\"])\n name = init_context.logger_config[\"name\"]\n\n klass = logging.getLoggerClass()\n logger_ = klass(name, level=level)\n\n handler = coloredlogs.StandardErrorHandler()\n\n class JsonFormatter(logging.Formatter):\n def format(self, record):\n return _seven.json.dumps(record.__dict__)\n\n handler.setFormatter(JsonFormatter())\n logger_.addHandler(handler)\n\n return logger_", "def get_logger(self, logname, logfile, loglevel, propagate):\n # TODO: simplify\n logger = logging.getLogger(logname)\n logger_handler = WatchedFileHandler(logfile, mode='w')\n # removed \\t%(name)-6s\n log_fmt = '%(asctime)s\\t%(levelname)-8s\\t%(message)s'\n logger_handler.setFormatter(\n logging.Formatter(log_fmt, '%b %d %H:%M:%S'))\n logger.addHandler(logger_handler)\n logger.propagate = propagate\n logger.setLevel(loglevel)\n return logger", "def make_logger(name=str(os.getpid())):\n if not sys.platform.startswith(\"win\") and sys.stderr.isatty():\n def add_color_emit_ansi(fn):\n \"\"\"Add methods we need to the class.\"\"\"\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color 
= '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new\n log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)\n log_file = os.path.join(gettempdir(), str(name).lower().strip() + \".log\")\n log.basicConfig(level=-1, filemode=\"w\", filename=log_file)\n log.getLogger().addHandler(log.StreamHandler(sys.stderr))\n adrs = \"/dev/log\" if sys.platform.startswith(\"lin\") else \"/var/run/syslog\"\n try:\n handler = log.handlers.SysLogHandler(address=adrs)\n except:\n log.debug(\"Unix SysLog Server not found, ignored Logging to SysLog.\")\n else:\n log.getLogger().addHandler(handler)\n log.debug(\"Logger created with Log file at: {0}.\".format(log_file))\n return log", "def get_logger(parent_module=''):\n return GLOBAL_LOGGER_BUILDER.build_logger(parent_module)", "def get_logger(name):\n\n logger = logging.getLogger(name)\n if not logger.handlers:\n out = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(name)s - %(levelname)s \\\n - %(module)s - %(message)s'\n )\n out.setFormatter(formatter)\n logger.addHandler(out)\n logger.setLevel(get_config('LOGGING_LEVEL'))\n logger.propagate = False\n return logger", "def logger_styles(self) -> str:\n\t\treturn ('info=blue;'\n\t\t\t\t'warning=green;'\n\t\t\t\t'error=red;'\n\t\t\t\t'critical=red,bold;'\n\t\t\t\t'debug=white')", "def logger(self) -> 'Logger':\n return self.Logger", "def setup_logger():\n logger = logging.getLogger(\"extract_brass_bedpe\")\n LoggerFormat = '[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s'\n logger.setLevel(level=logging.INFO)\n handler = logging.StreamHandler(sys.stderr)\n formatter = logging.Formatter(LoggerFormat, datefmt='%Y%m%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def setup_logger(name, log_file, formatter, level=logging.INFO):\r\n handler = logging.FileHandler(log_file, encoding='utf-8')\r\n handler.setFormatter(formatter)\r\n\r\n logger = logging.getLogger(name)\r\n logger.setLevel(level)\r\n logger.addHandler(handler)\r\n\r\n return logger", "def getLogger(name):\n return logging.getLogger(name)", "def init_logging(loglevel: int = logging.INFO, color: bool = False):\n ch = logging.StreamHandler()\n ch.setLevel(loglevel)\n\n if color:\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s [%(name)s] %(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red\",\n },\n )\n else:\n formatter = logging.Formatter(\"%(levelname)-8s [%(name)s] %(message)s\")\n\n ch.setFormatter(formatter)\n\n loggers = [\n logging.getLogger(\"freezing\"),\n logging.getLogger(\"stravalib\"),\n logging.getLogger(\"requests\"),\n logging.root,\n ]\n\n logging.root.addHandler(ch)\n\n for l in loggers:\n if l is logging.root:\n l.setLevel(logging.DEBUG)\n else:\n l.setLevel(logging.INFO)", "def logger(self) -> logging.Logger:\n cls = type(self)\n return logging.getLogger(cls.__module__ + \".\" + cls.__name__)", "def Logger(name, level=None):\n logger = logging.getLogger(name)\n if level:\n 
logger.setLevel(level)\n return logger", "def get_logger_with_class_if_new(logger_class, logger_name, *args, set_default_logger_class=False, **kwargs):\n\tif set_default_logger_class:\n\t\tlogging.setLoggingClass(logger_class)\n\t\treturn logging.getLogger(logger_name)\n\telse:\n\t\twith PushDefaultLoggingClassContext(logger_class=logger_class, logger_name=logger_name) as logger:\n\t\t\treturn logger", "def create_logger(name, log_file=None):\n l = logging.getLogger(name)\n formatter = logging.Formatter('[%(asctime)s] %(message)s')\n l.setLevel(logging.DEBUG)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.INFO)\n l.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n l.addHandler(fh)\n\n return l", "def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def logger():\n logger = logging.getLogger(\"Automation_Dispatcher\")\n logger.setLevel(settings.LOGLEVEL)\n handler = logging.StreamHandler()\n logger.addFilter(_Commmon_filter())\n handler.setFormatter(logging.Formatter('%(asctime)s [%(component)s]'\n ' [%(levelname)s] %(message)s', \"%Y-%m-%d %H:%M:%S\"))\n logger.addHandler(handler)\n return logger" ]
[ "0.6996591", "0.69907147", "0.6911573", "0.682854", "0.65775317", "0.65120155", "0.6396354", "0.6385912", "0.6341703", "0.6268824", "0.61986893", "0.6196991", "0.618999", "0.61819875", "0.6148457", "0.6129842", "0.6128831", "0.6113148", "0.61027133", "0.60929334", "0.60799354", "0.60786104", "0.60659117", "0.6062733", "0.60485417", "0.6026284", "0.6000058", "0.5996945", "0.59594566", "0.5957211", "0.5955854", "0.5948727", "0.5947981", "0.5946522", "0.5942693", "0.5923624", "0.5915035", "0.59087014", "0.5887464", "0.58733934", "0.58710843", "0.58424246", "0.5830813", "0.582755", "0.5803", "0.5802202", "0.5793107", "0.5759277", "0.5741447", "0.57371026", "0.5728861", "0.5724982", "0.5723968", "0.5722486", "0.57222664", "0.5712712", "0.57012045", "0.5700624", "0.5697441", "0.5694685", "0.56895965", "0.5688659", "0.56867635", "0.5686563", "0.5686508", "0.56855774", "0.5678336", "0.5664291", "0.5661253", "0.5643672", "0.5637868", "0.56298184", "0.5628623", "0.5628045", "0.56272936", "0.5622841", "0.56200975", "0.56176615", "0.56058353", "0.5605111", "0.5602948", "0.5598604", "0.55901355", "0.558561", "0.5564023", "0.55531466", "0.55522466", "0.5546176", "0.5540409", "0.5522182", "0.55176157", "0.55119264", "0.55063534", "0.5497186", "0.5496358", "0.54907346", "0.54881305", "0.5473765", "0.54734075", "0.5469062" ]
0.67095196
4
Create and use a logger.
def main():
    logger = setup_logger()
    logger.debug('a debug message')
    logger.info('an info message')
    logger.warning('a warning message')
    logger.error('an error message')
    logger.critical('a critical message')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger() -> logging.Logger:\n pass # TODO: Replace with implementation!", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger", "def create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger", "def setup_logger():\n LOG_DIR = unicode( os.environ.get(u'usep_gh__LOG_DIR') )\n LOG_LEVEL = unicode( os.environ.get(u'usep_gh__LOG_LEVEL') )\n filename = u'%s/usep_gh_handler.log' % LOG_DIR\n formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )\n logger = logging.getLogger( __name__ )\n # logger = logging.getLogger( u'usep_gh_handler' )\n level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }\n logger.setLevel( level_dict[LOG_LEVEL] )\n file_handler = logging.FileHandler( filename )\n file_handler.setFormatter( formatter )\n logger.addHandler( file_handler )\n logger.debug( u'in utils.log_helper.setup_logger(); log initialized at %s' % unicode(datetime.datetime.now()) )\n return logger", "def setupLogger(logger=None, log_format=\"%(asctime)s %(levelname)s [\"+APP_NAME+\"] %(message)s\", level=logging.INFO, log_name=APP_NAME+\".log\", logger_name=APP_NAME):\r\n\tif logger is None:\r\n\t\tlogger = logging.getLogger(logger_name)\r\n\t\r\n\tlogger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n\tlogger.setLevel(level)\r\n\t\r\n\tfile_handler = logging.handlers.RotatingFileHandler(make_splunkhome_path([\"var\", \"log\", \"splunk\", log_name]), maxBytes=2500000, backupCount=5)\r\n\tformatter = logging.Formatter(log_format)\r\n\tfile_handler.setFormatter(formatter)\r\n\t\r\n\tlogger.handlers = []\r\n\tlogger.addHandler(file_handler)\r\n\t\r\n\treturn logger", "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def logger_setup(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger_path = \"/tmp/\" + logger.name\n logger_format = '%(asctime)s %(name)s %(levelname)s %(lineno)d %(message)s'\n\n # set up logging to file\n logging.basicConfig(\n level=logging.INFO,\n format=logger_format,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logger_path,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which for console use\n formatter = logging.Formatter(logger_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n 
logging.getLogger('').addHandler(console)\n return logger", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def setup_logger(logger_name, logfile='crawler.log'):\n _logger = logging.getLogger(logger_name)\n _logger.setLevel(logging.INFO)\n h = logging.handlers.RotatingFileHandler(filename=logfile,\n maxBytes=10e6, backupCount=1)\n f = logging.Formatter(\n '%(asctime)s %(processName)-10s %(levelname)-8s %(message)s')\n h.setFormatter(f)\n _logger.addHandler(h)\n return _logger", "def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)", "def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)", "def logger() -> logging.Logger:\n return logging.getLogger(__name__)", "def logger():\n return logging.getLogger(__name__)", "def _create_logger(self, log_dir: str) -> logging.Logger:\n self.log_dir = log_dir\n self.log_file = os.path.join(log_dir, self.name)\n os.makedirs(self.log_dir, exist_ok=True)\n logger = logging.getLogger(self.log_file)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(self.log_file)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s\",\n datefmt=\"%Y-%m-%d-%H:%M:%S\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def plog_use_logger(name):\r\n global logger, loglevels\r\n logger = logging.getLogger(name)", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! 
This is Joey-NMT.\")\n return logger", "def setup_logger(level, name, use_rotating_handler=True):\r\n \r\n logger = logging.getLogger(name)\r\n logger.propagate = False # Prevent the log messages from being duplicated in the python.log file\r\n logger.setLevel(level)\r\n \r\n log_file_path = os.path.join( os.environ['SPLUNK_HOME'], 'var', 'log', 'splunk', 'radius_auth_rest_handler.log' )\r\n \r\n if use_rotating_handler:\r\n file_handler = logging.handlers.RotatingFileHandler(log_file_path, maxBytes=25000000, backupCount=5)\r\n else:\r\n file_handler = logging.FileHandler(log_file_path)\r\n \r\n formatter = logging.Formatter('%(asctime)s %(levelname)s ' + name + ' - %(message)s')\r\n file_handler.setFormatter(formatter)\r\n \r\n logger.addHandler(file_handler)\r\n \r\n return logger", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def setup_logger(name, log_file, level=logging.INFO):\n if name in ( \"\", None ):\n raise \"No name\"\n return\n\n if log_file in ( \"\", None ):\n raise \"No log_file\"\n return\n\n formatter = logging.Formatter(\n fmt = '%(asctime)s.%(msecs)03d %(levelname)s File: \"%(pathname)s\", line %(lineno)d, in %(module)s - %(funcName)s: %(message)s',\n datefmt= '%Y-%m-%d %H:%M:%S'\n )\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def create_logger():\n logger = logging.getLogger(\"punctuation_logger\")\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.NOTSET) # Set Logger's level to NOTSET, default is WARNING\n\n # create the logging file handler\n if options.log_file is not None:\n fh = logging.FileHandler(options.log_file)\n \n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n fh.setLevel(logging.NOTSET)\n # add handler to logger object\n logger.addHandler(fh)\n return logger", "def init_logger(name, path=None):\n import logging.handlers\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.propagate = 0\n _nf = ['[%(asctime)s]',\n '[%(name)s]',\n '[%(filename)20s:%(funcName)15s:%(lineno)5d]',\n '[%(levelname)s]',\n ' %(message)s']\n _cf = ['$GREEN[%(asctime)s]$RESET',\n '[%(name)s]',\n '$BLUE[%(filename)20s:%(funcName)15s:%(lineno)5d]$RESET',\n '[%(levelname)s]',\n ' $CYAN%(message)s$RESET']\n nformatter = logging.Formatter('-'.join(_nf))\n cformatter = ColoredFormatter('-'.join(_cf))\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(cformatter)\n\n if path:\n path += '/' + name + '.log'\n else:\n path = get_path('log') + '/' + name + '.log'\n rf = logging.handlers.RotatingFileHandler(path, maxBytes=5 * 1024 * 1024, backupCount=5)\n rf.setLevel(logging.DEBUG)\n rf.setFormatter(nformatter)\n\n logger.addHandler(ch)\n logger.addHandler(rf)\n 
return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.DEBUG):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(logging_formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def get_logger():\r\n global logger\r\n \r\n if logger:\r\n return logger\r\n else:\r\n return create_logger()", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def 
create_logger(logger_name,\n log_format=None,\n log_level=logging.INFO,\n log_path=None):\n logger = logging.getLogger(logger_name)\n assert (len(logger.handlers) == 0)\n logger.setLevel(log_level)\n if log_path is None:\n handler = logging.StreamHandler()\n else:\n os.stat(os.path.dirname(os.path.abspath(log_path)))\n handler = logging.FileHandler(log_path)\n handler.setLevel(log_level)\n if log_format is not None:\n formatter = logging.Formatter(log_format)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger():\n # Prepare log directory.\n try:\n os.mkdir('logs')\n except FileExistsError:\n pass\n\n # Create logger and formatter.\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s %(message)s')\n\n # Create and attach stream handler.\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n\n # Create and attach file handler.\n file_handler = logging.handlers.TimedRotatingFileHandler(\n 'logs/log.txt', when='d', encoding='utf-8')\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n\n return logger", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def create_logger_service(program_id, processor_id):\n logger = logging.getLogger(__name__)\n logger = logging.LoggerAdapter(logger,\n extra={'program_id': program_id,\n 'processor_id': processor_id})\n return logger", "def setup_logger(log_file_path =\"\"):\n formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'purple',\n }\n )\n logging.basicConfig(handlers=[logging.FileHandler(log_file_path, 'w', 'utf-8')],\n format=\"%(message)s\"\n )\n logger = logging.getLogger('')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def logger_initiate():\n logger.setLevel(logging.DEBUG)\n return logging.basicConfig(\n format=(\n '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s %(message)s'),\n datefmt='%Y-%m-%d %H:%M:%S')", "def get_logger(logger_name, logging_format, file_name, level=logging.INFO):\n path, prepared = '', True\n for cat in file_name.split('/')[1:-1]:\n path += '/%s' % cat\n if not os.path.exists(path):\n try:\n os.mkdir(path)\n except PermissionError:\n prepared = False\n break\n if not prepared:\n file_name = '/tmp/%s' % file_name.split('/')[-1]\n logging.basicConfig(level=level, format=logging_format)\n log = logging.getLogger(logger_name)\n handler = logging.FileHandler(file_name, encoding='utf8')\n handler.setFormatter(logging.Formatter(logging_format))\n log.addHandler(handler)\n log.setLevel(level=level)\n return log", "def create_logger(name, log_file=None):\n l = logging.getLogger(name)\n formatter = logging.Formatter('[%(asctime)s] %(message)s')\n l.setLevel(logging.DEBUG)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.INFO)\n l.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n l.addHandler(fh)\n\n return l", "def create_logger(app_name: str) -> logging.Logger:\n if 
not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir(os.path.join(os.getcwd(), 'logs'))\n\n app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')\n\n logger = logging.getLogger(f\"{app_name}-logger\")\n logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)\n handler.setLevel(logging.DEBUG)\n\n # Set the formatter\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n # Set it as the base handler\n logger.base_handler = handler\n\n # Also add a newline handler to switch to later\n newline_handler = logging.FileHandler(filename=app_logfile, mode='a')\n newline_handler.setLevel(logging.DEBUG)\n newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format\n \n logger.newline_handler = newline_handler\n\n # Also add the provision for a newline handler using a custom method attribute\n logger.newline = types.MethodType(add_newlines, logger)\n\n # Also add a StreamHandler for printing to stderr\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n \n logger.addHandler(console_handler)\n\n return logger", "def init_logger(self, logger_path,\n logger_name='Experiment') -> logging.Logger:\n self.logger = logging.getLogger(logger_name)\n\n self.logger.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(logger_path) # TOD bug here\n formatter = logging.Formatter('%(asctime)s||%(message)s')\n file_handler.setFormatter(formatter)\n self.logger.addHandler(file_handler)", "def logger(name=None):\r\n\r\n log = logging.getLogger(name or 'logging')\r\n if HANDLER and HANDLER not in log.handlers:\r\n log.addHandler(HANDLER)\r\n\r\n return log", "def _get_logger():\n return logging.Logger(__name__)", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def load_logger(factory):\n def inner():\n web.ctx.logger = factory()\n return inner", "def setup_logger(logger: logging.Logger, file_name: str):\n log_fmt = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')\n # Console Handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(log_fmt)\n # File Handler\n fh = RotatingFileHandler(\n filename=f'log/{file_name}.log',\n maxBytes=int(1e6), backupCount=3,\n encoding='utf-8', mode='a'\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(log_fmt)\n logger.addHandler(fh)\n logger.addHandler(ch)", "def logging(cls, logger=None):\n logger = logger or logging.getLogger()\n on_message = lambda msg: logger.log(msg.severity, msg.text)\n return cls(on_message=on_message)", "def setup_logger(level):\n logger = loguru.logger\n logger.remove()\n\n # Hearth logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda record: record[\"extra\"].get(\"service\") == LoggerServices.Hearth,\n format=LoggerFormats.Hearth\n )\n\n # Stethoscope logger\n logger.add(\n sys.stdout,\n level=level,\n filter=lambda 
record: record[\"extra\"].get(\"service\") == LoggerServices.Stethoscope,\n format=LoggerFormats.Stethoscope\n )\n\n return logger", "def setup_logger():\n\n global _logger\n global _has_logbook\n\n if _has_logbook:\n _logger = Logger('UoM_WIFI')\n try:\n log_path = join(sys.argv[1], '%s.log' % USERNAME)\n except IndexError:\n log_path = join(split(abspath(__file__))[0], '%s.log' % USERNAME)\n\n # because the log file is owned by root, if this program is ran by a\n # regular user, we need to prevent it from crashing by writing to a file\n # owned by root\n try:\n # create the handler\n log_handler = RotatingFileHandler(log_path)\n\n # push the context object to the application stack\n log_handler.push_application()\n except IOError:\n _has_logbook = False", "def setup_logger(logger_name, level=\"INFO\", log_file: str = None):\n assert level in LOG_LEVELS\n\n formatter = logging.Formatter('%(message)s')\n if log_file:\n handler = logging.FileHandler(log_file, mode=\"w\")\n else:\n handler = logging.StreamHandler(stdout)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(getattr(logging, level))\n logger.addHandler(handler)\n return logger", "def setup_logging(logger):\n hdlr = logging.FileHandler('linter.log', 'w')\n logger.addHandler(hdlr)\n logger.setLevel(logging.DEBUG)\n return logger", "def build_logger(self):\n pass", "def get_logger():\n return logging.getLogger(__name__)", "def setup_logger():\n formatter = ColoredFormatter(\n (\n '%(log_color)s%(levelname)-5s%(reset)s '\n '%(yellow)s[%(asctime)s]%(reset)s'\n '%(green)s %(name)s %(purple)s %(filename)s %(purple)s %(funcName)s %(purple)s:%(lineno)d%(reset)s '\n '%(bold_blue)s%(message)s%(reset)s'\n ),\n datefmt='%y-%m-%d %H;%M:%S',\n log_colors={\n 'DEBUG': 'blue',\n 'INFO': 'yellow',\n 'WARNING': 'red',\n 'ERROR': 'blue,bg_bold_red',\n 'CRITICAL': 'red,bg_white',\n }\n )\n\n logger = logging.getLogger('shen-yue-is-beautiful')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def __setup_logger(name, log_file, level=logging.WARNING, stream=True):\n log_format = logging.Formatter(\"%(asctime)s%(filename)s:%(lineno)-3d %(levelname)s %(message)s\")\n handler = logging.FileHandler(log_file)\n handler.setFormatter(log_format)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n if stream is True:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(log_format)\n logger.addHandler(stream_handler)\n return logger", "def construct_logger(in_logger_file_path):\n logger_configfile_path = in_logger_file_path + \"/log.properties\"\n # print logger_configfile_path\n logging.config.fileConfig(logger_configfile_path)\n logger = logging.getLogger(\"ITR2\")\n return logger", "def get_logger(name: str) -> logging.Logger:\n try:\n p = Path(name)\n if p.exists():\n name = str(p.absolute().relative_to(Path.cwd()).as_posix())\n except:\n pass\n logger = logging.getLogger(name)\n # logger.addHandler(TqdmLoggingHandler())\n return logger", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] 
%(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def setup_logger(name, log_file, format, log_mode, stream_handler):\n DATE_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n handler = logging.FileHandler(log_file, mode=log_mode)\n handler.setFormatter(logging.Formatter(fmt=format, datefmt=DATE_FORMAT))\n\n logger = logging.getLogger(name)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n if stream_handler:\n logger.addHandler(logging.StreamHandler()) # stderr\n\n return logger", "def create_logger(level=logging.DEBUG, record_format=None):\n if record_format is None:\n record_format = \"[%(asctime)s][%(thread)d][%(filename)s][line: %(lineno)d][%(levelname)s] ## %(message)s\"\n\n logger = logging.getLogger(\"mylogger\")\n logger.setLevel(level)\n # 修改\n fh.setLevel(level)\n ch.setLevel(level)\n formatter = logging.Formatter(record_format)\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n logger.addHandler(fh)\n logger.addHandler(ch)\n return logger", "def loggerSetup(logLevel=logging.INFO):\n logger = logging.getLogger(__name__)\n outHandler = logging.StreamHandler(sys.stdout)\n outHandler.setFormatter(logging.Formatter(\"%(asctime)s:%(levelname)s:%(module)s: %(message)s\"))\n outHandler.setLevel(logLevel)\n logger.addHandler(outHandler)\n logger.setLevel(logLevel)\n return logger", "def open_log(self, log_name='autotest'):\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(self.console_handler)\n\n self.__logtofile(log_name)\n\n return logger", "def create_logger(log_dir):\n logger = logging.getLogger(__file__)\n logger.setLevel(logging.INFO)\n\n # file logger\n log_filename = \"probabilist_connectogram_%s.log\" % time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n if log_dir:\n log_path = os.path.join(log_dir, log_filename)\n else:\n log_path = log_filename\n file_handler = logging.FileHandler(log_path)\n formatter = logging.Formatter('%(asctime)s :: %(message)s')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n # console logger\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n logger.info(\"Log path: %s\" % log_path)\n\n return logger", "def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n 
formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger", "def _generate_log(path):\n # Create a logger and set the level.\n logger = logging.getLogger(\"Log_info\")\n # Check handler exists\n if len(logger.handlers) > 0:\n return logger # Logger already exists\n # set logger level\n logger.setLevel(logging.DEBUG)\n # Create file handler, log format and add the format to file handler\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(path)\n\n # See https://docs.python.org/3/library/logging.html#logrecord-attributes\n # for log format attributes.\n log_format = \"%(levelname)s %(asctime)s %(message)s\"\n formatter = logging.Formatter(log_format)\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger", "def _logger():\n return logging.getLogger(module_name)", "def get_logger(name, file_name_path='yang.log'):\n # check if file exists\n exists = False\n if os.path.isfile(file_name_path):\n exists = True\n FORMAT = '%(asctime)-15s %(levelname)-8s %(name)5s => %(message)s - %(lineno)d'\n DATEFMT = '%Y-%m-%d %H:%M:%S'\n logging.basicConfig(datefmt=DATEFMT, format=FORMAT, filename=file_name_path, level=logging.INFO)\n logger = logging.getLogger(name)\n # if file didn t exist we create it and now we can set chmod\n if not exists:\n os.chmod(file_name_path, 0o664 | stat.S_ISGID)\n return logger", "def setup_logger():\n logger = logging.getLogger(\"extract_brass_bedpe\")\n LoggerFormat = '[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s'\n logger.setLevel(level=logging.INFO)\n handler = logging.StreamHandler(sys.stderr)\n formatter = logging.Formatter(LoggerFormat, datefmt='%Y%m%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def setup_logger(log_name, log_level=LOG_LEVEL, log_handler=LOG_HANDLER):\n if not os.getenv('IS_AIRFLOW'):\n logger = logging.getLogger(log_name)\n if not len(logger.handlers):\n log_handler.setFormatter(logging.Formatter(LOG_FORMAT))\n log_handler.setLevel(log_level)\n logger.addHandler(log_handler)\n logger.setLevel(log_level)\n logger.propagate = False\n return logger\n else:\n return logging", "def initialize(context, level):\n if not Log.initialized:\n Log.logger = logging.getLogger(context)\n Log.initialized = True\n logging.basicConfig(\n filename=CONST.APP_LOG_FILENAME,\n format=CONST.APP_LOG_FORMAT,\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n Log.logger.setLevel(level)\n Log.logger.log(50, 'Logging initialised, level={}'.format(level))\n return Log.logger", "def getLogger(self, *args, **kwargs):\r\n return loggers.getLogger(*args, **kwargs)", "def init_logger():\n lformat = \"%(asctime)s [%(levelname)-5.5s] [%(name)s] [%(threadName)-12.12s] %(message)s\"\n\n logging.basicConfig(\n level=logging.INFO,\n format=lformat,\n )\n\n file_handler = 
handlers.RotatingFileHandler(\n \"{0}/{1}.log\".format('.', 'meta-meta-hive'),\n maxBytes=(50*1024*1024),\n backupCount=7\n )\n file_handler.setFormatter(logging.Formatter(lformat))\n logging.getLogger().addHandler(file_handler)\n return", "def setup(log_level, log_name):\n\n # Log format string for flake8 compliance\n log_fmt = ('%(levelname)-8s %(asctime)s%(filename)s:%(lineno)-4s '\n '%(message)s')\n\n # Configure logging\n config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': log_fmt,\n 'datefmt': '%Y-%m-%d %H:%M:%S',\n },\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n },\n 'loggers': {\n 'createtransfers': {\n 'level': log_level,\n 'handlers': ['console'],\n },\n },\n }\n\n logger = logging.getLogger(log_name)\n logging.config.dictConfig(config)\n return logger", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def get_logger(log_name: str) -> logging.Logger:\n logger = logging.getLogger(log_name)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter('%(asctime)s - %(name)s: %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def log():\n return logging.getLogger(__name__)", "def initlogger(cls, app):\n global mylogapp\n global mylogger\n if uselogger == 0:\n return None\n # pylint: disable=E0601\n if mylogger == 0:\n name = \"Log_\" + time.strftime(\"%Y%m%d_%H_%M_%S\")\n logging.basicConfig(level=logging.DEBUG,\n format='%(name)-12s:%(asctime)s ' +\n '%(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename=name,\n filemode='w')\n mylogger = 1\n if app not in mylogapp:\n logger = logging.getLogger(app)\n mylogapp[app] = logger\n return mylogapp[app]", "def create_logger(logging, tool_name, level):\n logger = logging.getLogger(tool_name)\n\n # Create handlers\n handler = logging.StreamHandler()\n 
handler.setLevel(level)\n\n # Create formatters and add it to handlers\n logformat = logging.Formatter(\n '[%(name)s - %(asctime)s] %(levelname)s: %(message)s')\n handler.setFormatter(logformat)\n\n # Add handlers to the logger\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def get_logger(self, name=\"amulet-logger\", level=logging.DEBUG):\n log = logging\n logger = log.getLogger(name)\n fmt = log.Formatter(\"%(asctime)s %(funcName)s \"\n \"%(levelname)s: %(message)s\")\n\n handler = log.StreamHandler(stream=sys.stdout)\n handler.setLevel(level)\n handler.setFormatter(fmt)\n\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def get_logger(level=None, name=None, filename=None, log_dir=None):\n if isinstance(log_dir, str):\n log_dir = Path(log_dir)\n if level is None:\n level = settings.log_level\n if name is None:\n name = settings.log_name\n if filename is None:\n filename = settings.log_filename\n\n logger = lg.getLogger(name)\n\n # if a logger with this name is not already set up\n if len(logger.handlers) == 0:\n\n # get today's date and construct a log filename\n todays_date = dt.datetime.today().strftime(\"%Y_%m_%d\")\n\n if not log_dir:\n log_dir = settings.logs_folder\n\n log_filename = log_dir / \"{}_{}.log\".format(filename, todays_date)\n\n # if the logs folder does not already exist, create it\n if not log_dir.exists():\n log_dir.makedirs_p()\n # create file handler and log formatter and set them up\n formatter = lg.Formatter(\n \"%(asctime)s [%(process)d] %(levelname)s - %(name)s - %(\" \"message)s\"\n )\n if settings.log_file:\n handler = lg.FileHandler(log_filename, encoding=\"utf-8\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n if settings.log_console:\n handler = lg.StreamHandler(sys.stdout)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def setup_logger(name=None, level=None):\r\n from .config import Config\r\n\r\n logger = logging.getLogger(name)\r\n logger.handlers = []\r\n level = level or Config[\"logging.level\"].upper() or logging.ERROR\r\n if Config[\"logging.std\"]:\r\n handler = logging.StreamHandler()\r\n handler.setLevel(level)\r\n fmt = logging.Formatter(Config[\"logging.std_format\"])\r\n handler.setFormatter(fmt)\r\n logger.addHandler(handler)\r\n if Config[\"logging.file\"]:\r\n handler = logging.FileHandler(Config[\"logging.file\"])\r\n handler.setLevel(level)\r\n fmt = logging.Formatter(Config[\"logging.file_format\"])\r\n handler.setFormatter(fmt)\r\n logger.addHandler(handler)\r\n return logger", "def create_logger(log_level):\n log_formatter = logging.Formatter(fmt=LOG_FORMAT, datefmt=LOG_TIMESTAMP_FORMAT)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n logger = logging.getLogger('blockip')\n logger.setLevel(log_level)\n logger.addHandler(console_handler)\n return logger", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\n datefmt=\"%m-%d %H:%M:%S\"):\n formatter = logging.Formatter(_format, datefmt)\n logger = logging.getLogger()\n logger.setLevel(level)\n\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\n _dir = os.path.dirname(log_path)\n if not os.path.isdir(_dir):\n os.makedirs(_dir)\n\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n 
handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n handler = logging.StreamHandler()\n handler.setLevel(level)\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def get_logger(name, log_dir, config_dir):\n config_dict = json.load(open(config_dir + 'log_config.json'))\n config_dict['handlers']['file_handler']['filename'] = log_dir + name.replace('/', '-')\n logging.config.dictConfig(config_dict)\n logger = logging.getLogger(name)\n\n std_out_format = '%(asctime)s - [%(levelname)s] - %(message)s'\n consoleHandler = logging.StreamHandler(sys.stdout)\n consoleHandler.setFormatter(logging.Formatter(std_out_format))\n logger.addHandler(consoleHandler)\n\n return logger", "def setup_logger(args):\n\timport logging\n\timport sys\n\timport pplogger\n\n\tlogger = None\n\tif args.NoLogger:\n\t\tlogger = pplogger.Logger(name=current_script_name, log_dir=args.output_dir, log_format=1, enabled=False).get()\n\telse:\n\t\tcurrent_script_name = os.path.basename(__file__).replace('.py','')\n\t\tlogger = pplogger.Logger(name=current_script_name, log_dir=args.output_dir, log_format=1, enabled=True).get() # gives logname --> snapsnap_query.py\n\t\tlogger.setLevel(logging.DEBUG)\n\t\t## This works. Exceptions are written to the log AND printed to sys.stderr\n\t\t## An alternative solution is to make one big \"try except\" block in main:\n\t\tdef handleException(excType, excValue, traceback, logger=logger):\n\t\t\tlogger.error(\"Logging an uncaught exception\", exc_info=(excType, excValue, traceback))\n\t\tsys.excepthook = handleException\n\treturn logger", "def __create_logger(who, level):\n global loggers\n global toconsole\n global LEVELS\n global console\n global logfile\n loggers[who] = logging.getLogger(who)\n loggers[who].setLevel(level)\n format = logging.Formatter(\"%(asctime)s - %(name)s - \"\\\n \"%(levelname)s - %(message)s\")\n if (toconsole):\n if (console == None):\n console = logging.StreamHandler()\n console.setFormatter(format)\n loggers[who].addHandler(console)\n else:\n if (logfile == None):\n logfile = logging.handlers.RotatingFileHandler('/var/log/yapc.log',\n maxBytes=10485760,\n backupCount=10)\n logfile.setFormatter(format)\n loggers[who].addHandler(logfile)\n loggers[GENERIC_LOG_NAME].log(LEVELS[\"VDBG\"],\n \"Add logger for \"+who+\" at level \"+str(level))", "def init_log(log_path,\r\n name=None,\r\n level=logging.INFO,\r\n when=\"D\",\r\n backup=7,\r\n format=\"%(name)s:%(levelname)s:%(asctime)s:%(filename)s:%(lineno)d * %(thread)d %(message)s\",\r\n datefmt=\"%m-%d %H:%M:%S\"):\r\n formatter = logging.Formatter(format, datefmt)\r\n logger = logging.getLogger(name)\r\n logger.setLevel(level)\r\n\r\n dir = os.path.dirname(log_path)\r\n if not os.path.isdir(dir):\r\n os.makedirs(dir)\r\n\r\n # 输出info以上的信息\r\n handler = logging.handlers.TimedRotatingFileHandler(filename=log_path + \".log\",\r\n when=when,\r\n backupCount=backup)\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n # 只输出warning的信息\r\n handler = logging.handlers.TimedRotatingFileHandler(filename=log_path + \".log.wf\",\r\n when=when,\r\n backupCount=backup)\r\n handler.setLevel(logging.WARNING)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n # 标准输出流\r\n stdout_handler = logging.StreamHandler(stream=sys.stdout)\r\n stdout_handler.setLevel(level)\r\n 
stdout_handler.setFormatter(formatter)\r\n logger.addHandler(stdout_handler)\r\n\r\n return logger", "def init_logger(level=logging.DEBUG, when=\"D\", backup=7,\r\n _format=\"%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s\",\r\n datefmt=\"%m-%d %H:%M:%S\"):\r\n formatter = logging.Formatter(_format, datefmt)\r\n logger = logging.getLogger()\r\n logger.setLevel(level)\r\n\r\n log_path = ops.join(os.getcwd(), 'logs/shadownet.log')\r\n _dir = os.path.dirname(log_path)\r\n if not os.path.isdir(_dir):\r\n os.makedirs(_dir)\r\n\r\n handler = handlers.TimedRotatingFileHandler(log_path, when=when, backupCount=backup)\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n handler = handlers.TimedRotatingFileHandler(log_path + \".log.wf\", when=when, backupCount=backup)\r\n handler.setLevel(logging.WARNING)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n\r\n handler = logging.StreamHandler()\r\n handler.setLevel(level)\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n return logger", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def _logger(self) -> logging.Logger:\n return logging.getLogger(\n type(self).__name__\n )", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def get_logger():\n # create logger\n logger = logging.getLogger(\"simple_thread_example\")\n # set logger level\n logger.setLevel(logging.DEBUG)\n\n # create a file handler\n fh = logging.FileHandler(\"logs/simple_thread_example.log\")\n # create a formatter and set the formatter for the handler.\n fmt = logging.Formatter(\"%(asctime)s - %(name)s - %(threadName)s - %(levelname)s - %(message)s\")\n fh.setFormatter(fmt)\n # add the Handler to the logger\n logger.addHandler(fh)\n\n # return the logger\n return logger", "def set_logger(**kwargs):\n # create logger\n if not os.path.exists(kwargs.get('log_dir_path')):\n os.makedirs(kwargs.get('log_dir_path'))\n logger = logging.getLogger(kwargs.get('logger_name'))\n if kwargs.get('log_level').lower() == 'info':\n log_level = 20\n elif kwargs.get('log_level').lower() == 'warning':\n log_level = 30\n elif kwargs.get('log_level').lower() == 'error':\n log_level = 40\n elif kwargs.get('log_level').lower() == 'critical':\n log_level = 50\n else:\n log_level = 10\n logger.setLevel(log_level)\n # Create a file handler\n log_file_path = os.path.join(kwargs.get('log_dir_path'), kwargs.get('log_file_name'))\n handler = logging.FileHandler(log_file_path)\n handler.setLevel(log_level)\n # Create a logging format\n formatter = logging.Formatter(\n fmt='%(asctime)s - %(levelname)s[%(lineno)d] - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n handler.setFormatter(formatter)\n # Add the handlers to the 
logger\n logger.addHandler(handler)\n return logger", "def _get_logger(self):", "def getLogger(\n verbose: int = 0,\n filename: Optional[str] = None,\n name: str = \"ttslearn\",\n add_stream_handler: bool = True,\n) -> Logger:\n global _initialized\n logger = logging.getLogger(name)\n if verbose >= 10:\n logger.setLevel(logging.DEBUG)\n elif verbose > 0:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARN)\n\n if _initialized.get(name, False):\n return logger\n else:\n _initialized[name] = True\n\n if add_stream_handler:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(format))\n logger.addHandler(stream_handler)\n\n if filename is not None:\n Path(filename).parent.mkdir(parents=True, exist_ok=True)\n file_handler = logging.FileHandler(filename=filename)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter(format))\n logger.addHandler(file_handler)\n\n return logger", "def _init():\n global logger\n logger = logging.getLogger(\"Log\")", "def initialize_logger(ckpt_dir=None, level=logging.INFO, logger=None) -> None:\n if logger is None:\n logger = logging.getLogger('scETM')\n logger.setLevel(level)\n if logger.hasHandlers():\n for handler in logger.handlers:\n if isinstance(handler, logging.FileHandler):\n logger.warning(f'Reinitializing... The file handler {handler} will be closed.')\n logger.removeHandler(handler)\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s - %(name)s: %(message)s')\n if not logger.hasHandlers():\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n stream_handler.setLevel(level)\n logger.addHandler(stream_handler)\n if ckpt_dir is not None:\n file_handler = logging.FileHandler(os.path.join(ckpt_dir, 'log.txt'))\n file_handler.setFormatter(formatter)\n file_handler.setLevel(level)\n logger.addHandler(file_handler)", "def _get_logger():\n logger = logging.getLogger(__name__)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n ch.setFormatter(logging.Formatter(\"%(asctime)s [%(levelname)8s] %(message)s\"))\n\n logger.addHandler(ch)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def setup_logger(name, log_file, formatter, level=logging.INFO):\r\n handler = logging.FileHandler(log_file, encoding='utf-8')\r\n handler.setFormatter(formatter)\r\n\r\n logger = logging.getLogger(name)\r\n logger.setLevel(level)\r\n logger.addHandler(handler)\r\n\r\n return logger" ]
[ "0.81048304", "0.80497605", "0.78264886", "0.77451503", "0.7742907", "0.762832", "0.7588399", "0.7549163", "0.75487506", "0.7536273", "0.74903685", "0.74829865", "0.74677", "0.7460846", "0.7454843", "0.74143606", "0.74058366", "0.73974717", "0.73834455", "0.73752475", "0.736043", "0.73581713", "0.7350922", "0.7350922", "0.7350922", "0.7350922", "0.7349743", "0.73452127", "0.734329", "0.73341817", "0.73317707", "0.7320327", "0.7319525", "0.72932565", "0.7273163", "0.7259334", "0.7252734", "0.72509414", "0.7250052", "0.72445357", "0.72398746", "0.7234224", "0.7221224", "0.72095466", "0.72001565", "0.7185371", "0.7183594", "0.7179753", "0.7177692", "0.7168367", "0.7164671", "0.7154149", "0.7150242", "0.7146561", "0.7130771", "0.7120846", "0.71160245", "0.7115266", "0.710708", "0.7101588", "0.70696795", "0.7069152", "0.7068985", "0.70663613", "0.7059521", "0.7053013", "0.70442986", "0.70434684", "0.7042281", "0.70355433", "0.70347846", "0.70312047", "0.7027609", "0.7017297", "0.7016237", "0.70121634", "0.7008839", "0.70059097", "0.7005688", "0.6999433", "0.6994105", "0.6990226", "0.6976435", "0.6974165", "0.69707674", "0.6961575", "0.69595855", "0.69595295", "0.69577634", "0.695604", "0.6949215", "0.6944927", "0.6944822", "0.6939384", "0.693424", "0.69251555", "0.692057", "0.6912567", "0.69121903", "0.69068646", "0.6899098" ]
0.0
-1
Converts text data to feature data with the instance's vectorizer. Returns None.
def process_data(self):
    self.processed_data = dict()
    for split, text_data_ in self.text_data.items():
        y = text_data_[self.target_col].values
        print("Vectorizing for split: " + split)
        x = np.array([self.vectorizer(x_) for x_ in text_data_['Text']])
        self.processed_data[split] = {'x': x, 'y': y}
    self.set_split(self.split_)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def TransformData(text):\n global COUNT_VECTORIZER\n if COUNT_VECTORIZER is None:\n COUNT_VECTORIZER = CountVectorizer(analyzer = 'word', lowercase = True)\n COUNT_VECTORIZER.fit(text)\n features = COUNT_VECTORIZER.transform(text)\n features_nd = features.toarray() # for easy usage\n global TFIDF\n if TFIDF is None:\n TFIDF = TfidfTransformer(use_idf=False)\n TFIDF.fit(features_nd)\n text_tfidf = TFIDF.transform(features_nd)\n return text_tfidf", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))", "def vectorize_data(self, data, idf=False):\r\n\r\n # collect only the cleaned text of the tweet\r\n text = []\r\n for tweet in data:\r\n if not tweet.get_processed_text():\r\n tweet.set_processed_text(self.clean_tweet(tweet))\r\n text.append(tweet.get_processed_text())\r\n\r\n # vectorize tweets\r\n\r\n if idf:\r\n vectorizer = TfidfVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n else:\r\n vectorizer = CountVectorizer(min_df=((len(data) // 1000) + 1), max_df=10000, ngram_range=(1, 3))\r\n\r\n # vectorizer = TFVectorizing()\r\n vectors = vectorizer.fit_transform(text)\r\n return vectors", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def create_vectorizer(ds):\n vectorize_layer = TextVectorization(\n standardize=clean_text,\n split=\"whitespace\",\n max_tokens=MAX_WORDS - 1,\n output_mode=\"int\",\n output_sequence_length=MAX_LEN,\n )\n vectorize_layer.adapt(ds.map(lambda text, label: text))\n return vectorize_layer", "def useTfidfVectorizer(self, data):\n if self.results:\n print()\n print(\"Extracting features from the training dataset using a sparse vectorizer\", end=\" - \")\n t0 = time()\n \n vectorizer = TfidfVectorizer(max_features=10000, stop_words='english',norm='l2',use_idf=True, sublinear_tf=False,encoding='utf-8')\n matrix = vectorizer.fit_transform(data)\n \n if self.results:\n print(\"done in %0.3fs\" % (time() - t0))\n print(\"n_samples: %0.3d, n_features: %d\" % matrix.shape)\n print()\n \n feature_names = vectorizer.get_feature_names()\n return matrix, feature_names", "def vectorize(self, source_text, target_text, use_dataset_max_lengths=True):\r\n \r\n data = super().vectorize(source_text, target_text, use_dataset_max_lengths)\r\n \r\n mltm_x_vector = self.mltm_vectorizer.vectorize(source_text.lower())\r\n mltm_x_vector = mltm_x_vector.astype(np.float32)\r\n \r\n data[\"x_source_mltm_vector\"] = mltm_x_vector\r\n return data", "def preprocess(self, data_f):\n \n return self.vec.transform(data_f.review)", "def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec", "def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in 
self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n )\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def get_dataset_features(text):\n return model.extract(text)", "def vectorize_text(df: pd.DataFrame):\n # Creating a stop_words list set that are common to many questions.\n common_phrases = [\n 'read the sentence from the passage',\n 'which of the following best describes',\n 'which is the best one sentence * for the section',\n 'which sentence from the passage provides the most evidence'\n 'select the sentence that does not support the central idea of the article',\n 'supports the main idea',\n 'select the paragraph from the section that explains how that shows the ',\n 'that is most relevant to be included in the summary of the article',\n 'according to the article',\n 'which of these is not one',\n ]\n stop_words = stopwords.words('english')\n [stop_words.extend(x.split()) for x in common_phrases]\n\n ct_vectorizer = CountVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n dtype='uint8')\n\n tfidf_vectorizer = TfidfVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n sublinear_tf=True, # Replace tf with 1 + log(tf).\n smooth_idf=True, # Default 1 doc for each term.\n dtype=np.float32)\n\n # Count & tf-idf vectorization learns vocab and transforms data into matrices.\n ct_vec = ct_vectorizer.fit_transform(np.array(df.text))\n tfidf = tfidf_vectorizer.fit_transform(np.array(df.text))\n # print(\"Shape of ct_vec:\", ct_vec.shape)\n # print('Size of ct_vec:', sys.getsizeof(ct_vec))\n # print(\"Shape of tfidf:\", tfidf.shape)\n # print('Size of tfidf:', sys.getsizeof(tfidf), '\\n')\n\n ct_names = ct_vectorizer.get_feature_names()\n tf_names = tfidf_vectorizer.get_feature_names()\n\n df_cv = pd.concat(\n [df, pd.DataFrame(ct_vec.toarray(), columns=ct_names)],\n axis=1)\n df_tfidf = pd.concat(\n [df, pd.DataFrame(tfidf.toarray(), columns=tf_names)],\n axis=1)\n\n return (\n df_cv,\n ct_vec,\n ct_names,\n df_tfidf,\n tfidf,\n tf_names\n )", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All 
lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def instance2fv(self, text):\n if isinstance(text, unicode):\n text = text.encode('utf8')\n\n arr = np.zeros((self.n_feats,), dtype='uint32')\n\n # Convert the text to a sequence of ascii values\n ords = map(ord, text)\n\n # Count the number of times we enter each state\n state = 0\n statecount = defaultdict(int)\n for letter in ords:\n state = self.tk_nextmove[(state << 8) + letter]\n statecount[state] += 1\n\n # Update all the productions corresponding to the state\n for state in statecount:\n for index in self.tk_output.get(state, []):\n arr[index] += statecount[state]\n\n # The returned vector is the TFxIDF vector. The IDF for the\n # linguini system is actually the inv-lang-freq, and this is\n # pre-computed from the training data. 
We also normalize to len 1\n # at this stage.\n retval = arr * self.ilf\n return retval", "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def __init__(self, text):\n self.text = text\n self.train_vec = np.load('feat.npy')\n self.train_output = pickle.load(open('mylist.pkl', 'rb'))\n self.vec = pickle.load(open('vector.pkl', 'rb'))", "def vectorize_text(corpus):\n bag_of_words_model = CountVectorizer()\n\n # performs the above described three tasks on the given data corpus.\n dense_vec_matrix = bag_of_words_model.fit_transform(corpus).todense()\n bag_of_word_df = pd.DataFrame(dense_vec_matrix)\n bag_of_word_df.columns = sorted(bag_of_words_model.vocabulary_)\n return bag_of_word_df", "def get_text_feature(texts,\n labels=None,\n nrow_train=None,\n vec='bow',\n lowercase=False,\n analyzer='word',\n single_token=True,\n ngram_range=(1, 1),\n stop_words=None,\n min_df=2,\n binary=True,\n select_k=None):\n from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n from sklearn.feature_selection import SelectKBest, chi2\n\n # keep single char as word\n if single_token:\n token_pattern = r\"\\b\\w+\\b\"\n else:\n token_pattern = r\"(?u)\\b\\w\\w+\\b\"\n\n # choose vec\n if vec is 'bow':\n vec = CountVectorizer(\n lowercase=lowercase,\n analyzer=analyzer,\n ngram_range=ngram_range,\n stop_words=stop_words,\n min_df=min_df,\n token_pattern=token_pattern,\n binary=binary)\n elif vec is 'tfidf':\n vec = TfidfVectorizer(\n lowercase=lowercase,\n analyzer=analyzer,\n ngram_range=ngram_range,\n stop_words=stop_words,\n min_df=min_df,\n token_pattern=token_pattern,\n sublinear_tf=True)\n else:\n raise ValueError('vec must be bow or tfidf!')\n\n # get word vector\n feature = vec.fit_transform(texts)\n feature_names = vec.get_feature_names()\n\n # feature select\n if (labels is not None) and (select_k is not None):\n if nrow_train is not None:\n x_train = feature[:nrow_train, :]\n x_test = feature[nrow_train:, :]\n y_train = labels[:nrow_train]\n\n feature_selector = SelectKBest(chi2, k=select_k)\n x_train = feature_selector.fit_transform(x_train, y_train)\n feature_names = np.array(feature_names)[feature_selector.get_support()]\n\n x_test = feature_selector.transform(x_test)\n\n # combine train test\n import scipy.sparse as sp\n feature = sp.vstack([x_train, x_test])\n\n else:\n feature_selector = SelectKBest(chi2, k=select_k)\n feature = feature_selector.fit_transform(feature, labels)\n feature_names = np.array(feature_names)[feature_selector.get_support()]\n\n return feature, list(feature_names)", "def _vectorize_data(self, docs: []):\n print('Vectorizing data...')\n tfidf = TfidfVectorizer()\n encoded_data = tfidf.fit_transform(docs)\n return encoded_data", "def convert_txt_to_data():\n pass", "def transform(self, graph, instances):\n check_is_fitted(self, ['model_'])\n\n feature_vectors = []\n for instance in instances:\n feature_vectors.append(self.model_.wv.get_vector(str(instance)))\n return feature_vectors", "def transform(self, graph, instances):\n 
check_is_fitted(self, [\"model_\"])\n\n feature_vectors = []\n for instance in instances:\n feature_vectors.append(self.model_.wv.get_vector(str(instance)))\n return feature_vectors", "def get_tfidf_vectors(self):\n\n train_text = self.get_training_data()\n test_text = self.get_testing_data()\n\n print 'Initilizing tf vectorizer ...'\n vectorizer = TfidfVectorizer(sublinear_tf=True)\n vectorizer.fit( train_text + test_text )\n\n print 'Transforming data to tfidf vector ...'\n train_vec = vectorizer.transform(train_text)\n #print len(vectorizer.get_feature_names())\n test_vec = vectorizer.transform(test_text)\n\n return train_vec, test_vec", "def parse(self, text):\n return self.dict.txt2vec(text)", "def training(train_data, dev_data, param):\n text_to_vec = TextToVec(**param)\n\n # Fit with both train and dev data\n text_to_vec.fit(train_data['data'] + dev_data['data'])\n word_vec_map = text_to_vec.vectorizer.get_feature_names()\n train_vec = text_to_vec.transform(train_data['data'])\n dev_vec = text_to_vec.transform(dev_data['data'])\n logger.info(f\"train vec size:{train_vec.shape}, dev vec size:{dev_vec.shape}\")\n\n # # apply weights on tfidf based on whether the word appear in multiple classes\n # tt_occ = Counter(train_data['encoded_label'])\n # weight_list = []\n # for i in range(train_vec.shape[1]): # For every feature\n # occ = Counter(train_data['encoded_label'][train_vec[:, i] > 0.0])\n # for key, value in occ.items():\n # occ[key] = value/tt_occ[key]\n # weight_list.append(np.std(list(occ.values()))/0.35)\n # weight = np.array(weight_list).reshape(1, -1)\n # weight = weight/np.max(weight)\n # train_vec = np.multiply(train_vec, weight)\n\n # Perform oversampling on training data\n if param['balanced'] not in ['Bootstrap', 'Handsample']:\n logger.info(f\"class info before resampling: {sorted(Counter(train_data['encoded_label']).items())}\")\n train_vec, train_data['encoded_label'] = resample(X_train=train_vec, y_train=train_data['encoded_label'], balance=param['balanced'])\n logger.info(f\"class info after resampling:{sorted(Counter(train_data['encoded_label']).items())}\")\n\n # Fit model\n if param['classifier'] == 'MultinomialNB':\n clf = MultinomialNB()\n elif param['classifier'] == 'LDA':\n clf = LinearDiscriminantAnalysis()\n else:\n clf = svm.LinearSVC()\n\n if param['multiclass'] == 'OnevsOne':\n model = OneVsOneClassifier(clf)\n else:\n model = OneVsRestClassifier(clf)\n\n if param['classifier'] == 'LinearSVM' or param['multiclass'] == 'OnevsOne':\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['encoded_label'])\n train_prediction = model.predict(train_vec)\n dev_prediction = model.predict(dev_vec)\n else:\n logger.info(f'Fitting model: {param}')\n model = model.fit(train_vec, train_data['binary_label'])\n train_prediction = np.argmax(model.predict(train_vec), axis=1)\n dev_prediction = np.argmax(model.predict(dev_vec), axis=1)\n\n\n return train_prediction, dev_prediction, train_vec.shape, dev_vec.shape, model, word_vec_map", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n 
taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def train_model(self, text, labels):\n clf = svm.SVR()\n count_vect = CountVectorizer()\n tfidf_transformer = TfidfTransformer()\n counts = count_vect.fit_transform(text)\n tfidf = tfidf_transformer.fit_transform(counts)\n clf.fit(tfidf, labels)\n\n return clf, count_vect, tfidf_transformer", "def vectorize_texts(self, encoder) -> NoReturn:\n self.encoder = encoder\n self.vectorized_texts = [self.encoder.encode(t) for t in self.tokenized_texts]", "def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def _vectorize(self, vectorizer = None):\n\n\t\tvectorizer = vectorizer if vectorizer else self.vectorizer;\n\n\t\tself.training_set_vector = vectorizer.fit_transform(self.training_set)\n\n\t\tself.testing_set_vector = vectorizer.transform(self.testing_set)", "def text_feature_extract(df):\n return df", "def bag_of_words_vectorizer(datafile, k_features):\n data = []\n labels = []\n\n for jsoned_entity in open(\"data.json\", errors=\"ignore\").readlines():\n entity = json.loads(jsoned_entity)\n if entity[\"lang\"] == \"en\":\n data.append(entity[\"text\"])\n 
labels.append(entity[\"label\"])\n\n vectorizer = TfidfVectorizer(stop_words=get_stop_words(\"english\"))\n data = vectorizer.fit_transform(data)\n data = SelectKBest(chi2, k=k_features).fit_transform(data, labels)\n\n for vector_label_batch in batch(zip(data, labels), config.BATCH_SIZE):\n vectors = []\n labels = []\n for vec_label in vector_label_batch:\n vectors.append(vec_label[0].toarray())\n labels.append(vec_label[1])\n\n X = np.vstack(vectors)\n Y = np_utils.to_categorical(labels, 2)\n yield X, Y", "def vectorize_data(self, idf=False):\r\n vectorizer = Vectorizer()\r\n return vectorizer.vectorize_data(self.tweets, idf)", "def __tf_idf_feature_extraction(self):\n print('=' * 80)\n print(\"TF-IDF Feature Extraction\")\n t0 = time()\n vectorizer = TfidfVectorizer()\n vec_train = vectorizer.fit_transform(self.train.text)\n vec_test = vectorizer.transform(self.test.text)\n duration = time() - t0\n print(\"DONE!!!!! total time: %fs\" % duration)\n print('=' * 80)\n return vec_train, vec_test", "def preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0):\n\ttrain_data = pd.DataFrame(columns=['text', 'response'])\n\n\tprep_0 = [strip_non_alphanum(line) for line in text]\n\tprep_1 = [line for line in prep_0 if line.rstrip()]\n\tprep_2 = [strip_multiple_whitespaces(line) for line in prep_1]\n\tprep_3 = [line.lower() for line in prep_2]\n\n\tif to_tfidf == 1:\n\t\t#when using tf_idf, removes single character words given that they are ignored by sklearn's TfidfVectorizer\n\t\tprep_3 = [' '.join([word for word in line.split() if len(word) > 1]) for line in prep_3]\n\n\tif tokenization == 1:\n\t\tprep_3 = [line.split(' ') for line in prep_3]\n\t\t#removes whitespaces from the list\n\t\tprep_3 = [list(filter(None, line)) for line in prep_3]\n\telse:\n\t\tprep_3 = [line[:-1] if line[-1] == \" \" else line for line in prep_3]\n\n\tif numbers_to_text == 1 and tokenization == 1:\n\t\t#convert all numbers to integers and convert these numbers to its written form\n\t\ttemp_prep = []\n\t\tfor sentence in prep_3:\n\t\t\ttemporary_sentence = []\n\t\t\tfor word in sentence:\n\t\t\t\tif str(word).isdigit():\n\t\t\t\t\tconverted_words = num2words(int(word), to='cardinal', lang='pt').split(' ')\n\t\t\t\t\tif to_tfidf == 1 and rm_stopwords == 0:\n\t\t\t\t\t\tconverted_words = [word for word in converted_words if word != 'e']\n\t\t\t\t\ttemporary_sentence.extend(converted_words)\n\t\t\t\telse:\n\t\t\t\t\ttemporary_sentence.append(word)\n\t\t\ttemp_prep.append(temporary_sentence)\n\n\t\tprep_3 = temp_prep\n\telif numbers_to_text == 1 and tokenization == 0:\n\t\t#convert all numbers to integers and convert these numbers to its written form\n\t\ttemp_prep = []\n\t\tfor sentence in prep_3:\n\t\t\ttemporary_sentence = []\n\t\t\tfor word in sentence.split(' '):\n\t\t\t\tif str(word).isdigit():\n\t\t\t\t\tconverted_words = num2words(int(word), to='cardinal', lang='pt').split(' ')\n\t\t\t\t\tif to_tfidf == 1 and rm_stopwords == 0:\n\t\t\t\t\t\tconverted_words = [word for word in converted_words if word != 'e']\n\t\t\t\t\ttemporary_sentence.extend(converted_words)\n\t\t\t\telse:\n\t\t\t\t\ttemporary_sentence.append(word)\n\t\t\ttemporary_sentence = ' '.join(temporary_sentence)\n\t\t\ttemp_prep.append(temporary_sentence)\n\t\tprep_3 = temp_prep\n\n\tif rm_stopwords == 1:\n\t\tstp = set(stopwords.words('portuguese') + list(punctuation))\n\t\tif tokenization == 1:\n\t\t\tprep_3 = [[word for word in sentence if word not in stp] for sentence in prep_3]\n\t\telif tokenization == 0:\n\t\t\tprep_3 = 
[' '.join([word for word in sentence.split(' ') if word not in stp]) for sentence in prep_3]\n\n\ttmp = pd.DataFrame({'text':prep_3[::2], 'response':prep_3[1::2]})\n\ttrain_data = train_data.append(tmp[['text', 'response']], ignore_index=True)\n\n\treturn train_data", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def CalculateSVM(data=None):\n vectorizer = TfidfVectorizer(tokenizer=pre_process)\n classifier = LinearSVC()\n train, test = train_test_split([(i['text'], i['stars']) for i in data],\n test_size=.2,\n random_state=10)\n x_train = vectorizer.fit_transform(i[0] for i in train)\n x_test = vectorizer.transform(i[0] for i in test)\n classifier.fit(x_train, [i[1] for i in train])\n score = classifier.score(x_test, [i[1] for i in test])\n print score", "def vectorize(train_texts: List[str], train_labels, test_texts: List[str]) -> Tuple[Any, Any]:\n\n kwargs = {\n 'ngram_range': (1, 2),\n 'dtype': 'int32',\n 'analyzer': 'word',\n 'min_df': MIN_DOCUMENT_FREQUENCY\n }\n # Use TfidfVectorizer to convert the raw documents to a matrix of TF-IDF features with:\n # either 1-gram or 2-gram, using 'word' to split, and minimum document/corpus frequency of 2\n # Limit the number of features to top 30K.\n\n vectorizer = TfidfVectorizer(**kwargs)\n x_train = vectorizer.fit_transform(train_texts)\n x_test = vectorizer.transform(test_texts)\n selector = SelectKBest(f_classif, k=min(30000, x_train.shape[1]))\n selector.fit(x_train, train_labels)\n x_train = selector.transform(x_train)\n x_test = selector.transform(x_test)\n\n x_train = x_train.astype('float32').toarray()\n x_test = x_test.astype('float32').toarray()\n\n return x_train, x_test", "def train(self, data_sent_text: dict) -> tuple:\n text = data_sent_text.get(\"text\")\n sentiment = data_sent_text.get(\"sentiment\")\n splitted_word = self.__cleaning_split(text)\n freq_words = FrequencyWords(sentiment=sentiment,\n freq=self.__model,\n 
splitted_word=splitted_word)\n\n self.__model = self.__frequency_model(freq_words=freq_words,\n calibrate_neutral=True)\n __, __, diff_pos, diff_neg, __ = self.__build_histogram(sentiment=sentiment,\n freq_words=freq_words,\n splitted_word=splitted_word,\n calibrate=True)\n\n return self.__model, diff_pos, diff_neg", "def txt2vectors(self, txt, is_html):\n words = txt2words(txt)\n words = [w for w in words if w in self._model]\n if len(words) != 0:\n for w in words:\n yield self._model[w]", "def transform_texts(model, texts_series):\n\n # falls ein unbekanntes wort vorkommt, dieses ignorieren, d.h. auch\n # nicht zur gesamtzahl der worte zählen, durch die am Ende geteilt wird\n \n n_dims = model.layer1_size # 100 by default\n text_vectors = []\n n_broken_rows = 0\n n_unknown_words = 0\n \n for j, text in enumerate(texts_series):\n tokens = text.split()\n \n known_words = [w for w in tokens if w in model.wv] # only consider words in the dictionary of the w2v model\n if len(tokens) != len(known_words):\n # print(f\"Text: {text} ({len(tokens)} total, {len(known_words)} known)\")\n n_unknown_words += len(tokens) - len(known_words)\n if len(known_words) == 0:\n print(colored(f'\"{text}\" enthält 0 bekannte Wörter!', 'red'))\n \n text_vectors.append([np.nan for i in range(n_dims)])\n n_broken_rows += 1\n continue # to next text\n \n text_vector = [\n np.mean([model.wv[word][i] for word in known_words])\n for i in range(n_dims)\n ]\n text_vectors.append(text_vector)\n \n df = pd.DataFrame(text_vectors)\n #df.to_csv(f'data/text_features_{n_dims}dim.csv', index=False)\n\n #print(f\"{len(texts_series)} texts vectorized in total ({n_unknown_words/len(texts_series):.3f} mean unknown words per text.)\")\n c = 'green' if n_broken_rows == 0 else 'red'\n #print(colored(f\"{n_broken_rows} invalid input texts!\", c))\n\n return df", "def _update_feature_vec(fvec, word, tag_ngram):", "def get_features(self) -> Generator[np.ndarray, None, None]:\n for text in self.texts:\n yield embed(text)", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. 
Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def convert_data(self,i,j,words,tags,di,b_table):\n testing_features = TestingFeatures(self.training_features)\n self._add_features_to(testing_features,i,j,words,tags,di,b_table)\n return testing_features.to_vector()", "def get_vector_representation(self):\r\n vectorizer = CountVectorizer(lowercase=False,\r\n tokenizer=lambda x: x, # Tokenization should already be done by preprocessor\r\n stop_words=None,\r\n min_df=5,\r\n max_features=None, ## use all features\r\n ngram_range=(1, 1), ## This uses only unigram counts\r\n binary=False) ## This sets the beatures to be frequency counts\r\n pipeline = Pipeline([('vec', vectorizer), ('tfidf', TfidfTransformer())])\r\n\r\n X = pipeline.fit_transform(self.df['tokens'])\r\n Y = self.df['label'].values\r\n return ((X[:self.train_index], Y[:self.train_index]),\r\n (X[self.train_index:self.valid_index], Y[self.train_index:self.valid_index]),\r\n (X[self.valid_index:], Y[self.valid_index:]))", "def lstm_infer_vector(lstm_model, txt, stopwords,word_indices, maxlen=10, taillemax=300) :\n \n txt_prep = gensim.utils.simple_preprocess(txt, deacc=True)\n txt_wo_uw = remove_unknown_words(txt_prep, word_indices)\n txt_wo_ws = remove_stopwords(txt_wo_uw, stopwords)\n \n if len(txt_wo_ws)>taillemax:\n sentence = txt_wo_ws[-taillemax:]\n \n if len(txt_wo_ws)<maxlen :\n #cas du texte trop court\n sentence = txt_wo_ws\n X = np.zeros((1, maxlen, len(word_indices)), dtype=np.bool)\n y = np.zeros((1, len(word_indices)), dtype=np.bool)\n for t, word in enumerate(sentence):\n X[0, t, word_indices[word]] = 1\n preds = lstm_model.predict(X, verbose=0)[0]\n else :\n \n for current_part in range(len(txt_wo_ws)/maxlen):\n sentence = txt_wo_ws[current_part*maxlen:(current_part+1)*maxlen]\n X = np.zeros((1, maxlen, len(word_indices)), dtype=np.bool)\n y = np.zeros((1, len(word_indices)), dtype=np.bool)\n for t, word in enumerate(sentence):\n X[0, t, word_indices[word]] = 1\n preds = lstm_model.predict(X, verbose=0)[0]\n \n\n return preds", "def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()", "def process_text(input_txt):\r\n # if input is string\r\n tidy_txt = remove_pattern(input_txt,\"@[\\w]*\")\r\n ##=============================== if input is dataframe ====================##\r\n # tidy_txt = np.vectorize(remove_pattern)(input_txt,\"@[\\w]*\") #\r\n ##==========================================================================##\r\n # remove special characters\r\n tidy_txt = 
tidy_txt.replace(\"[^a-zA-Z#]\",\" \")\r\n # split into words\r\n tokenized_txt = tidy_txt.split()\r\n # perform stemming\r\n stemmer = PorterStemmer()\r\n tokenized_txt = [stemmer.stem(i) for i in tokenized_txt]\r\n print(tokenized_txt)\r\n # joining words back\r\n tokenized_txt = ' '.join(tokenized_txt)\r\n return tokenized_txt", "def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #preprocessing data\n X = preprocessing.scale(np.hsplit(my_data,[n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[n_features,n_col])[0])\n #define classifier\n self.classifier = svm.LinearSVC(class_weight='auto',C=1.0)\n self.classifier.fit(X, Y)", "def compute_sklearn_features():\n text_dir = 'text_model'\n emb_dir = 'embedding_weights'\n filename = 'glove.6B.50d.txt'\n emb_name = 'glove'\n emotions = ['happy', 'sad', 'angry', 'scared', 'disgusted', 'surprised']\n post_size = 200\n df_all, word_to_id, embedding = preprocess_df(text_dir, emb_dir, filename, emb_name, emotions, post_size)\n\n X = np.stack(df_all['text_list'])\n y = df_all['search_query'].values\n\n id_to_word = {i: k for k, i in word_to_id.iteritems()}\n config = {'word_to_id': word_to_id,\n 'id_to_word': id_to_word,\n 'batch_size': 128,\n 'vocab_size': len(word_to_id),\n 'embedding_dim': embedding.shape[1],\n 'post_size': post_size,\n 'fc1_size': 16,\n 'nb_emotions': len(emotions),\n 'dropout': 1.0, # Proba to keep neurons\n 'max_grad_norm': 5.0, # Maximum norm of gradient\n 'init_scale': 0.1, # Weights initialization scale\n 'initial_lr': 1e-3,\n 'lr_decay': 0.5,\n 'max_epoch_no_decay': 2, # Number of epochs without decaying learning rate\n 'nb_epochs': 10} # Maximum number of epochs\n \n tf.reset_default_graph()\n with tf.Session() as sess:\n print('Computing sklearn features:')\n init_scale = config['init_scale']\n initializer = tf.random_uniform_initializer(-init_scale, init_scale) \n with tf.variable_scope('Model', reuse=None, initializer=initializer):\n config['nb_epochs'] = 1\n m_train = WordModel(config)\n sess.run(tf.global_variables_initializer())\n sess.run(m_train.embedding_init, feed_dict={m_train.embedding_placeholder: embedding})\n\n batch_size = m_train.config['batch_size']\n initial_lr = m_train.config['initial_lr']\n \n nb_batches = X.shape[0] / batch_size\n dropout_param = 1.0\n ops = m_train.h1\n \n sess.run(tf.assign(m_train.learning_rate, initial_lr))\n\n X, y = _shuffling(X, y)\n X_reshaped = X[: (nb_batches * batch_size), :].reshape((nb_batches, batch_size, -1))\n y_reshaped = y[: (nb_batches * batch_size)].reshape((nb_batches, batch_size))\n h1_list = []\n for i in range(nb_batches):\n curr_input = X_reshaped[i, :, :]\n curr_target = y_reshaped[i, :]\n h1_features = sess.run(ops, feed_dict={m_train.input_data: curr_input, \n m_train.target: curr_target,\n m_train.keep_prob: dropout_param})\n h1_list.append(h1_features)\n\n X_sklearn = np.vstack(h1_list)\n y_sklearn = y_reshaped.reshape((-1))\n print('Finished')\n return X_sklearn, y_sklearn", "def text2tfidf(data_generator):\n counter = CountVectorizer(min_df=0.)\n data = counter.fit_transform(data_generator)\n tfidf = TfidfTransformer()\n data = tfidf.fit_transform(data)\n return counter.vocabulary_, data", "def svmlight_to_vectors(txt):\n\n MAXENT_LOG.info(\"Attempting 
to convert {} to a vector file.\".format(txt))\n\n ntf = NamedTemporaryFile(mode='w', delete=False)\n ntf.close()\n\n p = ProcessCommunicator('{} import-svmlight --input \"{}\" --output \"{}\"'.format(mallet_bin, txt, ntf.name),\n stdout_func=MAXENT_LOG.info, stderr_func=MAXENT_LOG.warn, shell=True)\n\n\n if p.wait() == 0:\n MAXENT_LOG.debug(\"Successfully created temporary vector file {}\".format(ntf.name))\n return ntf.name\n else:\n raise ClassifierException(\"SVMLight Conversion did not complete successfully.\")", "def vectorize_text(text):\n\n def remove_punctuation(text):\n \"\"\"Removes special characters from text.\"\"\"\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)\n\n def remove_common_words(text_vector):\n \"\"\"Removes 50 most common words in the uk english.\n\n source: http://www.bckelk.ukfsn.org/words/uk1000n.html\n\n \"\"\"\n common_words = set(['the', 'and', 'to', 'of', 'a', 'I', 'in', 'was',\n 'he', 'that', 'it', 'his', 'her', 'you', 'as', 'had', 'with',\n 'for', 'she', 'not', 'at', 'but', 'be', 'my', 'on', 'have', 'him',\n 'is', 'said', 'me', 'which', 'by', 'so', 'this', 'all', 'from',\n 'they', 'no', 'were', 'if', 'would', 'or', 'when', 'what', 'there',\n 'been', 'one', 'could', 'very', 'an', 'who'])\n return [word for word in text_vector if word not in common_words]\n\n text = text.lower()\n text = remove_punctuation(text)\n words_list = text.split()\n words_list = remove_common_words(words_list)\n\n return words_list", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n 
self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def transform_word_vectors(self):\n print('Transforming word vectors')\n \n self.train_X_tfidfvec = self.get_word_vectors(self.train_X)\n self.val_X_tfidfvec = self.get_word_vectors(self.val_X)\n self.test_X_tfidfvec = self.get_word_vectors(self.test_X)\n if self.savename is not None:\n with open(self.savename + '_X_tfidfvec.obj','wb') as f:\n pickle.dump((self.train_X_tfidfvec,self.val_X_tfidfvec,self.test_X_tfidfvec),f) \n print('Done transforming word vectors')", "def gen_dtm(text_data, vocab):\n vectorizer = sklearn.feature_extraction.text.CountVectorizer(\n vocabulary = vocab)\n return vectorizer.fit_transform(text_data)", "def doc_term_matrix(text, vectorizer = 'CV', stop_words = 'english'):\n\n\tfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n\timport pandas as pd\n\n\tif vectorizer == 'CV':\n\t vec = CountVectorizer(stop_words = stop_words)\n\telif vectorizer == 'TFIDF':\n\t vec = TfidfVectorizer(stop_words = stop_words)\n\n\tfit = vec.fit_transform(text)\n\tdf = pd.DataFrame(fit.toarray(), columns = vec.get_feature_names())\n\treturn df", "def data_mining_features(index,input_string_x1,input_string_x2,vocab_word2index,word_vec_fasttext_dict,word_vec_word2vec_dict,tfidf_dict,n_gram=8):\r\n input_string_x1=input_string_x1.decode(\"utf-8\")\r\n input_string_x2 = input_string_x2.decode(\"utf-8\")\r\n #1. get blue score vector\r\n feature_list=[]\r\n #get blue score with n-gram\r\n for i in range(n_gram):\r\n x1_list=split_string_as_list_by_ngram(input_string_x1,i+1)\r\n x2_list = split_string_as_list_by_ngram(input_string_x2, i + 1)\r\n blue_score_i_1 = compute_blue_ngram(x1_list,x2_list)\r\n blue_score_i_2 = compute_blue_ngram(x2_list,x1_list)\r\n feature_list.append(blue_score_i_1)\r\n feature_list.append(blue_score_i_2)\r\n\r\n #2. get length of questions, difference of length\r\n length1=float(len(input_string_x1))\r\n length2=float(len(input_string_x2))\r\n length_diff=(float(abs(length1-length2)))/((length1+length2)/2.0)\r\n feature_list.append(length_diff)\r\n\r\n #3. how many words are same, how many words are unique\r\n sentence_diff_overlap_features_list=get_sentence_diff_overlap_pert(index,input_string_x1,input_string_x2)\r\n feature_list.extend(sentence_diff_overlap_features_list)\r\n\r\n #4. 
question 1,2 start with how/why/when\r\n #how_why_feature_list=get_special_start_token(input_string_x1,input_string_x2,special_start_token)\r\n #print(\"how_why_feature_list:\",how_why_feature_list)\r\n #feature_list.extend(how_why_feature_list)\r\n\r\n #5.edit distance\r\n edit_distance=float(edit(input_string_x1, input_string_x2))/30.0\r\n feature_list.append(edit_distance)\r\n\r\n #6.cos distance from sentence embedding\r\n x1_list=token_string_as_list(input_string_x1, tokenize_style='word')\r\n x2_list = token_string_as_list(input_string_x2, tokenize_style='word')\r\n distance_list_fasttext = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict)\r\n distance_list_word2vec = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_word2vec_dict, tfidf_dict)\r\n #distance_list2 = cos_distance_bag_tfidf(x1_list, x2_list, word_vec_fasttext_dict, tfidf_dict,tfidf_flag=False)\r\n #sentence_diffence=np.abs(np.subtract(sentence_vec_1,sentence_vec_2))\r\n #sentence_multiply=np.multiply(sentence_vec_1,sentence_vec_2)\r\n feature_list.extend(distance_list_fasttext)\r\n feature_list.extend(distance_list_word2vec)\r\n #feature_list.extend(list(sentence_diffence))\r\n #feature_list.extend(list(sentence_multiply))\r\n return feature_list", "def __init__(self, data_filename):\n with open(data_filename, 'rb') as data_file:\n loaded_features = pickle.load(data_file)\n self.title_nlp_tfidf_features = loaded_features['title_NLP_TFIDF_features']\n self.other_features = loaded_features['other_features']\n self.category1_features = loaded_features['category1_features']\n self.category2_features = loaded_features['category2_features']\n self.category3_features = loaded_features['category3_features']\n self.material_features = loaded_features['material_features']\n self.who_made_features = loaded_features['whoMade_features']\n self.when_made_features = loaded_features['whenMade_features']\n self.style1_features = loaded_features['style1_features']\n self.style2_features = loaded_features['style2_features']\n self.feature_labels = loaded_features['feature_labels']", "def transform(self, strings):\n\n logger.debug(\"Converting {} strings into lists of \"\n \"sentences.\".format(len(strings)))\n\n tokenized_strings = []\n for text in strings:\n tokenized_strings.append(text_to_wordlist(text, remove_stopwords=True))\n\n # Pre-allocate a 2D numpy array, for speed\n feature_vecs = np.zeros((len(tokenized_strings), self.num_features),\n dtype=\"float32\")\n\n # Loop through the strings\n for counter, word_list in enumerate(tokenized_strings):\n\n # Call the function (defined above) that makes average feature vectors\n feature_vecs[counter] = self._make_feature_vec(word_list)\n\n # For DEBUG only\n if np.isnan(feature_vecs[counter][0]):\n import ipdb;ipdb.set_trace()\n\n\n return feature_vecs", "def text2vec(self, maxlen):\n # Vocab = {word : index}\n self.Vocab = dict()\n\n for SentenceLabel in self.Pos + self.Neg:\n vector = [0] * maxlen\n for index, word in enumerate(SentenceLabel[0]):\n if index >= maxlen:\n break\n if word not in self.Vocab.keys():\n self.Vocab[word] = len(self.Vocab)\n vector[index] = len(self.Vocab) - 1\n else:\n vector[index] = self.Vocab[word]\n SentenceLabel[0] = vector\n self.doConvert = True", "def classify(self, dataSet):\n\n return nltk.classify.apply_features(self.extrairFrase, dataSet)", "def vectorizer_features(self) -> list:\n if self._vectorizer:\n return self._vectorizer.get_feature_names()\n self.logger.warning('Uninitialized vector. 
Please call count_vectorizer first.')", "def build_sf(dta, clf, trained_tweets):\n X = [] # samples\n y = [] # features\n for tweet in trained_tweets:\n vector = w2v_vector(dta, tweet['text'])\n if vector is not None: # ValueError: setting an array element with a sequence :')\n X.append(vector)\n y.append(tweet['label'])\n return X, y", "def extract(self, document):\n raise NotImplementedError('FeatureExtractorBase:extract(self, text) is not defined')", "def extract_features(self, data):\n\n # TODO: Should feature extraction be done on the testing data? In the lecture notes\n # TODO: it is not done with the training data, but with the test data.\n # TODO: Maybe we should use the validate data when we do cross-validation.\n\n features = np.zeros([len(data)*self.K]).reshape(len(data), self.K)\n for i in range(len(data)):\n for j in range(self.K):\n features[i][j] = np.linalg.norm(data[i] - self.cb_vectors[j])\n\n return features", "def process_data(self) -> Tuple[list, List[Sequence[int]]]:\n features, labels = self.load_files()\n x = [self.process_text(f, self.max_length) for f in features]\n y = [self._convert_labels(l, self.max_length) for l in labels]\n return x, y", "def _minimal_analysis(text, classifier, Resource, threshold, language='en'):\n list_text = clean_text(text, get_correct_stop_word(Resource, language))\n m_features = list()\n m_features.append(characteristic_vector(list_text, Resource))\n return classifier.predict(array(m_features), threshold), m_features", "def _featurize(self, predictions: SequenceSample) -> List[np.ndarray]:\n feature_vectors: List[np.ndarray] = []\n source = predictions.origin_words\n\n char_nn_scores = self.char_nn_lm_score(predictions.paths)\n word_nn_scores = self.word_nn_lm_score(predictions.paths)\n\n for i, (score, hypothesis) in enumerate(zip(predictions.scores, predictions.paths)):\n obss = list(zip(hypothesis, source))\n length = len(source)\n feature_vector = np.array([\n 1.,\n length,\n self.language_model.score(hypothesis) / length,\n char_nn_scores[i],\n word_nn_scores[i],\n score / length,\n sum(w in self.language_model for w in hypothesis) / length,\n sum(h[:self.prefix_size] == s[:self.prefix_size] for h, s in obss) / length,\n sum(h[-self.suffix_size:] == s[-self.prefix_size:] for h, s in obss) / length,\n self.language_model.score(hypothesis) * score / length,\n np.mean([editdistance.eval(h, s) for h, s in obss]),\n np.mean([float(obs in self.train_set_uniq) for obs in obss]),\n np.mean([self.train_counter.get(obs, self.discount) for obs in obss]),\n ])\n feature_vectors.append(feature_vector)\n return feature_vectors", "def vectorize(self,clean_path):\n \n #load pretrained embedding model (GloVe)\n glove = spacy.load('en_core_web_lg')\n #extract unique words (aka vocabulary)\n unique_words = set()\n for d in self.docs: \n txt = d.text\n doc = glove(txt)\n for word in doc: \n if word.has_vector:\n unique_words.add(word.text)\n #change set to list type\n unique_words = list(unique_words)\n #save vector representation\n word_vectors = np.array([glove(word).vector for word in unique_words if glove(word).has_vector])\n #index vectors by corresponding word \n corpus_vectors = pd.DataFrame(word_vectors, index=unique_words)\n with open(clean_path + 'corpus_vectors.pkl', 'wb') as f:\n pickle.dump(corpus_vectors,f)\n self.vectors = corpus_vectors\n print('Saved embedding vectors.')\n return", "def toFeatureVector(tokens,index=None):\n\t# Should return a dictionary containing features as keys, and weights as values\n\tadict = {}\n\ttokens = [w 
for w in tokens if w not in stopwords]\n\t# Q4 Limiting the token list to average/median of all the tokens per reviews\n\tfor i in tokens[:mean_token]: \n\t\tadict[i] = featureDict[i]\n\tif index is not None:\n\t\tfor i in rawData:\n\t\t\tif i[0] == index:\n\t\t\t\tadict['raiting'] = float(int(i[2]) - 0)/5\n\t\t\t\tadict['verPur'] = 1 if i[3] == 'Y' else 0\n\t\t\t\tadict['avgWordLen'] = sum(len(w) for w in i[1].split())/len(i[1])\n\t\t\t\tadict['stopwords'] = len([w for w in i[1].split() if w in stopwords])\n\t\t\t\t# adict['speacialChar'] = len(re.findall(r'[^A-Z0-9a-z ]+',i[1])) # performace metrics decreases\n\t\t\t\tadict['digits'] = len(re.findall(r'[0-9]+',i[1]))\n\treturn adict", "def build_data_vectors(annotations, tweets, Tfidf_vect, adr_lexicon_dict, should_balance_set=True):\n\n def vectorize_word(word):\n \"\"\"gives vectorized value from TfidfVectorizer for the given word\n If the word is not part of vocabulary, 0 will be returned\n\n # Arguments\n word - word to vectorize\n\n # Returns\n vectorized value\n \"\"\"\n if word in Tfidf_vect.vocabulary_:\n i = Tfidf_vect.vocabulary_[word]\n return Tfidf_vect.idf_[i]\n else:\n return 0\n\n def clean_text(text):\n \"\"\"Cleans the text\n This code snippet is taken from https://towardsdatascience.com/multi-label-text-classification-with-scikit-learn-30714b7819c5\n Author: Susan Li\n\n # Arguments\n text - text to clean\n\n # Returns\n cleaned text\n \"\"\"\n text = text.lower()\n text = re.sub(r\"what's\", \"what is \", text)\n text = re.sub(r\"\\'s\", \" \", text)\n text = re.sub(r\"\\'ve\", \" have \", text)\n text = re.sub(r\"can't\", \"can not \", text)\n text = re.sub(r\"n't\", \" not \", text)\n text = re.sub(r\"i'm\", \"i am \", text)\n text = re.sub(r\"\\'re\", \" are \", text)\n text = re.sub(r\"\\'d\", \" would \", text)\n text = re.sub(r\"\\'ll\", \" will \", text)\n text = re.sub(r\"\\'scuse\", \" excuse \", text)\n text = re.sub('\\W', ' ', text)\n text = re.sub('\\s+', ' ', text)\n text = text.strip(' ')\n return text\n\n X = []\n Y = []\n adr_labels_size = 0\n nonadr_labels_size = 0\n for i, (k, v) in enumerate(annotations.items()):\n tweet_text = clean_text(tweets[k])\n tokens = word_tokenize(tweet_text)\n\n for annotation_index, annotation in enumerate(v):\n prev_token_adr = False\n\n annotated_text = clean_text(annotation['annotatedText'])\n annotated_text_tokens = word_tokenize(annotated_text)\n\n for index, focus_word in enumerate(tokens):\n focus_vector = []\n\n # for Context feature, get index for 3 surrounding words on each side of focus word\n if program_args.context_feature:\n focus_vector.append(vectorize_word(tokens[index-3]) if (index-3 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-2]) if (index-2 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index-1]) if (index-1 >= 0) else 0)\n focus_vector.append(vectorize_word(tokens[index]))\n focus_vector.append(vectorize_word(tokens[index+1]) if (index+1 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+2]) if (index+2 < len(tokens)) else 0)\n focus_vector.append(vectorize_word(tokens[index+3]) if (index+3 < len(tokens)) else 0)\n\n if program_args.adrlexicon_feature:\n if focus_word in adr_lexicon_dict:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n if program_args.prev_adrlexicon_feature:\n if prev_token_adr:\n focus_vector.append(1)\n else:\n focus_vector.append(0)\n\n # assign class label\n if annotation['semanticType'] == 'ADR' and focus_word in annotated_text_tokens:\n 
Y.append(ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n adr_labels_size += 1\n prev_token_adr = True\n else:\n Y.append(NON_ADR_MENTION_CLASS_LABEL)\n X.append(focus_vector)\n nonadr_labels_size += 1\n prev_token_adr = False\n\n print(\" Dataset size: {}\".format(len(X)))\n print(\" {} class size: {}\".format(ADR_MENTION_CLASS_NAME, adr_labels_size))\n print(\" {} class size: {}\".format(NON_ADR_MENTION_CLASS_NAME, nonadr_labels_size))\n\n if should_balance_set:\n X, Y = balance_set(X, Y, adr_labels_size, nonadr_labels_size)\n\n X = scipy.sparse.csr_matrix(X)\n return X, Y", "def generateFeatures(self, data):\n pass", "def build(self, Tweets):\n text = [t.text for t in Tweets]\n vectorizer = TfidfVectorizer(tokenizer=process_text,\n stop_words=stopwords.words('english'),\n max_df=0.5,\n min_df=0.1,\n lowercase=True,\n max_features=10000)\n\n return vectorizer.fit_transform(text).A", "def extract_features(self, docs_train, docs_test, word_ngram_range=(1, 3), dim_reduce=False):\n\n\t\t# Build a vectorizer that splits strings into sequences of i to j words\n\t\tword_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='word', ngram_range=word_ngram_range,\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\t\t# Build a vectorizer that splits strings into sequences of 3 to 5 characters\n\t\tchar_vectorizer = TfidfVectorizer(preprocessor=self.preprocess_tweet,\n\t\t\t\t\t\t\t\t\t analyzer='char', ngram_range=(3, 5),\n\t\t\t\t\t\t\t\t\t min_df=2, use_idf=True, sublinear_tf=True)\n\n\t\t# Build a transformer (vectorizer) pipeline using the previous analyzers\n\t\t# *FeatureUnion* concatenates results of multiple transformer objects\n\t\tself.ngrams_vectorizer = Pipeline([('feats', FeatureUnion([('word_ngram', word_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ('char_ngram', char_vectorizer),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t ])),\n\t\t\t\t\t\t\t\t # ('clff', LinearSVC(random_state=42))\n\t\t\t\t\t\t\t\t ])\n\n\t\t# Fit (learn vocabulary and IDF) and transform (transform documents to the TF-IDF matrix) the training set\n\t\tX_train_ngrams_tfidf = self.ngrams_vectorizer.fit_transform(docs_train)\n\t\t'''\n\t\t↳ Check the following attributes of each of the transformers (analyzers)—*word_vectorizer* and *char_vectorizer*:\n\t\tvocabulary_ : dict. A mapping of terms to feature indices.\n\t\tstop_words_ : set. 
Terms that were ignored\n\t\t'''\n\t\tprint(\"%.2f seconds: Finished fit_transforming the training dataset\" % time.process_time())\n\t\tprint(\"Training set word & character ngrams .shape = \", X_train_ngrams_tfidf.shape)\n\n\t\tfeature_names_ngrams = [word_vectorizer.vocabulary_, char_vectorizer.vocabulary_]\n\n\t\t'''\n\t\tExtract the features of the test set (transform test documents to the TF-IDF matrix)\n\t\tOnly transform is called on the transformer (vectorizer), because it has already been fit to the training set.\n\t\t'''\n\t\tX_test_ngrams_tfidf = self.ngrams_vectorizer.transform(docs_test)\n\t\tprint(\"%.2f seconds: Finished transforming the test dataset\" % time.process_time())\n\t\tprint(\"Test set word & character ngrams .shape = \", X_test_ngrams_tfidf.shape)\n\n\t\t# • Dimensionality reduction using truncated SVD (aka LSA)\n\t\tif dim_reduce:\n\t\t\t# Build a truncated SVD (LSA) transformer object\n\t\t\tself.svd_reducer = TruncatedSVD(n_components=300, random_state=43)\n\t\t\t# Fit the LSI model and perform dimensionality reduction\n\t\t\tX_train_ngrams_tfidf_reduced = self.svd_reducer.fit_transform(X_train_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the training dataset\", time.process_time())\n\t\t\tX_test_ngrams_tfidf_reduced = self.svd_reducer.transform(X_test_ngrams_tfidf)\n\t\t\tprint(\"@ %.2f seconds: Finished dimensionality reduction (LSA) on the test dataset\", time.process_time())\n\n\t\t\tX_train = X_train_ngrams_tfidf_reduced\n\t\t\tX_test = X_test_ngrams_tfidf_reduced\n\t\telse:\n\t\t\tX_train = X_train_ngrams_tfidf\n\t\t\tX_test = X_test_ngrams_tfidf\n\n\t\treturn X_train, X_test, feature_names_ngrams", "def _featurize_py_func(text):\n label = np.array(text[-1], dtype=np.int32)\n words = word_tokenize(text[:-2])\n chars = np.zeros([max_sentence_length, max_word_length], dtype=np.int32)\n for i, word in enumerate(words):\n ids = [char_to_int.get(char, -1) for char in word]\n chars[i,:len(ids)] = ids\n return chars", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def __init__(self,training_data):\n my_data = genfromtxt(training_data, delimiter='\\t',skip_header=0)\n n_col = my_data.shape[1]\n self.n_features=n_col-1 #assuming that the latest column\n #contains the the outputs \n #pre-processing data\n X = preprocessing.scale(np.hsplit(my_data,[self.n_features,n_col])[0])\n Y = np.squeeze(np.asarray(np.hsplit(my_data,[self.n_features,n_col])[1]))\n #defining scaling\n self.scaler = preprocessing.Scaler()\n self.scaler.fit(np.hsplit(my_data,[self.n_features,n_col])[0])\n #define classifier\n self.classifier = svm.SVR(kernel='linear', C=1e3, cache_size=DEFAULT_CACHE_SIZE)\n #self.classifier = svm.SVR(kernel='rbf', C=1e3, gamma=0.1, cache_size=DEFAULT_CACHE_SIZE)\n self.classifier.fit(X, Y)", "def _get_word2vec_features(x, word2vec, all_words_per_tweet, max_tweet_len):\n\n features = np.zeros((len(x), max_tweet_len, word2vec.vector_size))\n\n for i, tweet_words in enumerate(all_words_per_tweet):\n tweet_repr = np.array(\n [word2vec.wv[r] if r in word2vec.wv.vocab else np.zeros(word2vec.vector_size) for r in tweet_words])\n features[i][:len(tweet_repr), :word2vec.vector_size] = tweet_repr\n\n return features", "def 
__process_element(data):\n print('prosessing {}'.format(data))\n x_i = data[0]\n y_i = data[1]\n\n file_name = FeatureExtractor.get_file_name(x_i, feature_name)\n try:\n # try to load if file already exist\n np.load(out_path / file_name, allow_pickle=True)\n print('info: {} loaded from .npy !'.format(file_name))\n new_labels.append([file_name, y_i])\n except FileNotFoundError or OSError or EOFError:\n # OSError and EOFError are raised if file are inconsistent\n voice_activation = np.load(source_path / x_i, allow_pickle=True)\n mean_voice_activation = FeatureExtractor.get_mean_voice_activation(voice_activation[0])\n # this is kind-of standard\n FeatureExtractor.save_feature([voice_activation[0], mean_voice_activation], feature_name, out_path, x_i,\n y_i, new_labels)", "def txt2vec(self, text: str) -> List[int]:\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr", "def preprocess(self, data):\n logger.info(str(data))\n text = data[0].get(\"data\")\n if text is None:\n text = data[0].get(\"body\") # with txt file\n if isinstance(text, dict):\n logger.info(\" ############## Got Dict !! ##########################\")\n input_text = text['text']\n else:\n input_text = text.decode('utf-8')\n max_length = int(self.setup_config[\"max_length\"])\n logger.info(\"Received text: '%s'\", input_text)\n\n logger.info(input_text)\n # input_text = \"안녕하세요? 반갑습니다. 오늘 날씨가 정말 끝내줘요. 너 너무 사랑스러워요\"\n inputs = self.tokenizer.encode(input_text, max_char_length=max_length, return_attention_mask=True)\n return inputs", "def Classify_Text(self, overview):\n\n # convert text to lower case\n overview = overview.lower()\n\n path = self.path\n\n # start time\n time0 = time.process_time()\n\n # Use ensemble classifier - voting with weights\n\n # model = joblib.load(path + \"MULTINOMIAL NB_TFIDF VECTORIZER\" + \".pkl\")\n model = joblib.load(\n \"/home/do/PycharmProjects/pythonProject/information-retrival-search-engine/informationRetrival/frontend/static/frontend/text/SVM_COUNT VECTORIZER.pkl\")\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n vec = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n\n print(vec)\n # overview=\"An undercover cop and a mole in the police\"\n Y = vec.fit_transform([overview]).toarray()\n print(vec.get_feature_names())\n print(Counter(Y[0]))\n # print(Counter(Y[1]))\n print(model)\n predicted_genre = model.predict(Y)\n print(predicted_genre)\n\n # Return predicted genre and time taken for classification\n return predicted_genre, str(round(time.process_time() - time0, 3)) + \" seconds\"", "def _post_process(self, vectorizer_output):\n tdm = pd.DataFrame(vectorizer_output.toarray().transpose(),\n index=self.vectorizer.get_feature_names())\n dtm = tdm.transpose()\n return dtm", "def prepare_length_features(text_counts, custom_vec, length_processed_flora_data_frame):\n vocab = custom_vec.get_feature_names() # https://stackoverflow.com/questions/39121104/how-to-add-another-feature\n # -length-of-text-to-current-bag-of-words-classificati\n\n length_model_data_frame = pd.DataFrame(text_counts.toarray(), columns=vocab)\n length_model_data_frame = pd.concat(\n [length_model_data_frame, length_processed_flora_data_frame['length'].reset_index(drop=True)], axis=1)\n\n length_model_data_frame_values = length_model_data_frame.values.astype(np.float64)\n length_model_sparse = sparse.csr_matrix(length_model_data_frame_values)\n\n assert length_model_sparse.shape > text_counts.shape, 'Length model should have one more column 
of data than BOW ' \\\n 'model '\n return length_model_sparse", "def _analyse_and_overwrite_existing_vocabulary(self, preprocessed_content: List[str]) -> None:\n\n vectoriser = sklearn_text.TfidfVectorizer()\n vectoriser.fit(preprocessed_content)\n\n # Extract all of the unique words found\n words = vectoriser.get_feature_names()\n new_vocabulary = pd.DataFrame(data={'word': words, 'feature_matrix_index': range(len(words))})\n\n # Replace existing data\n self._db_connection.execute_database_operation(\"TRUNCATE TABLE encoded_articles.tfidf_vocabulary;\")\n\n self._db_connection.upload_dataframe(\n dataframe=new_vocabulary,\n table_name='tfidf_vocabulary',\n schema='encoded_articles',\n if_exists='append',\n index=False,\n )", "def create_training_data_file(list_of_word_lines, language):\r\n # To store each feature vector\r\n feature_vector = []\r\n\r\n # To store the entire dataset\r\n data = []\r\n\r\n for sentence in list_of_word_lines:\r\n\r\n # Contains Q\r\n CONTAINS_Q = 'N'\r\n\r\n # Contains Q\r\n CONTAINS_X = 'N'\r\n\r\n # Contains more than 1 vowel\r\n VOWELS = 'N'\r\n\r\n # Contains common dutch substrings\r\n DUTCH_SUBSTRING = 'N'\r\n\r\n # Contains is-was\r\n ISWAS = 'N'\r\n\r\n # Contains come\r\n COME = 'N'\r\n\r\n # Contains common english words\r\n COMMON_ENGLISH_WORDS = 'N'\r\n\r\n # Contains common dutch words\r\n DUTCH_WORDS = 'N'\r\n\r\n # Contains dutch ij\r\n IJ = 'N'\r\n\r\n # Contains and\r\n AND = 'N'\r\n\r\n # Contains they, he, she\r\n COLLECTIVES = 'N'\r\n\r\n for word in sentence:\r\n\r\n if re.match('[0-9]*', word):\r\n word = re.sub('[0-9]*', '', word)\r\n\r\n if re.match('[!?~`@#$%&)(_=+/.,\"»;«-]', word):\r\n word = re.sub('[!?~`@#$%&)(_=+/.,\"»;«-]', '', word)\r\n\r\n word = word.lower()\r\n if \"de\" == word or \"het\" == word or \"dat\" == word or \"en\" == word or \"een\" == word or \"voor\" == word or \"van\" == word or \"welke\" == word \\\r\n or \"te\" == word or \"hij\" == word or \"zij\" == word or \"op\" == word or \"ik\" == word or \"bij\" == word:\r\n DUTCH_WORDS = 'Y'\r\n\r\n if \"ij\" in word:\r\n IJ = 'Y'\r\n\r\n if \"the\" == word or \"but\" == word or \"for\" == word or \"which\" == word or \"that\" == word or \"and\" == word or \"not\" == word \\\r\n or \"to\" == word or \"in\" == word:\r\n COMMON_ENGLISH_WORDS = 'Y'\r\n\r\n if \"q\" in word:\r\n CONTAINS_Q = 'Y'\r\n\r\n if \"x\" in word:\r\n CONTAINS_X = 'Y'\r\n\r\n if \"aa\" in word or \"ee\" in word or \"ii\" in word or \"uu\" in word:\r\n VOWELS = 'Y'\r\n\r\n if \"ijk\" in word or \"sch\" in word or \"ijn\" in word:\r\n DUTCH_SUBSTRING = 'Y'\r\n\r\n if \"is\" == word or \"of\" == word or \"was\" == word or \"all\" in word:\r\n ISWAS = 'Y'\r\n\r\n if \"come\" == word or \"a\" == word:\r\n COME = 'Y'\r\n\r\n if \"and\" == word:\r\n AND = 'Y'\r\n\r\n if \"he\" == word or \"she\" == word or \"it\" == word or \"they\" == word:\r\n COLLECTIVES = 'Y'\r\n\r\n feature_vector.append([DUTCH_WORDS, IJ, COMMON_ENGLISH_WORDS, CONTAINS_Q, CONTAINS_X,\r\n VOWELS, DUTCH_SUBSTRING, ISWAS,\r\n COME, AND, COLLECTIVES, language])\r\n\r\n data.append(feature_vector)\r\n feature_vector = []\r\n return data", "def create_vectors(\n dataset_path_train: str, dataset_path_test: str,\n vectors_path_train: str, vectors_path_test: str\n) -> int:\n dtypes = {\n \"id\": int,\n \"keyword\": str,\n \"location\": str,\n \"text\": str,\n \"text_stemmed\": str,\n \"text_lemmatized\": str,\n }\n\n df_train = pd.read_csv(\n f\"/data/{dataset_path_train}\",\n index_col=\"id\",\n dtype={**dtypes, \"target\": int},\n 
converters={\"tokens\": ast.literal_eval})\n df_train[\"text_preprocessed\"] = df_train[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n df_test = pd.read_csv(\n f\"/data/{dataset_path_test}\",\n index_col=\"id\",\n dtype=dtypes,\n converters={\"tokens\": ast.literal_eval})\n df_test[\"text_preprocessed\"] = df_test[\"tokens\"].apply(\n lambda x: \" \".join(x))\n\n vectorizer = sklearn.feature_extraction.text.CountVectorizer()\n vectors_train = vectorizer.fit_transform(df_train[\"text_preprocessed\"])\n vectors_test = vectorizer.transform(df_test[\"text_preprocessed\"])\n\n with open(f\"/data/{vectors_path_train}\", \"wb\") as f:\n pickle.dump(vectors_train, f)\n with open(f\"/data/{vectors_path_test}\", \"wb\") as f:\n pickle.dump(vectors_test, f)\n\n return 0", "def predict(self, X, feature_names=None):\n if not self.trained_model:\n self.trained_model = Doc2Vec.load(self.model_file)\n\n items = [X, \"\"]\n if self.delimiter in X:\n items = X.split('#DEMI#')\n vector = self.trained_model.infer_vector(self.preprocess(items[0], items[1]))\n return vector", "def infer_vectors(self, reports, labels):\n logger.info('Inferring vectors from Doc2Vec model')\n tagged_docs = self.tag_dataset(reports, labels)\n vecs = [self.model.infer_vector(tag.words) for tag in tagged_docs]\n vecs = np.array(vecs)\n return vecs", "def text_to_w2v_input(text, tokenizer=None, remove_stopwords=False):\n\n # NOTE: Punkt is a sentence tokenizer\n if not tokenizer:\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\n # Split text into sentences\n raw_sentences = tokenizer.tokenize(text.decode('utf8').strip())\n\n tokenized_sentences = []\n for raw_sentence in raw_sentences:\n if raw_sentence:\n tokenized_sentences.append(\n text_to_wordlist(raw_sentence, remove_stopwords))\n\n return tokenized_sentences", "def load_dataset(dataset_path: str, mode: str, vectorizer_path: str):\n if mode == \"celeb\":\n x_path = dataset_path + \"/celebrity-feeds.ndjson\"\n else:\n x_path = dataset_path + \"/feeds.ndjson\"\n y_data = [json.loads(line) for line in open(\"./data/gt-labels.ndjson\", \"r\")]\n\n if not Path(vectorizer_path).exists():\n logging.info(\"no stored vectorizer found, creating ...\")\n vec = TfidfVectorizer(preprocessor=_preprocess_feed, ngram_range=N_GRAM_RANGE,\n max_features=MAX_WORD_FEATURES, analyzer='word', min_df=3,\n )# norm='l1')\n vec.fit(_read_text_linewise(x_path, mode))\n joblib.dump(vec, vectorizer_path)\n else:\n logging.info(\"loading stored vectorizer\")\n vec = joblib.load(vectorizer_path)\n\n # load x data\n logging.info(\"transforming data ...\")\n x = vec.transform(_read_text_linewise(x_path, mode))\n\n # load Y data\n # y_gender = [g_dict[l[\"gender\"]] for l in y_data]\n y_gender = []\n y_occ = []\n y_age = []\n ids = []\n\n for l in y_data:\n y_gender.append(g_dict[l[\"gender\"]])\n y_occ.append(o_dict[l[\"occupation\"]])\n y_age.append(_get_age_class(l[\"birthyear\"]))\n ids.append(l[\"id\"])\n\n # y_occ = [o_dict[l[\"occupation\"]] for l in y_data]\n # y_age = [_get_age_class(l[\"birthyear\"]) for l in y_data]\n # ids = [i[\"id\"] for i in y_data]\n return x, y_age, y_gender, y_occ, ids", "def __init__(self, vectorizer: WordVectorizer = None):\n self.parse_size = text_parse_size\n\n self.mst = Mystem()\n self.stop_words_set = set(stopwords.words('english'))\n self.stop_words_set.update(stopwords.words('russian'))\n\n if vectorizer is not None:\n self.vectorizer = vectorizer", "def predict(self, text):\r\n vectorizer = CountVectorizer(ngram_range=(1, 2), lowercase=True, 
vocabulary=self.voc)\r\n vector = vectorizer.fit_transform([text])\r\n return self.model.predict(vector)[0]", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def build_model(self, documents):\n self.vectorizer = TfidfVectorizer(\n stop_words='english', lowercase=True).fit(documents)\n self.vectors = self.vectorizer.transform(documents)" ]
[ "0.7076867", "0.70379716", "0.67895293", "0.67181784", "0.6577113", "0.64362943", "0.63970965", "0.6308338", "0.6303764", "0.6243157", "0.6215836", "0.62142926", "0.61975324", "0.6172654", "0.61442214", "0.61173296", "0.6084646", "0.6082078", "0.6061634", "0.6042774", "0.6021297", "0.5965925", "0.5949392", "0.5946612", "0.5943166", "0.5938397", "0.59210914", "0.58842826", "0.5865079", "0.5831876", "0.58194035", "0.5808541", "0.5799141", "0.5779838", "0.57721937", "0.5772177", "0.57600796", "0.5751429", "0.5748311", "0.5729925", "0.56916666", "0.56857044", "0.56853694", "0.56620747", "0.56521755", "0.56520534", "0.5642831", "0.56213665", "0.56205827", "0.56125164", "0.56091255", "0.5608647", "0.5608643", "0.56051433", "0.5595076", "0.5592368", "0.55776703", "0.5576831", "0.55743945", "0.5572158", "0.5547463", "0.5520171", "0.5517781", "0.5517036", "0.5515646", "0.55124605", "0.5508924", "0.55048466", "0.55000615", "0.5489178", "0.54838693", "0.5469605", "0.54694813", "0.54686815", "0.5453462", "0.54463536", "0.5440778", "0.5432588", "0.54254335", "0.5407083", "0.5405905", "0.54029876", "0.5398396", "0.5389385", "0.5386986", "0.53858966", "0.538066", "0.5377653", "0.53774893", "0.5348189", "0.5340288", "0.5338574", "0.5337923", "0.53266984", "0.5322951", "0.53207", "0.53152215", "0.531276", "0.53126156", "0.53110427" ]
0.61769956
13
Sets the current active partition.
def set_split(self,split='train'): self._target_data = self.processed_data[split] self.split_ = split
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setActive(self, active):\n\n self._active = active", "def set_active(self, active):\n self._active = active", "def set_active(self, active):\n self.active = active", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = active", "def active(self, active):\n\n self._active = active", "def SetActive(self, b):\r\n\r\n self.active = b", "def set_active(self):\n self.active = True", "def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def set_active(self):\n if self.active is True:\n return\n self.active = True\n self.save()\n self.question_set.update(active=True)", "def set_partition(self, begin=0, end=0):\r\n self.partition = (begin, end)", "def set_partition(self, partition=0):\n if not isinstance(partition, int):\n raise TypeError('partition must be an integer')\n if partition <= 0:\n raise ValueError('partition must be positive')\n if self.connected:\n self.producer.send(\"PART:\"+str(partition))", "def setPrimActive(self, primPath, state):\n\n prim = self._stage.GetPrimAtPath(primPath)\n with self.editInPrimStateLayer():\n prim.SetActive(state)", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def update_active(self):\n self.set_active(0)\n self.state = INACTIVE", "def setActive(self, i, a=1):\n self.active[i] = a\n return", "def active(self, active):\n if active is None:\n raise ValueError(\"Invalid value for `active`, must not be `None`\") # noqa: E501\n\n self._active = active", "def SetActiveObject(self):", "def setCurrent(self, current):\n self.__current = current", "def set_active_book(self, book):\n\n self.active_book = book\n self.display.set_display_book(book)", "def set_active(cls, directory: Path, ver: str) -> None:\n\n if directory.is_dir() is False:\n raise Failure(f\"{directory} is not a valid directory\")\n\n if not cls.is_installed(ver):\n raise Failure(f\"{ver} is not installed, cannot set as active\")\n\n logger.info(f\"Setting python version {ver} as active in {directory}\")\n\n os.chdir(directory)\n run([cls.command, \"local\", ver])", "def active(self, activate):\n self.is_active = activate", "def partition_session(self):\n if self.user['drive']['name'] is not None:\n\n # Set root size\n if self.user['root_freespace'] is True:\n self.user['root_size'] = 'freespace'\n\n # Set partition parameters\n self.user['partitions'] = {'name': ['boot', 'root'],\n 'size': [self.user['boot_size'],\n self.user['root_size']],\n 'filesystem': ['fat32', 'ext4'],\n 'mountpoint': ['/mnt/boot', '/mnt'],\n 'mountorder': [1, 0]}\n\n # Set swap size and filesystem\n if 'Swap' in self.user['optional_partitions']:\n self.user['partitions']['size'].insert(1, self.user['swap_size'])\n self.user['partitions']['filesystem'].insert(1, 'swap')\n\n # Set home size and filesystem\n if 'Home' in self.user['optional_partitions']:\n if self.user['home_freespace'] is True:\n self.user['home_size'] = 'freespace'\n 
self.user['partitions']['size'].append(self.user['home_size'])\n self.user['partitions']['filesystem'].append('ext4')\n\n # Custom partitions\n else:\n\n # Set partition parameters\n self.user['partitions'] = {\n 'name': ['boot', 'root'],\n 'drive_id': [self.user['boot_id'].split()[0],\n self.user['root_id'].split()[0]],\n 'mountpoint': ['/mnt/boot', '/mnt'],\n 'mountorder': [1, 0]}\n\n # Set swap drive ID\n if self.user['swap_id'] is not None:\n self.user['partitions']['drive_id'].insert(\n 1, self.user['swap_id'].split()[0])\n\n # Set home drive ID\n if self.user['home_id'] is not None:\n self.user['partitions']['drive_id'].append(\n self.user['home_id'].split()[0])\n\n # Set swap parameters\n if ('Swap' in self.user['optional_partitions']) or \\\n (self.user['swap_id'] is not None):\n self.user['partitions']['name'].insert(1, 'swap')\n self.user['partitions']['mountpoint'].insert(1, 'swap')\n self.user['partitions']['mountorder'].insert(1, 2)\n\n # Set home parameters\n if 'Home' in self.user['optional_partitions'] or \\\n (self.user['home_id'] is not None):\n self.user['partitions']['name'].append('home')\n self.user['partitions']['mountpoint'].append('/mnt/home')\n self.user['partitions']['mountorder'].append(3)", "def set_current(self, current):\n self._current = current", "def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()", "def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()", "def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, 
namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()", "def _set_active(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"active must be of a type compatible with boolean\"\"\",\n 'defined-type': \"boolean\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, is_leaf=True, yang_name=\"active\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)\"\"\",\n })\n\n self.__active = t\n if hasattr(self, '_set'):\n self._set()", "def _activate(self):\n self.active = True", "def setCurrent(self,name):\n \n self.current = name", "def setCurrent(self,name):\n self.current = name", "def active(self, value):\n self._active = value\n # Check if this is already linked with an object in the database.\n # If it is, change the username in the user account too.\n try:\n self.userprofile.user.is_active = value\n except UserProfile.DoesNotExist:\n pass", "def setValue(self, key, val):\n if key == 'active':\n self.active = val\n else:\n super(CREBundleDiagnosticPolicy, self).setValue(key, val)", "def set_active(self, employee_id, active):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET is_active = %s '\n 'WHERE id=%s;',\n (active, employee_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def activate(self):\n self.active = True", "def activate(self):\n self.active = True", "def setSuspended(self , curThread ):\n self._suspended.setSuspended(curThread)", "def active(self, active: NetworkWirelessAP | None) -> None:\n if self._active and self._active is not active:\n self._active.shutdown()\n\n self._active = active", "def set_current(self, to):\n self.current = to", "def SetSuspend(self, val):\n self.suspended = val\n if self.suspended:\n self.Disconnect()", "def partitioning_attribute(self, partitioning_attribute):\n\n self._partitioning_attribute = partitioning_attribute", "def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable", "def activate(self):\n 
self._is_active = True", "def setactive(self, scriptname):\n code, data = self.__send_command(\n \"SETACTIVE\", [scriptname.encode(\"utf-8\")])\n if code == \"OK\":\n return True\n return False", "def partition_key(self, partition_key):\n\n self._partition_key = partition_key", "def active_clusters(self, active_clusters):\n\n self._active_clusters = active_clusters", "def setCurrent(self, current, nm=None):\r\n #~ print \"character::setCurrent \" + str(self.id) + \"/\" + str(current)\r\n self.current = current\r\n if current:\r\n if nm is not None:\r\n self.ship.sendInfo(nm)", "def setActiveVal(selforcls, val, index = None):\n if not selforcls.isActive():\n logging.error(\"Value of parameter cannot be set, \"\n \"parameter not active!\")\n return\n\n tempVal = selforcls.activeValues()\n if index is None:\n index = len(tempVal) # append to end by default\n elif index < 0:\n # index == -1 means the last, not yet existing entry\n index = len(tempVal) + index + 1\n\n while len(tempVal) <= index:\n # expand list to allow storage of value\n tempVal.append(None)\n\n tempVal[index] = val\n selforcls.setActiveValues(tempVal)", "def setCurrent(self, value):\n\n\t\tself._current = self._setpoint - value", "def set_active(cls, name=None):\r\n if name is None:\r\n cls.active = True\r\n cls.non_actives = {} # Clear not actives\r\n else:\r\n if name in cls.non_actives:\r\n del cls.non_actives[name]", "def active_pod(self, pod_name: str) -> None:\n self.proto.active_pod = pod_name", "def set_inactive(self):\n if self.active is False:\n return\n self.active = False\n self.save()\n self.question_set.update(active=False)", "def set_current(self, val: int) -> None:\n self._bin_iter.set_current(val)", "def is_active(self, is_active):\n \n self._is_active = is_active", "def setfocus(self, focus):\n self.focus = self.data[focus]\n self.focus_stage = focus\n for k in self.focus.keys():\n setattr(self, k, self.focus[k])", "def partition1(self, partition1):\n\n self._partition1 = partition1", "def set_inactive(self):\n self.active = False", "def is_active(self, is_active):\n\n self._is_active = is_active", "def is_active(self, is_active):\n\n self._is_active = is_active", "def is_active(self, is_active):\n\n self._is_active = is_active", "def is_active(self, is_active):\n\n self._is_active = is_active", "def current_progress(self, current_progress):\n\n self._current_progress = current_progress", "def set_active(self) -> None:\n self.map.active_cam = self.map.cameras.index(self) + 1", "def setCurrentAccel(self):\n if self.currentAccel != self.accel:\n if self.currentAccel != round(self.accel,1):\n if self.currentAccel < self.accel:\n self.currentAccel += 0.01\n elif self.currentAccel > self.accel:\n self.currentAccel -= 0.01", "def provide_partition_info(self):\n self.partition_info = True", "def activate(self):\r\n self.update_enrollment(is_active=True)", "def standby(self):\n self._state = STATE_STANDBY", "def standby(self):\n self._state = STATE_STANDBY", "def _setCurrents(self, att, newdata):\n logger.debug(\"Func: _setCurrents\")\n\n self._currentsDict[att] = newdata\n self._saveUserPrefs(self._currentsDict)", "def set_no_longer_active(self):\n with self.redis_client.lock(\"active-lock\"):\n self.set_to_redis(\"active\", \"done\")", "def set_progress(self, current):\n self._current = current\n if self._last_time is None or (datetime.datetime.now() - self._last_time).seconds > 1:\n self._update_time()\n\n self._draw()\n if self._current == self._total:\n self.reset(0)", "def set_active(self, name):\n try:\n obj 
= self.get_by_name(name)\n item = obj.item\n group = self.group_items[obj.kind]\n\n group_index = self.index(group.row(), 0, QtCore.QModelIndex())\n item_index = self.index(item.row(), 0, group_index)\n\n self.view.selectionModel().select(item_index, QtCore.QItemSelectionModel.Select)\n except Exception as e:\n log.error(\"[ERROR] Cause: %s\" % str(e))\n raise", "def set_active_tool(self, tool=None):\n self.active_tool = tool", "def set_current(self, value):\n self.write(\":CURR {}A\".format(value))", "def partitionname(self, partitionname) :\n\t\ttry :\n\t\t\tself._partitionname = partitionname\n\t\texcept Exception as e:\n\t\t\traise e", "def start(self):\n self.active = True", "def set_piece_selected(self, uid, val):\n piece = self.get_piece_by_uid(uid)\n if piece:\n piece.selected = val", "def swap(isamAppliance, check_mode=False, force=False):\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n ret_obj_old = get(isamAppliance)\n\n ret_obj = isamAppliance.invoke_put(\"Swapping the active partition\",\n \"/firmware_settings/kickoff_swap\", {}, requires_model=requires_model)\n # Process previous query after a successful call to swap the partition\n for partition in ret_obj_old['data']:\n if partition['active'] is False: # Get version of inactive partition (active now!)\n ver = partition['firmware_version'].split(' ')\n isamAppliance.facts['version'] = ver[-1]\n\n return ret_obj", "def setPointsActive(self):\r\n for point in self.points:\r\n point.setActive()", "def test_partitioner(self):\n args = \"xyzzy\", set([1, 2, 3])\n partitioner = self.tx_client.SetPartitioner(*args)\n self.assertEqual(partitioner.state, PartitionState.ALLOCATING)\n self.assertEqual(partitioner._partitioner.args, args)\n self.assertEqual(partitioner._partitioner.kwargs, {})\n\n partitioner._partitioner.state = PartitionState.ACQUIRED\n self.assertEqual(partitioner.state, PartitionState.ACQUIRED)", "def start_bath(self) -> None:\n\n self.send(self.cmd.SET_HEATING_RUNNING, True)", "def on_edit_clicked(self,button):\n\t\tself.list_partitions.edit_partition()", "def setDisplayActiveRange(selforcls, newRange):\n newRange = (selforcls.toSi(min(newRange)),\n selforcls.toSi(max(newRange)))\n selforcls.setActiveRange(newRange)", "def set_exclusive_active(self, name):\n self.set_all_inactive()\n self.set_active(name)", "def setAvailability(self):\n self.available = not self.available", "async def ensure_active(self):\n if not self.active:\n await self.refresh()", "def active(value):\r\n self.context.active = threading.BoundedSemaphore(value=value)", "def set_current(idx):\n\n dev_dict = {dev.uuid: dev for dev in G.DEVICE_LIST}\n if idx in dev_dict:\n current_dev = dev_dict[idx]\n elif isinstance(idx, int) and idx < len(G.DEVICE_LIST):\n current_dev = G.DEVICE_LIST[idx]\n else:\n raise IndexError(\"device idx not found in: %s or %s\" % (\n list(dev_dict.keys()), list(range(len(G.DEVICE_LIST)))))\n G.DEVICE = current_dev", "def _set_current(self, parname, value):\n mod_id, dac = self.channel_map[parname]\n\n current_value = self.get(parname)\n\n def setter(v): return self.current_sources[mod_id].set_current(dac, v)\n ramp_values(start_val=current_value, end_val=value,\n ramp_rate=self.cfg_ramp_rate(),\n update_interval=0.1,\n callable=setter, verbose=self.cfg_verbose())\n self.current_sources[mod_id].set_current(dac, value)", "def make_active(self, request, queryset):\n queryset.update(is_active=True)", "def set_xp_partition(self,xp_partition):\n # setup partition: set x grid\n 
self.xp_partition = xp_partition\n self.xp_partition.setup_x_grid(xx=[0,self.L])\n # local copies of x and p grids\n self.x=self.xp_partition.x\n self.p=self.xp_partition.p\n # allocate fmci_XP array\n self.fmci_XP=np.zeros((self.xp_partition.nx,self.xp_partition.np))", "def active_users(self, active_users):\n\n self._active_users = active_users", "def is_current_node_active(self, device, partition):\n if self.is_version_sufficient(min_version='11.3.0') is False:\n print \"!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!\"\n print \"! UNABLE TO VERIFY FAILOVER STATE !\"\n print \"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n stop = raw_input('Do you want to continue? [y|N]')\n if stop.strip() == \"y\" or stop.strip() == \"Y\":\n return True\n else:\n return False\n \"\"\" Determines if the connect device is the master, if not Bail with an error.\"\"\"\n try:\n self.connection.System.Session.set_active_folder(\"/Common\")\n status = self.connection.Management.Device.get_failover_state([device])\n if status == ['HA_STATE_ACTIVE']:\n self.connection.System.Session.set_active_folder(\"/\"+partition)\n return True\n else:\n return False\n except:\n raise Exception(\"Failed to determine if {} is a master\".format(device))", "def activated(self, activated):\n\n self._activated = activated", "def set_beam_current(self, target_current):\n self.target_beam_current = target_current\n # Setting SEM to target beam current must be implemented in child class!", "def set_current_tool_to_pan(self):\n\n self.variables.active_tool = TOOLS.PAN_TOOL\n self.variables.current_tool = TOOLS.PAN_TOOL", "def __toggleCurrent(self):\n aw = self.activeWindow()\n if aw:\n aw.toggleCurrentFold()" ]
[ "0.6547811", "0.65143806", "0.6484135", "0.6361823", "0.6361823", "0.6361823", "0.6361823", "0.6254472", "0.6218527", "0.6194217", "0.61550754", "0.61421317", "0.61362016", "0.5958744", "0.5954031", "0.5954031", "0.5954031", "0.5954031", "0.5954031", "0.58707124", "0.5751636", "0.56785065", "0.5645239", "0.5633738", "0.55697733", "0.5534677", "0.5530588", "0.55240005", "0.5517493", "0.5517493", "0.5517493", "0.5517493", "0.5496251", "0.5490448", "0.5450452", "0.5428721", "0.54285634", "0.5415587", "0.5400252", "0.5400252", "0.53992784", "0.53961766", "0.5367797", "0.5362235", "0.535114", "0.53473777", "0.53335166", "0.529868", "0.52903557", "0.52722555", "0.5263798", "0.5227958", "0.5194311", "0.518651", "0.51864105", "0.5165302", "0.51537746", "0.51505685", "0.51396996", "0.5130838", "0.5129432", "0.51142323", "0.51142323", "0.51142323", "0.51142323", "0.50943756", "0.5091848", "0.5085559", "0.50808084", "0.507813", "0.50452757", "0.50452757", "0.50366735", "0.5032614", "0.50130224", "0.4992632", "0.49891302", "0.49856353", "0.49837822", "0.49706617", "0.4954063", "0.49454647", "0.49365443", "0.49329543", "0.4914225", "0.49102435", "0.49091122", "0.49031293", "0.48965782", "0.48936784", "0.48904064", "0.48895064", "0.48890248", "0.4888378", "0.48869243", "0.48840234", "0.48808745", "0.48765987", "0.48724362", "0.48716906", "0.4871169" ]
0.0
-1
If data has not been processed, calls process_data. Returns None.
def check_Data(self):
    if self._target_data is None:
        self.processData()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(proc_data):\n\n # No further processing\n return proc_data", "def process_data(self, data):\n return data", "def process_data_impl(\n self,\n data_dir: Path,\n output_processed_data_dir: Path,\n ) -> NoReturn:\n pass", "def run(self, data):\n\t\t# no processing here\n\t\treturn data", "def _async_process_data(self):\n raise NotImplementedError", "def post_process(cls, data):\n return data", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def data_received(self, data):\n if self.session is not None:\n size = len(data)\n self.logger.debug('process_data(%r, %d)', data, size)\n self.session.process_data(data, size)", "def process(self, data) :\n rData = Core.Processlib.Data()\n rData.frameNumber = data.frameNumber\n rData.buffer = self._worker.process(data.buffer)\n if self._writer: #optional HDF5 writer\n self._writer.write(rData.buffer, rData.frameNumber)", "def prepare_process(self, dataset):\n if dataset is not None:\n pass", "def postprocess(self, data, pagination):\n self.inject_data_hook(data)\n # Serialize ``data`` to python data structures\n python_data = self.serialize_to_python(data)\n # finalize any pending data processing\n self.finalize_pending(data)\n # Package the python_data to a dictionary\n return self.package(python_data, pagination)", "def process(self, data):\n return self.transformer.transform(data)", "def process_data(data):\n bio = BytesIO()\n bio.write(data)\n bio.seek(0)\n process(bio)", "def process_data(self, windowed_data):\n return", "def process(self, data) :\n rData = Core.Processlib.Data()\n rData.frameNumber = data.frameNumber\n rData.buffer = self._worker.process(data.buffer)\n if self._writer: #optional HDF5 writer\n self._writer.write(rData.buffer, rData.frameNumber)\n return rData", "def processData(self, rawData, queryMeta=None):\n\t\treturn self.service.run(self, rawData, queryMeta)", "def _data_process(self, v):\n pass", "def _hook_data(self, data):\n if self.data_hook is not None:\n self.data_hook(data)", "def _finalize_data(self, data):\n return data", "def process(self, data, channel = None):\n\t\traise NotImplementException()", "def process_data(self, value):\n try:\n self.data = value.value\n except AttributeError:\n self.data = value", "def process(self):\n\n # validate processing\n if self.is_acceptable():\n # handle data and write log\n self.handle()", "def _async_process_data(self):\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = 
data[\"relay\"]\n\n self.async_write_ha_state()", "def processData(self, json):\r\n if json[\"data\"] is not None:\r\n return json[\"data\"]\r\n else:\r\n pass\r\n # Raise Exception", "def process(self, data: ByteString) -> Union[Optional[ByteString], Iterable[ByteString]]:", "def test_process_data(self):\n pass", "def handle_data(self, data):\n \n line_num, offset = self.getpos()\n new_pos = self.new_line_pos[line_num] + offset\n self.data_buffer += self.html_doc[self.current_pos:new_pos]\n\n content = data\n if self.filtering:\n content = self.typogrify._apply_filters(content, self.lasttag)\n self.filtered_data_length = len(content)\n\n self.data_buffer += content\n self.current_pos = new_pos + len(data)", "def process(self):\n raise NotImplementedError", "def on_data(self, data):\n if data is not None:\n # Send the data to the parent process\n logging.debug('Received raw data : ' + str(data))\n self.mp_queue.put(data)", "def process_data_callback(self, res):\n self.current_in_progress -= 1", "def handle_data(self, data):\n if self.article_body:\n if not self.suspend_acquisition:\n self.article_data += data", "def process_data(\n self,\n data_dir: Path,\n output_processed_data_dir: Path,\n is_train: bool = False,\n ) -> NoReturn:\n self.logger.info(self.logging_prefix + f\"Processing data from {data_dir} to {output_processed_data_dir}\")\n IOUtils.rm_dir(output_processed_data_dir)\n IOUtils.mk_dir(output_processed_data_dir)\n\n if is_train:\n # Preprocess with training data, if needed\n self.preprocess_with_train_data(data_dir, output_processed_data_dir)\n # end if\n\n self.process_data_impl(data_dir, output_processed_data_dir)\n return", "def process(self):\n try:\n if not self._successor:\n return self.loading_strategy()\n else:\n return self._successor.process_next(self.loading_strategy())\n except Exception as e:\n Oprint.err(e, 'lmdo')", "def on_data(self, data: str) -> bool:\n def process_tweet(t_data):\n self.tweet_list.append(json.loads(t_data))\n if len(self.tweet_list) >= self.batch_size:\n processing.process_tweets(self.tweet_list, self.tweet_handler_map)\n self.total_tweet_counter += len(self.tweet_list)\n if self.total_tweet_counter % 1000 == 0:\n self.report_tweet_count()\n self.tweet_list = []\n return True\n\n if self.time_limit:\n # Check if the time limit has elapsed\n if time.time() - self.start_time < self.time_limit:\n process_tweet(data)\n else:\n print('Stopping tweet collection')\n logger.info('Stopping tweet collection')\n processing.process_tweets(self.tweet_list, self.tweet_handler_map, force_write=True)\n return False\n else:\n # Process data infinitely\n process_tweet(data)", "def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. 
it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data", "def receive_data_chunk(self, raw_data, start):\r\n if not self.active:\r\n return raw_data", "def preProcess(self, datum):\n pass", "def on_data_received(self, data):\n # pylint: disable=too-many-branches,too-many-statements\n\n if self.is_receiving_data is True:\n self._buffer += data\n return\n\n try:\n self.is_receiving_data = True\n self._buffer += data\n\n # Keep looping while we have unprocessed data\n # We start processing only once we have an entire field\n # (e.g. 'id=value') in the buffer, otherwise wait for more\n # data.\n # The problem with the current approach is that if there is a\n # binary field with an incorrect length, we may read past\n # the end of the message.\n # BUGBUG: Need to fix this. A quick hack may be to\n # try to peek to see what the tag id is and do something\n # with that. On the other hand this may just be a problem\n # with the protocol (should probably specify a maximum\n # allowable length of a binary field as a sanity check)\n while (len(self._buffer) > 0 and\n self._buffer.find(b'\\x01', self._binary_length + 1) != -1):\n\n # Need to make sure that we have the entire binary field\n # before continuing the processing\n if (self._binary_length > 0 and\n len(self._buffer) < self._binary_length):\n break\n\n # break up the field\n delim = self._buffer.find(b'\\x01', self._binary_length + 1)\n field = self._buffer[:delim]\n self._buffer = self._buffer[delim+1:]\n\n tag_id, value = self._parse_field(field)\n\n # Is this the start of a message?\n if tag_id == 8:\n if self.is_parsing:\n raise FIXParserError('unexpected tag: 8')\n self.is_parsing = True\n elif not self.is_parsing:\n raise FIXParserError('message must start with tag 8')\n\n if self._debug:\n log_text(self._logger.debug, None,\n f\"tag {tag_id} = {repr(value)}\")\n\n self._update_length(field, tag_id, value)\n self._update_checksum(field, tag_id, value)\n self._update_binary(field, tag_id, value)\n\n # The tag value gets assigned here. 
Due to grouping\n # the container where the update takes place gets\n # changed\n # self._message[tag_id] = value\n self._update_field(tag_id, value)\n\n # Is this the end of a message?\n if tag_id == 10:\n self._receiver.on_message_received(self._message,\n self._message_length,\n self._checksum)\n self.reset()\n\n except FIXLengthTooLongError as err:\n self.reset(flush_buffer=True)\n self._receiver.on_error_received(err)\n except FIXParserError as err:\n self.reset(flush_buffer=True)\n self._receiver.on_error_received(err)\n finally:\n self.is_receiving_data = False", "def _process(self):\n self.kwargs[\"collect\"].process_scan_form_data(self.kwargs[\"data\"])", "def run(self, data: PipeLineDataObject) -> PipeLineDataObject:\n raise NotImplementedError", "def process(self, data, output, processes, process):\n slice_list = du.get_grouped_slice_list(data, self.get_filter_frame_type(), self.get_max_frames())\n self._process_chunks(slice_list, data, output, len(processes), process)", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def process_frames(self, data):\n pass", "def data_received(self, data):\n pass", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def handle(self, rawdata):\r\n\r\n return self.__filter(self.__handler(rawdata))", "def collect_incoming_data(self, data):\n self.logger.debug('collect_incoming_data() -> (%d)\\n\"\"\"%s\"\"\"', len(data), data)\n self.received_data.append(data)", "def post_load(self, data):\n return data", "def process(self):\n pass", "def process_pickle(self, data):\n if self._handler is None:\n raise NotImplementedError\n\n self.notify_started()\n self.send_pickle(self._handler(data))", "def process(self, data):\n if self.__head:\n self.__head.send(Element(\n stream_id=self.id,\n data=data))", "def process(self, data, reset=False):\n data = np.asarray(data)\n self.check_dims(data)\n data = self.highpass_filter(data, reset=reset)\n data = self.lowpass_filter(data, reset=reset)\n data = self.resample(data)\n data = self.reref_data(data)\n data = self.select_channels(data)\n data = self.normalize_data(data)\n data = self.add_context(data)\n return data", "def process(self, data):\n allocating = (self._output is None)\n ind = 0\n for i, (name, feature) in enumerate(self.features):\n if allocating:\n x = feature.compute(data)\n self.feature_indices[name] = (ind, ind+x.size)\n ind += x.size\n\n if self._output is None:\n self._output = x\n else:\n self._output = np.hstack([self._output, x])\n else:\n self._output[self.feature_indices[name][0]:\n self.feature_indices[name][1]] = \\\n feature.compute(data)\n\n return self._output", "def got_data(self, data):\n if self.get_current_state() == SBE37ProtocolState.DIRECT_ACCESS:\n # direct access mode\n if len(data) > 0:\n mi_logger.debug(\"SBE37Protocol._got_data(): <\" + data + \">\") \n if self._driver_event:\n self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)\n # TODO: what about logging this as an event?\n return\n \n if len(data)>0:\n # Call the superclass to update line and prompt buffers.\n CommandResponseInstrumentProtocol.got_data(self, data)\n \n # If in streaming mode, process the buffer for samples to publish.\n cur_state = self.get_current_state()\n if cur_state == SBE37ProtocolState.AUTOSAMPLE:\n if 
SBE37_NEWLINE in self._linebuf:\n lines = self._linebuf.split(SBE37_NEWLINE)\n self._linebuf = lines[-1]\n for line in lines:\n self._extract_sample(line)", "def handle(self, data):\n pass", "def handle_data(self, data_type, data_size, socket, data):\n return self.game.process_exnternal_request(socket, data_type, data_size, data)", "def process(self, input, is_processed=False):\n raise NotImplementedError", "def _process(self, data, cache):\n stop = False\n try:\n super(PickleCache, self).process(data)\n except StopIteration:\n stop = True\n\n data_to_save = data\n\n cache = dict() if cache is None else cache\n cache[self.chain_info['chain_hash']] = {\"data\": data_to_save,\n \"stopped\": stop,\n 'chain_repr': self.chain_info[\n 'chain_repr'],\n 'chain_mtime': self.chain_info[\n 'chain_mtime']}\n return cache, stop", "def _process(proc_data: List[Dict]) -> List[Dict]:\n return proc_data", "def collect_incoming_data(self, data):\n self.__input.append(data)", "def processing(self):\n pass", "def handle_received(self) -> None:\n self.buffer: bytes\n while len(self.buffer) >= DataHeader.length:\n header = DataHeader.decode(self.buffer, allow_excessive=True)\n if len(self.buffer) < header.size:\n _LOGGER.debug(\n \"Not enough data on data channel (has %d, expects %d)\",\n len(self.buffer),\n header.size,\n )\n break\n\n try:\n self._process_message_from_buffer(header)\n except Exception:\n _LOGGER.exception(\"failed to process data frame\")\n\n self.buffer = self.buffer[header.size :]", "def handle(self, data, context):\n \n model_input = self.preprocess(data)\n model_out = self.inference(model_input)\n return self.postprocess(model_out)", "def handle_data(self, data):\n if verbose(): print(\"TIParser.handle_data(self, '%s')\" % (data))\n pass", "def process_data(self, data):\n # Decode the incoming data.\n try:\n message = json.loads(data.decode('utf-8'))\n except ValueError:\n self.disconnect(\"Bad request received\")\n logger.warning(\"Cannot parse incoming message, discarding.\")\n return\n\n self.process_incoming_request(message)", "def justhandle(self, rawdata):\r\n\r\n return self.__handler(rawdata)", "def run(self, dataRef):\n self.log.info(\"Processing %s\" % (dataRef.dataId))\n\n # initialize outputs\n skyInfo = getSkyInfo(coaddName=self.config.coaddName, patchRef=dataRef)\n coadd = dataRef.get(self.config.coaddName + \"Coadd\")\n if self.config.doScaleVariance:\n self.scaleVariance(coadd)\n\n # delegate most of the work to ProcessImageTask\n result = self.process(dataRef, coadd, enableWriteSources=False)\n result.coadd = coadd\n\n if result.sources is not None:\n self.setPrimaryFlags.run(result.sources, skyInfo.skyMap, skyInfo.tractInfo, skyInfo.patchInfo,\n includeDeblend=self.config.doDeblend)\n self.propagateFlags.run(dataRef.getButler(), result.sources,\n self.propagateFlags.getCcdInputs(coadd), coadd.getWcs())\n\n # write sources\n if self.config.doWriteSources:\n dataRef.put(result.sources, self.dataPrefix + 'src')\n\n self.log.info(\"Finish processing %s\" % (dataRef.dataId))\n\n return result", "def run(self):\r\n self.collect_data()", "def collect_incoming_data(self, data):\n self.in_buffer.append(data)\n self.in_buffer_len += len(data)\n # Flush buffer if it gets too long (possible DoS attacks).\n # RFC-959 specifies that a 500 response could be given in\n # such cases\n buflimit = 2048\n if self.in_buffer_len > buflimit:\n self.respond('500 Command too long.')\n self.log('Command received exceeded buffer limit of %s.' 
%(buflimit))\n self.in_buffer = []\n self.in_buffer_len = 0", "def _consumer(self) -> None:\n while (data := self._q.get()) is not None:\n write_data(data, self.writer)\n self._q.task_done()\n else:\n logging.info(\"None received. Queue consumed.\")\n self._q.task_done()\n return", "def _process(self):\n export_collect_data(self.kwargs[\"collect\"])", "def collect_data(self):\n self.logger.info(\"Waiting for incoming data ...\")\n while True:\n item = self.in_queue.get()\n self.logger.info(\"Received data!\")\n self.collector_process_data(item)", "def process(self):\n raise NotImplementedError('Method must be implemented by subclass.')", "def process_frame(self, data):\n logging.error(\"filter_frame needs to be implemented for %s\",\n data.__class__)\n raise NotImplementedError(\"filter_frame needs to be implemented\")", "def process_data(self, data):\n # Decode the incoming data.\n try:\n message = json.loads(data.decode('utf-8'))\n except ValueError:\n self.disconnect(\"Bad response received\")\n logger.warning(\"Cannot parse incoming message, discarding.\")\n return\n\n self.process_incoming_response(message)", "def process(self):\n self.extract()\n self.transform()\n self.load()", "def process():", "def handle_execution(self, data, *args, **kwargs):\n return {}", "def handle_execution(self, data, *args, **kwargs):\n return {}", "def post_execute(self, data):\n return data", "def handle_data(self, data):\n if len(self.current_tags) > 0:\n self.current_tags[-1].add_data(data)", "def process(self, data_itr):\n for data in data_itr:\n self.update(data)\n while True:\n try:\n out = self.next()\n yield out\n except StopIteration:\n break", "def process():\n pass", "def process(self, output_data: Answer) -> bool:\n return True", "def _process_data(self, data: T) -> List[TestGroupReport]:\n raise NotImplementedError", "def collect_incoming_data(self, data):\n self.l.debug('data -> (%d bytes):\"%s\"', len(data), data)\n self.received_data.append(data)", "def reprocessSeries(self, tiltseriesdata):\n\t\treturn None", "def dataReceived (self, data) :\r\n \r\n buf = buffer.Buffer(self.recvbuffer + data)\r\n \r\n # process packets until there are no more of them\r\n\r\n try :\r\n buf.processWith(self.processPacket)\r\n except BaseClientAbort, e :\r\n self.do_abort(e.errorCode())\r\n \r\n self.log(\"closing connection\")\r\n self.transport.loseConnection()\r\n \r\n except BaseClientError, e :\r\n self.do_error(e.errorCode())\r\n \r\n except Exception, e :\r\n self.log(\"unknown exception %s: %s\" % (type(e), e))\r\n \r\n self.log(\"closing connection\")\r\n self.transport.loseConnection()\r\n \r\n raise\r\n \r\n # stuff remaining data back into recvbuf\r\n self.recvbuffer = buf.read()", "def post_process(self):\n pass", "def post_process(self):\n pass" ]
[ "0.78714263", "0.7426427", "0.7234299", "0.72270745", "0.71373206", "0.69578606", "0.67860335", "0.67860335", "0.67753035", "0.67753035", "0.67753035", "0.67753035", "0.67753035", "0.67753035", "0.66798294", "0.6571338", "0.6558113", "0.6555273", "0.6539222", "0.64652324", "0.6426956", "0.6377317", "0.63754314", "0.63746464", "0.6223275", "0.6220969", "0.62132", "0.6196978", "0.6181765", "0.60985607", "0.6083596", "0.6044093", "0.6042522", "0.6035727", "0.60221505", "0.60162365", "0.59799474", "0.5965793", "0.59602284", "0.5956016", "0.5949934", "0.5915429", "0.5914213", "0.5906629", "0.5890644", "0.5885586", "0.58841056", "0.58753806", "0.5872866", "0.5872866", "0.5872866", "0.5872866", "0.5872866", "0.58727044", "0.58636445", "0.5863343", "0.58585966", "0.58362967", "0.58346003", "0.5824776", "0.5820244", "0.57962376", "0.57938576", "0.5792019", "0.57885367", "0.5784914", "0.57666135", "0.57527524", "0.5743355", "0.5735467", "0.57238525", "0.57232106", "0.57105917", "0.5699669", "0.56958365", "0.56908554", "0.56755537", "0.56666094", "0.56564105", "0.56534684", "0.5640563", "0.5629502", "0.56215316", "0.56203336", "0.56084526", "0.56006444", "0.5592877", "0.55860025", "0.55860025", "0.5576149", "0.556914", "0.5554768", "0.5553427", "0.55465645", "0.5529778", "0.55292106", "0.5527937", "0.55264926", "0.55250084", "0.55250084" ]
0.70434296
5
Determines the number of batches.
def get_num_batches(self, batch_size):
    return len(self) // batch_size
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def number_of_batches(self):\n return int(np.floor(len(self.file_paths_list) / self.batch_size))", "def batch_size(self) -> int:\n ...", "def num_batches(self):\n\t\t\n\t\treturn len(self.batch_stats)", "def _update_num_batches(self):\n # maximum possible number of batches is equal to number of whole times\n # batch_size divides in to the number of data points which can be\n # found using integer division\n possible_num_batches = self.inputs.shape[0] // self.batch_size\n if self.max_num_batches == -1:\n self.num_batches = possible_num_batches\n else:\n self.num_batches = min(self.max_num_batches, possible_num_batches)", "def batch_size(self):\n return self.size", "def max_num_batches(self):\n return self._max_num_batches", "def total_train_batches(self) -> int:\n return self.trainer.num_training_batches", "def __len__(self) -> int:\n num_batches, remainder = divmod(len(self.mapped_triples), self.batch_size)\n if remainder and not self.drop_last:\n num_batches += 1\n return num_batches", "def get_batch_size():\n return get_global_variable(GraphKeys.BATCH_SIZE)", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self):\n return self._batch_size", "def batch_size(self) -> ConfigNodePropertyInteger:\n return self._batch_size", "def total_test_batches(self) -> int:\n return sum(self.trainer.num_test_batches)", "def batch_size(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"batch_size\")", "def num_training_batches(self, edge_type: Tuple[int, int], edge_class: int) -> int:\n return len(self.train_edges[edge_type][edge_class]) // self.batch_size", "def count_records(batches: List[Batch]) -> int:\n return sum(b.current_size for b in batches)", "def record_batch_size(self):\n return 10000", "def batch_size(self):\n if self._batch_size is not None:\n return self._batch_size # custom batch size defined\n if self.task == 'objdet':\n return 8\n annos_per_img = self._annos_per_img[self.dataset]\n if self.task in {'predcls', 'sgcls'}:\n annos_per_img = annos_per_img['pairs']\n elif self.task == 'objcls':\n annos_per_img = annos_per_img['objects']\n elif self.task == 'preddet' and self.filter_multiple_preds:\n annos_per_img = annos_per_img['predicates_filtered']\n elif self.task == 'preddet' and self.filter_duplicate_rels:\n annos_per_img = annos_per_img['duplicates_filtered']\n elif self.task in {'preddet', 'sggen'}:\n annos_per_img = annos_per_img['relations']\n batch_size = ceil(self._annotations_per_batch / annos_per_img)\n return max(batch_size, 2)", "def _get_batch_size(self):\n if self.batch_size == 'auto':\n return self._backend.compute_batch_size()\n else:\n # Fixed batch size strategy\n return self.batch_size", "def total_predict_batches(self) -> int:\n return sum(self.trainer.num_predict_batches)", "def get_evaluation_batch_size():\n return 1", "def batch_size(features, labels):\n return extract_batch_length(features)", "def batch_size(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_size\")", "def get_num_batches(self, instances: Iterable[Instance]) -> int:\n n_docs = len(set([instance[\"metadata\"][\"doc_key\"] for instance in instances]))\n return n_docs", "def get_num_chunks(self) -> int:", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> 
typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def batch_size(self) -> typing.Optional[jsii.Number]:\n return self._values.get('batch_size')", "def __len__(self):\n return len(self.batches)", "def __len__(self):\n return math.ceil(self.number_of_images / self.batch_size)", "def batch_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"batch_node_count\")", "def __len__(self):\n return self.limit_batches", "def ExpectedMaxBatchSizes(self, run_params):\n return self.max_batch_sizes", "def batch_request_size(self):\n return self._batch_request_size", "def __len__(self):\n return int(np.ceil(len(self.ids) / self.batch_size))", "def num_sown_batches(self):\n self.calc_progress()\n return self._num_sown_batches", "def batch_per_file(self):\n return(self.X_x_Y // self.batch_size)", "def batch_shape(self) -> torch.Size:\n self._check_if_fitted()\n return torch.Size([self.num_mcmc_samples])", "def __len__(self):\n gen_len = len(self.image_ids) // self.batch_size\n if len(self.image_ids) % self.batch_size != 0:\n gen_len += 1\n return gen_len", "def get_batch_size(self, recip_count: int) -> int:\n Logger.debug(f'In get_batch_size.', TAG)\n yesterdays_count = self._yesterdays_bet_count.get()\n if yesterdays_count < 1:\n yesterdays_count = 1\n size = (DIST_DURATION_PARAM * recip_count // yesterdays_count)\n if size < TX_MIN_BATCH_SIZE:\n size = TX_MIN_BATCH_SIZE\n if size > TX_MAX_BATCH_SIZE:\n size = TX_MAX_BATCH_SIZE\n Logger.debug(f'Returning batch size of {size}', TAG)\n return size", "def batch_num_nodes(self, ntype=None):\n return self._batch_num_nodes[self.get_ntype_id(ntype)]", "def __len__(self) -> int:\n return len(self.reps_batches)", "def batch_steps(num_examples, batch_size):\n steps = num_examples // batch_size\n if num_examples % batch_size > 0:\n steps += 1\n return steps", "def _global_batch_size(self):\n return True", "def num_training_steps(self, num_batches, gradient_accumulation):\n return len(\n [i for i in range(self.num_mini_batches + 1, self.num_mini_batches + num_batches + 1) if\n i % gradient_accumulation == 0])", "def test_num_training_batches(tmpdir):\n # when we have fewer batches in the dataloader we should use those instead of the limit\n model = EvalModelTemplate()\n trainer = Trainer(limit_val_batches=100, limit_train_batches=100, max_epochs=1)\n trainer.fit(model)\n\n assert len(model.train_dataloader()) == 10\n assert len(model.val_dataloader()) == 10\n assert isinstance(trainer.num_val_batches, list)\n assert trainer.num_val_batches[0] == 10\n assert trainer.num_training_batches == 10\n\n # when we have more batches in the dataloader we should limit them\n model = EvalModelTemplate()\n trainer = Trainer(limit_val_batches=7, limit_train_batches=7, max_epochs=1)\n trainer.fit(model)\n\n assert len(model.train_dataloader()) == 10\n assert len(model.val_dataloader()) == 10\n assert isinstance(trainer.num_val_batches, list)\n assert trainer.num_val_batches[0] == 7\n assert trainer.num_training_batches == 7", "def get_generator_batch_size(self):\n\n return self.generator_batch_size", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def get_step_size(total_items, batch_size):\n return np.ceil(total_items / batch_size)", "def __len__(self):\n return int(np.floor(len(self.ids) / self.batch_size))", "def num_training_steps(self) -> int:\n if self.trainer.max_steps:\n return self.trainer.max_steps\n\n limit_batches = self.trainer.limit_train_batches\n batches = 
len(self.train_dataloader())\n batches = (\n min(batches, limit_batches)\n if isinstance(limit_batches, int)\n else int(limit_batches * batches)\n )\n\n num_devices = max(1, self.trainer.num_gpus, self.trainer.num_processes)\n if self.trainer.tpu_cores:\n num_devices = max(num_devices, self.trainer.tpu_cores)\n\n effective_accum = self.trainer.accumulate_grad_batches * num_devices\n return (batches // effective_accum) * self.trainer.max_epochs", "def batch_size(self) -> Optional[int]:\n if self.batched:\n raise RuntimeError(\n 'Environment %s marked itself as batched but did not override the '\n 'batch_size property'\n % type(self)\n )\n return None", "def __len__(self):\n return int(np.ceil(self.max_index / float(self.batch_size)))", "def get_global_batch_size(self) -> int:\n return self._global_batch_size", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def number_of_iterations(self) -> int:\n pass", "def batch_len(batch):\n flatlist, _ = tree_util.tree_flatten(batch)\n if len(flatlist) < 1:\n return 0\n b = flatlist[0].shape[0]\n assert all(\n arr.shape[0] == b for arr in flatlist if th.is_tensor(arr)\n ), \"Not all arrays have same batchsize!\"\n return b", "def __len__(self) -> int:\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations", "def get_number_of_training(self):\n return self.n_train", "def train(self, num_batches: int):", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def get_per_slot_batch_size(self) -> int:\n return self._per_slot_batch_size", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def num_training_examples(self):", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def batch_size(self):\n self.validate_shape_and_dtype()\n return self.rgb.shape[0]", "def __len__(self):\n return int(np.floor(len(self.list_ids) / self.batch_size))", "def total_val_batches(self) -> int:\n total_val_batches = 0\n if self.trainer.enable_validation:\n is_val_epoch = (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0\n total_val_batches = sum(self.trainer.num_val_batches) if is_val_epoch else 0\n\n return total_val_batches", "def num_examples_per_epoch(self):\n\t\tif self.subset == 'train':\n\t\t\treturn 50000\n\t\tif self.subset == 'validation':\n\t\t\treturn 10000", "def num_partitions(self): # -> int:\n ...", "def batch_size(self):\n return self._first_rgb.shape[0]", "def number_of_sample_loops(self) -> int:\n return self.__number_of_sample_loops", "def size(self, batch):\n x,y,m = batch \n return sum([mm.sum() for mm in m])", "def __init__(self):\n self.num_mini_batches = 0", "def get_n_splits(self):\n return self.n_folds", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def getBatchSize(self, context, 
obj):\n return 10", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def __len__(self):\n return len(self.indexes) // self.batch_size", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def get_n_splits(self):\n return self.n_splits", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def __len__(self):\r\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def num_examples_per_epoch(mode):\n if mode == tf.estimator.ModeKeys.TRAIN:\n return 1281167\n return 50000", "def getBatchSize(self, context, obj):\n return 100", "def batch_size(self, batch_size: ConfigNodePropertyInteger):\n\n self._batch_size = batch_size", "def __len__(self):\n return math.ceil(len(self._sampler) / self._batch_size)", "def __len__(self):\n return self.nb_iterations", "def length(self, data: Sequence[Sequence[torch.Tensor]]) -> int:\n return self.n_batch", "def get_n_splits(self):\n pass", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def num_chunking_units(self):\n if self._source_paths:\n return len(self._source_paths)\n return 1", "def num_examples_per_epoch(mode):\n if mode == tf.estimator.ModeKeys.TRAIN:\n return 45000\n return 5000" ]
[ "0.8359936", "0.8323407", "0.80367476", "0.7839606", "0.778999", "0.77374494", "0.76503813", "0.7589813", "0.752585", "0.7504559", "0.7504559", "0.7504559", "0.7504559", "0.7446192", "0.7433917", "0.7410418", "0.73296297", "0.7293308", "0.7237539", "0.7229498", "0.7215725", "0.7209173", "0.7190623", "0.7183909", "0.7177883", "0.71027505", "0.7093445", "0.70683944", "0.70683944", "0.70683944", "0.70683944", "0.7042369", "0.7035318", "0.7032486", "0.70092386", "0.69775045", "0.69540507", "0.69486225", "0.69432753", "0.69311863", "0.6925833", "0.6872396", "0.68645483", "0.6853549", "0.6838526", "0.68334484", "0.6820909", "0.6791572", "0.6780532", "0.6769358", "0.676919", "0.67318064", "0.6729144", "0.67187136", "0.67134434", "0.6711716", "0.67070293", "0.6693833", "0.66911966", "0.6666094", "0.6660209", "0.6647381", "0.6633696", "0.66253185", "0.66173226", "0.6610618", "0.6602948", "0.65851873", "0.658227", "0.65688", "0.6568668", "0.6557634", "0.655454", "0.653289", "0.65221786", "0.65163803", "0.6513572", "0.6510456", "0.65081644", "0.6465882", "0.6465882", "0.6465882", "0.64587677", "0.6455696", "0.6455696", "0.6448675", "0.6448675", "0.6448675", "0.644559", "0.64442015", "0.64440745", "0.64382267", "0.64290524", "0.64221865", "0.6395248", "0.6384294", "0.63835335", "0.6382416", "0.63759625", "0.63708264" ]
0.82500005
2
Returns the number of features in the processed data. Returns int Feature size.
def get_num_features(self):
    return len(self[0]['x'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def features_size(self) -> int:\n return len(self.data[0].features) if len(self.data) > 0 and self.data[0].features is not None else None", "def getNrFeatures(self):\n return self.featureNames.size", "def num_features(self):\n if self.x is None:\n return 0\n return 1 if self.x.dim() == 1 else self.x.size(1)", "def get_num_features(self, ndim: int) -> int:\n nb_features = 0\n for feature_group in self.features_group_list:\n nb_features += feature_group.num_features(ndim)\n return nb_features", "def get_n_features(self):\n # +1 due to dummy bit\n return self.model.n_latent_features + 1", "def nr_features(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_features\")\n else:\n return self.model_chain[0].nr_features", "def features_size(self) -> int:\n return None", "def n_features(self):\n return self.components.shape[-1]", "def num_features(self) -> Dict[NodeType, int]:\n return self.num_node_features", "def num_feature(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumFeature(self.handle, ctypes.byref(out)))\n return out.value", "def num_node_features(self):\n return self[0].num_node_features", "def num_node_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_node_features'):\n return data.num_node_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_node_features'\")", "def feature_len(self):\n return len(self.coord)", "def _n_features_out(self):\n return self.components_.shape[0]", "def num_flat_features(self, x):\n size = x.size()[1:] # all dimensions except the batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n return num_features", "def num_flat_features(self, x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features", "def __len__(self):\n return len(self.features)", "def size(self):\r\n return len(self._train_datas)", "def feature_size(self):\n return self.fingerprint_length", "def n_good_features_(self):\n return np.sum(self.important_features_)", "def size(self):\n return _libsbml.ListOfSpeciesFeatures_size(self)", "def feature_dim(self):\n raise NotImplementedError", "def get_train_data_size(self):\n return len(self.pipeline.data['train'])", "def num_flat_features(x):\n\n size = x.size()[1:] # All dimensions except batch dimension\n num_features = 1\n for s in size:\n num_features *= s\n\n return num_features", "def num_edge_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_edge_features'):\n return data.num_edge_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_edge_features'\")", "def num_edge_features(self):\n return self[0].num_edge_features", "def num_flat_features(self, x):\n return int(np.prod(x.size()[1:]))", "def dim(self):\n if self._classifier is None:\n with self:\n return self._classifier.features_dim\n\n return self._classifier.features_dim", "def count(self):\r\n return self.data_array.size", "def count_unique_features(self):\n return N_UNIQUE_FEATS", "def __len__(self):\n return 1 + len(self.features)", "def getNumSpeciesFeatures(self):\n return _libsbml.ListOfSpeciesFeatures_getNumSpeciesFeatures(self)", "def feature_dim(self):\n return feature_dim_from_test_system(self)", "def _predict_feature_sizes(self):\n return self._feature_sizes", "def __len__(self):\n key = list(self.keys())[0]\n 
feature = self[key]\n return len(feature)", "def _len_feature_list(tf_feature_list):\n return len(tf_feature_list.feature)", "def get_number_of_features(key):\n sum = 0\n for name, module in common.QOL_PARAMS[key].items():\n sum += module.LENGTH\n\n return sum", "def get_num_features(corpus_file, side):\n if side == 'src':\n num_feats = 0\n else:\n with codecs.open(corpus_file, \"r\", \"utf-8\") as cf:\n f_line = cf.readline().strip().split()\n _, _, num_feats = ImageDataset.extract_text_features(f_line)\n\n return num_feats", "def n_train(self):\n return self.factors[0].shape[0]", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\n if self._train:\n return len(self._train_data)\n return len(self._test_data)", "def __len__(self):\r\n return len(self.train_data)", "def num_training_examples(self):", "def get_feature_size_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n return shape[1]", "def get_feature_size_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n return shape[1]", "def features_dim(self):\n if not self.exposes_features:\n return None\n\n dim = self._features_op.outputs[0].get_shape().as_list()[-1]\n if dim is None:\n logger.warning(\n \"Unable to statically get feature dimension; returning None\"\n )\n\n return dim", "def num_feature_outputs(self):\n pass", "def num_of_classes(self):\n return len(self.classes_())", "def num_of_classes(self):\n return len(self.classes_())", "def data_size(self) -> int:\n return len(self.__labels)", "def get_num_train_samples(self):\n raise NotImplementedError", "def data_count(self):\n return(len(self.data))", "def _len_feature(tf_feature):\n assert(tf_feature)\n attrs = ['bytes_list', 'float_list', 'int64_list']\n for attr in attrs:\n if hasattr(tf_feature, attr):\n feature_vec = getattr(tf_feature, attr).value\n res = len(feature_vec)\n if res > 0:\n return res\n return 0", "def getSampleCount(self):\r\n return len(self._data)", "def getNumSpeciesFeatures(self):\n return _libsbml.MultiSpeciesPlugin_getNumSpeciesFeatures(self)", "def getNumSpeciesFeatures(self):\n return _libsbml.SubListOfSpeciesFeatures_getNumSpeciesFeatures(self)", "def _number_of_samples(self):\n return len(self._raw_data.samples)", "def feature_dimension(self) -> int:\n return self._feature_dimension", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def get_number_of_training(self):\n return self.n_train", "def getNbColumns(self):\n return self.data.shape[0]", "def getNumData(self):\n return len(self.data)", "def features(self):\n return self.shape[2]", "def num_classes(self):\n return len(self.classes)", "def n_cf(self):\n return np.size(self._ref_ii, 0)", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def batch_size(features, labels):\n return extract_batch_length(features)", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n elif self.mode.lower() == 'val':\n return len(self.val_data)\n elif self.mode.lower() == 'test':\n return len(self.test_data)\n else:\n raise RuntimeError(\"Unexpected dataset mode. 
\"\n \"Supported modes are: train, val and test\")", "def num_train_samples(self):\n if self._num_training_samples is None:\n for key, value in self._training_data.items():\n self._num_training_samples[key] = len(value[0])\n return self._num_training_samples", "def __len__(self):\n return self.data.num_samples", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def num_classes(self) -> int:\n y = self.data.y\n if y is None:\n return 0\n elif y.numel() == y.size(0) and not torch.is_floating_point(y):\n return int(self.data.y.max()) + 1\n elif y.numel() == y.size(0) and torch.is_floating_point(y):\n return torch.unique(y).numel()\n else:\n return self.data.y.size(-1)", "def num_dof(self) -> int:\n return len(self)", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def __len__(self):\n if self.mode.lower() == 'train':\n return len(self.train_data)\n if self.mode.lower() == 'val':\n return len(self.val_data)\n if self.mode.lower() == 'test':\n return len(self.test_data)\n\n raise RuntimeError(\"Unexpected dataset mode. \"\n \"Supported modes are: train, val and test\")", "def get_number_samples(self):\n return self.samples.shape[0]", "def count(self):\n return len(self.read_ints())", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def __len__(self):\n return len(self.train) + len(self.val) + len(self.test)", "def getNbRows(self):\n return self.data.shape[1]", "def num_classes(self):\n return self._num_classes", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def __len__(self):\n if self.TRAIN_BOOL is True:\n count = len(self.dict_batch_1[b'data'])\n count += len(self.dict_batch_2[b'data'])\n count += len(self.dict_batch_3[b'data'])\n count += len(self.dict_batch_4[b'data'])\n count += len(self.dict_batch_5[b'data'])\n else:\n count = len(self.dict_batch_test[b'data'])\n return count", "def size(self):\n return self.N", "def get_num_classes(self):", "def num_classes(self):\n\t\t\treturn len(self.classes)", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def feature_count(self, trajs: List[Dict[str, list]],\n gamma: float) -> np.ndarray:\n # This was moved to utils:\n return irl_utils.feature_count(self.env, trajs, gamma)", "def num_classes(self):\n\t\treturn len(self.classes)", "def num_train_instances(self):\n raise NotImplementedError()", "def size(self):\n\t\treturn self._count", "def num_points(self, f=None):\n if f is not None:\n return f(self.contexts.shape[0])\n return self.contexts.shape[0]", "def __len__(self):\n return self._num_samples", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def train_size(self) -> int:\n return int(self.data_size * self.__train_fraction)", "def num_classes(self):\n raise NotImplementedError", "def n_samples(self) -> int: # pragma: no cover\n return self.samples.shape[0]", "def size(self):\n 
return len(self.data)" ]
[ "0.8408814", "0.83368707", "0.8241575", "0.8142289", "0.80863315", "0.80641556", "0.8048011", "0.79128486", "0.7869567", "0.7836357", "0.78265285", "0.78225327", "0.77721834", "0.7672442", "0.75243306", "0.74740946", "0.7469065", "0.7446943", "0.74352556", "0.7432964", "0.7424912", "0.73855555", "0.73197794", "0.7315663", "0.7306298", "0.7294315", "0.7281277", "0.72633004", "0.7188402", "0.7157994", "0.713146", "0.70630753", "0.70547223", "0.7046529", "0.70450926", "0.7044829", "0.70339364", "0.7025023", "0.6999724", "0.6968625", "0.6966687", "0.6966687", "0.69616085", "0.6949267", "0.6943695", "0.6943695", "0.6939665", "0.69358593", "0.69342786", "0.69342786", "0.6913772", "0.69079864", "0.69052607", "0.68954676", "0.68843853", "0.68829226", "0.6882876", "0.68688565", "0.6863658", "0.68205076", "0.68199825", "0.6812153", "0.6801208", "0.6799068", "0.67948455", "0.67849493", "0.67830366", "0.67798156", "0.67540246", "0.67405283", "0.6737355", "0.67292464", "0.67156136", "0.66943026", "0.66884845", "0.6682031", "0.66743183", "0.6671969", "0.6671257", "0.6664222", "0.6662187", "0.6656468", "0.6640455", "0.6636887", "0.66253674", "0.6622854", "0.66174465", "0.6612837", "0.66000956", "0.6599839", "0.6586937", "0.6586086", "0.65858465", "0.6565099", "0.65411425", "0.65240264", "0.65233135", "0.6520922", "0.65203434", "0.6517099" ]
0.8641283
0
Returns a list of the class labels. Returns list List of class labels.
def get_class_labels(self):
    y = self.get_data()['y']
    if type(y) == torch.Tensor:
        return y.unique().numpy()
    else:
        return sorted(list(set(y)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def class_labels(self):\n return self._class_labels", "def classes(self) -> List[Any]:\n return list(self.label_counts.keys())", "def get_labels(self) -> List[str]:\n return self.labels", "def get_labels(self) -> List[str]:\n raise NotImplementedError()", "def label_names(self) -> Strings:\n\n try:\n if self._le:\n return self._le.classes_.tolist()\n except AttributeError:\n self.logger.warning('AttributeError: LabelEncoder was not found.')\n self.logger.warning('No LabelEncoder. Please call label_encoder first.')\n return None", "def labels(self):\n return self._labels", "def get_labels(self):\n return []", "def labels(self) -> list:\n return self._labels", "def get_labels(self):\n return [token.label for token in self.tokens]", "def get_labels(self):\n return self.labels", "def get_labels(self):\n\t\traise NotImplementedError()", "def list_labels(self):\n # Create empty list\n label_names = []\n \n # For every name in training directory\n for name in os.listdir(self.train_data):\n # If it does not start with . (which hidden files do)\n if not name.startswith('.'):\n label_names.append(name)\n \n return label_names", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def labels(self):\n return self._labels", "def get_labels(self):\n resp = self._client.scan(TableName=self.LABELS_TABLE)\n return [self._item_to_label(item) for item in resp['Items']]", "def get_labels(self):\n raise NotImplementedError", "def get_labels(self) -> Set[str]:", "def labels(self) -> List[str]:\n\n return list(self.t0.keys())", "def get_labels(self):\r\n raise NotImplementedError()", "def labels_all(self):\n return self._labels_all", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def get_labels(self):\n raise NotImplementedError()", "def labels(self):\n return self._get_labels(self.label_vector)", "def keys(cls) -> t.List[t.Any]:\n return list(cls.__labels__.keys())", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def get_labels(self):\r\n raise NotImplementedError()", "def labels(self):\n return self.label2cc.keys()", "def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", 
\"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]", "def classes(self):\n if not hasattr(self, '_unique_classes'):\n # build when we don't have\n self._unique_classes = self.data['label'].unique()\n self._unique_classes.sort()\n\n ret = self._unique_classes\n return ret", "def get_labels(self):\n return self.labels[1:]", "def labels(self) -> list[\"Label\"]:\n _args: list[Arg] = []\n _ctx = self._select(\"labels\", _args)\n _ctx = Label(_ctx)._select_multiple(\n _name=\"name\",\n _value=\"value\",\n )\n return _ctx.execute_sync(list[Label])", "def items(cls) -> t.List[t.Tuple[t.Any, t.Union[str, NameTitle]]]:\n return list(cls.__labels__.items())", "def class_names(self):\n raise NotImplementedError", "def get_labels(self):\n return set(category.label for category in\n self.get_categories(LABELS_SCHEME))", "def get_labels(self):\n return get_labels(self.api_key)", "def get_class_labels(\n tuning_level: list, query_list: ProcessedQueryList\n ) -> List[str]:\n if TuneLevel.INTENT.value in tuning_level:\n return [\n f\"{d}.{i}\" for d, i in zip(query_list.domains(), query_list.intents())\n ]\n else:\n return [f\"{d}\" for d in query_list.domains()]", "def labels(cls) -> FrozenSet[str]:\n return cls._meta.labels", "def get_labels_and_classes(self):\n query = read_query('structure exploration/labels_and_classes')\n response = self._submit_query(query)\n\n temp = dict()\n for r in response:\n temp[r['l']['value']] = r['type']['value'].split('/')[-1]\n\n return temp", "def get_labels(self):\n return set(k.label for k in self)", "def values(cls) -> t.List[t.Union[str, NameTitle]]:\n return list(cls.__labels__.values())", "def display_label(f_class, catalog): \n # Transform the top n class indexes into class labels LIST.\n return catalog[str(f_class)]", "def get_labels() -> list[Label]:\n\n labels_file = deepcopy(get_data(\"labels.yml\"))\n standard_labels = []\n for group_info in labels_file[\"groups\"]:\n labels = group_info.pop(\"labels\", [])\n group = LabelGroup(**group_info)\n for label_info in labels:\n label = Label(**label_info, group=group)\n standard_labels.append(label)\n for label_info in labels_file[\"standalone\"]:\n label = Label(**label_info)\n standard_labels.append(label)\n return standard_labels", "def get_labels():\n return if_found(dao.get_labels())", "def get_labels(self):\r\n return None", "def plabels(self):\n return self._cache.plabels", "def classes(self):\n if self.classname:\n return [self.classname]\n return []", "def SAMT_labels(self):\n \t\t#find basic labels\n \t\tlabels_basic = self.dependency_labels()\n \t\tlabels = Labels(labels_basic)\n \t\treturn labels.SAMT_labels()", "def labels(self) -> ndarray:\n return self._labels", "def get_cora_label_names():\n # type: () -> List[str]\n return _label_names", "def get_all_labels(self):\n labels = self.wls_board.get_labels\n return labels", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def _extract_class(labels: List[int], class_index: int):\n class_ids = [i for i, label in enumerate(labels) if label == class_index]\n return class_ids", "def train_labels(self):\n return self._train_labels", "def get_labels(self):\n return [\"A轮\", \"B轮\",\"C轮\",\"天使轮\",\"战略融资\"]", "def output_labels(self):\n return list(self._output_labels)", "def output_labels(self):\n return list(self._output_labels)", "def getMetaLabelsList(self):\n return self.dict_batch_meta[b'label_names']", "def labels_(self) -> 
DNDarray:\n return self._labels", "def get_unique_label_list(self) -> List[str]:\n return self.tasks.get_label_list()", "def get_class_label_names(class_idx_predictions, class_to_idx, class_to_label):\r\n # Flatten to 1D tensor and convert to ndarray\r\n class_idx_predictions = np.array(np.squeeze(class_idx_predictions))\r\n\r\n # Switch key to value and value to key\r\n idx_to_class = {idx: cls for cls, idx in class_to_idx.items()}\r\n\r\n # class_idx_predictions represents an class index, e.g. provided by model prediction\r\n # Get the label from the class that matches the index\r\n class_labels_predictions = [class_to_label.get(idx_to_class.get(idx, None), idx) for idx in class_idx_predictions]\r\n\r\n # Return list\r\n return class_labels_predictions", "def getLabels(self):\n return self.numToLabel", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_feature_labels(self):\n return self.feature_labels", "def getCSLabels(self):\n\n if self._n_csets:\n return list(self._cslabels)", "def cat_labels(self):\n try:\n return list(self.cats.columns)\n except AttributeError:\n return []", "def get_labels(model):\n return model._labels", "def get_labels(model):\n return model._labels", "def get_labels(model):\n return model._labels", "def get_labels(model):\n return model._labels", "def get_predefined_labels(self):\n raise NotImplementedError", "def get_train_labels(self):\n raise NotImplementedError", "def get_labels(self):\n\n labels = list(self.meta_data[self.target_column])\n\n return labels" ]
[ "0.84051836", "0.7856161", "0.7742177", "0.7669178", "0.7610997", "0.757372", "0.754162", "0.75304675", "0.7472855", "0.72751015", "0.72687674", "0.71715975", "0.71615297", "0.71615297", "0.71615297", "0.71615297", "0.71615297", "0.71615297", "0.71392363", "0.71387213", "0.7134495", "0.71284467", "0.7112693", "0.7096803", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7092208", "0.7062761", "0.7062761", "0.7062761", "0.7047754", "0.7019273", "0.7013806", "0.7013806", "0.7013806", "0.7013806", "0.7013806", "0.7013806", "0.7013806", "0.7013806", "0.70123476", "0.700103", "0.6964731", "0.6937586", "0.6926767", "0.69241834", "0.6912987", "0.6907854", "0.69041514", "0.6867909", "0.6864201", "0.68612576", "0.68554765", "0.68536437", "0.6834182", "0.6821335", "0.6812525", "0.6784552", "0.6776534", "0.6746891", "0.673421", "0.6731716", "0.67269415", "0.6698776", "0.6692635", "0.6689979", "0.668289", "0.666769", "0.6664293", "0.6664293", "0.66538024", "0.66519856", "0.66502845", "0.6649203", "0.6645978", "0.66433495", "0.66433495", "0.66415375", "0.66320777", "0.66207856", "0.6620544", "0.6620544", "0.6620544", "0.6620544", "0.66014576", "0.65853", "0.6570609" ]
0.73184854
9
Returns the index corresponding to the given class label.
def lookup_class_idx(self,label): return self.class_labels[label]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_index(self, label):\n assert label in CLASSES\n return CLASSES.index(label)", "def get_class_index(label):\n if isinstance(label,str) is False:\n basic.outputlogMessage('input label must be a string')\n assert(False)\n length = len(class_label)\n for i in range(0,length):\n if label.lower()==class_label[i]:\n return i\n #if not found\n basic.outputlogMessage('class label: %s not found in the class list'%label)\n assert(False)\n return False", "def label_index(self, label: Text) -> int:\n count = 0\n for l in self.le.classes_:\n if(l == label):\n return count\n count += 1", "def labelIndex(self, label):\n for idx, taskDef in enumerate(self):\n if taskDef.label == label:\n return idx\n return -1", "def get_index(self, label):\n if label in self.labels:\n return self.labels.index(label)\n else:\n self.labels.append(label)\n return self.labels.index(label)", "def get_class_label(index):\n if isinstance(index,str):\n index = int(index)\n # print(type(index))\n if index < len(class_label):\n return class_label[index]\n basic.outputlogMessage('class index: %d not found in the class list' % index)\n assert (False)\n return False", "def label_index(self):\n return self._label_index", "def label_index(self):\n return self._label_index", "def fromLabel(name):\n return Data.labels.index(name)", "def encode_label(self, label: str) -> int:\n return self.class_map[label]", "def _extract_class(labels: List[int], class_index: int):\n class_ids = [i for i, label in enumerate(labels) if label == class_index]\n return class_ids", "def get_label_2_index(self, label):\n return self._labels_2_index.get(label, 0) # return unknown index when not found", "def indices_of_label(self, label_name):\n return self.indices_of('label', label_name)", "def get_index_of_class(self, y, to_torch=False):\n\n # init labels\n y_idx = torch.empty(y.shape, dtype=torch.long)\n\n for i, yi in enumerate(y):\n\n # get index\n idx = np.where(np.array(self.classes) == yi)[0]\n\n # transfer to torch\n if to_torch:\n y_idx[i] = torch.from_numpy(idx)\n\n return y_idx", "def __get_label_idx__(idx: int) -> int:\n\n label_idx = idx // 100\n label_idx = int(label_idx) if label_idx >= 0 else 0\n\n return label_idx", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n return self.labels[index]", "def get_index(observable_nodes, label):\n for k in observable_nodes:\n if label in observable_nodes[k]:\n return observable_nodes[k][label]['category']", "def column_index(self, column_label):\n return self.column_labels.index(column_label)", "def label_from_index(self, index):\n assert self.labels is not None, \"Labels not processed\"\n #return self.labels[index, :, :]\n return self.labels[index]", "def get_ind(labels, k):\n return (np.array(labels) == k).astype('float64')", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n return _CLASS_INDEX[str(index)][1]", "def class_name_to_id(self, class_name: str):\n\n return self.class_to_idx[str(class_name)]", "def get_label_with_index(labels, index):\n return labels[np.where(labels[:, 0] == index)]", "def label_from_index(self, index):\n raise NotImplementedError", "def fromIndex(index):\n return Data.labels[index]", "def get_train_index():\n data_size = (NUM_CLASS - 1) * NUM_DATA_PER_CLASS\n return np.array([i for i in range(0, data_size)])", "def map_id_to_idx(self, class_ids):\n 
class_idx = torch.zeros(class_ids.shape, dtype=int)\n for k, v in self.id2idx.items():\n class_idx[class_ids == k] = v\n\n class_idx = class_idx.to(device)\n return class_idx", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def get_instance_idx(self, idx):\n obj_idx = 0\n while idx >= 0:\n idx -= self.num_per_instance_observations[obj_idx]\n obj_idx += 1\n return obj_idx - 1, int(idx + self.num_per_instance_observations[obj_idx - 1])", "def get_classLabel(self, dataset, class_label): \n\t\tnode = self.root\n\t\tbroken=0\n\t\t\n\t\t#print(\"BEBE:\" + str(node.get_bebe( dataset)))\n\t\t\n\t\tif (node.get_bebe( dataset) == class_label ):\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\n\t\t\tdef junk(data, class_label, seed, ratio):", "def __get_ohe_label__(self, label_idx) -> List[int]:\n\n label = [0] * self.n_classes\n label[label_idx] = 1\n\n return label", "def _get_instance_indices(self, classes, num_detections, batch_index,\n class_id):\n classes = classes[batch_index:batch_index+1, ...]\n _, max_detections = shape_utils.combined_static_and_dynamic_shape(\n classes)\n # Get the detection indices corresponding to the target class.\n # Call tf.math.equal with matched tensor shape to make it tf.lite\n # compatible.\n valid_detections_with_kpt_class = tf.math.logical_and(\n tf.range(max_detections) < num_detections[batch_index],\n tf.math.equal(classes[0], tf.fill(classes[0].shape, class_id)))\n instance_inds = tf.where(valid_detections_with_kpt_class)[:, 0]\n # Cast the indices tensor to int32 for tf.lite compatibility.\n return tf.cast(instance_inds, tf.int32)", "def get_relevant_indices(dataset, classes, target_classes):\n indices = []\n for i in range(len(dataset)):\n # Check if the label is in the target classes\n label_index = dataset[i][1] # ex: 3\n label_class = classes[label_index] # ex: 'cat'\n if label_class in target_classes:\n indices.append(i)\n return indices", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def find_index(self, obj):\n return self.model.indexlist[obj]", "def _string_label_to_class_id_postprocessor(\n string_label, label_classes, default=-1, **unused_kwargs):\n if string_label in label_classes:\n return label_classes.index(string_label)\n else:\n return default", "def num(self, cls):\n try:\n return self.classes.index(cls) + 1\n except:\n raise Exception(\"Someone asked for \" + str(cls) + \", which is not here \" + str(self))", "def findLabel(self, label):\n return self.root._findLabel(label)", "def _get_label_id(self, label: str) -> int:\n try:\n return self._label_id_map[label]\n except KeyError:\n label_id = self._get_id('label')\n self._label_id_map[label] = label_id\n return label_id", "def get_label_id(self) -> int:\n pass", "def getLabel(labels):\r\n elems = {}\r\n for l in labels:\r\n if l not in elems.keys():\r\n elems[l] = 1\r\n else:\r\n elems[l] += 1\r\n counts = sorted(elems.values(), reverse=True)\r\n if len(counts) > 1 and counts[0] == counts[1]:\r\n return choice(list(elems.keys()))\r\n return sorted(elems, key=elems.get, reverse=True)[0]", "def 
name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def get_label_num(self, *args):\n return _ida_hexrays.ctree_item_t_get_label_num(self, *args)", "def name_to_label(self, name):\n return self.classes[name]", "def get_label_indices(df: DataFrame, labels: list):\n return [idx for idx, name in enumerate(df.columns) if name in labels]", "def find_label(self, *args):\n return _ida_hexrays.cfuncptr_t_find_label(self, *args)", "def get_label(self, label):\n\n return torch.from_numpy(np.array(label)).long()", "def value_to_class_index(bin_arr, val_arr):\n# return pd.cut(val_arr,bin_arr,labels=False)\n return np.digitize(val_arr,bin_arr,right=True)-1", "def value_to_class_index(bin_arr, val_arr):\n# return pd.cut(val_arr,bin_arr,labels=False)\n return np.digitize(val_arr,bin_arr,right=True)-1", "def __getitem__(self, index):\n if index in self.marks:\n return psi_class(self, index)\n return self.classes[index-1]", "def label_to_class_name(label):\n try:\n genre_label = pd.read_csv(path.join(DATA_PATH, 'genre_labels.csv'))\n return genre_label[genre_label['label'] == int(label)]['genre'].values[\n 0]\n except IOError:\n return label", "def get_by_label(self, label):\n # label = label.replace(\"-\", \"\") FLB: problem with - in variable\n # Check for name in all categories in self\n for category in categories:\n method = getattr(self, category)\n for entity in method():\n if label in entity.label:\n return entity\n # Check for special names\n d = {\n 'Nothing': Nothing,\n }\n if label in d:\n return d[label]\n # Check whether `label` matches a Python class name of any category\n # l = [cls for cls in itertools.chain.from_iterable(\n # getattr(self, category)() for category in categories)\n # if hasattr(cls, '__name__') and cls.__name__ == label]\n # if len(l) == 1:\n # return l[0]\n # elif len(l) > 1:\n # raise NoSuchLabelError('There is more than one Python class with '\n # 'name %r'%label)\n # # Check imported ontologies\n # for onto in self.imported_ontologies:\n # onto.__class__ = self.__class__ # magically change type of onto\n # try:\n # return onto.get_by_label(label)\n # except NoSuchLabelError:\n # pass", "def label_id(self):\n return int(self.instance_id // 1000)", "def _map_new_class_index(y, order):\n return np.array(list(map(lambda x: order.index(x), y)))", "def groupByLabel( y ):\n index = []\n for i in np.unique(y): # pour toutes les classes\n ind, = np.where(y==i)\n index.append(ind)\n \n return index", "def get_namespace_index(cls, libvirt_network_if):\n matcher = re.match(r\"^tt(\\d+)$\", libvirt_network_if)\n return int(matcher.groups()[0]) if matcher is not None else 0", "def get_unlabeled_idx(X_train, labeled_idx):\n return np.arange(X_train.shape[0])[np.logical_not(np.in1d(np.arange(X_train.shape[0]), labeled_idx))]", "def get_label_name(label):\n\tindex = np.argmax(label)\n\tlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\n\treturn labels[int(index)]", "def _idx(self, class_, key):\n return u':'.join((class_, key))", "def get_item_from_label(self, label):\n idx = self.labels.index(label)\n item = self[idx][0]\n return item", "def _index(self) -> int:\n return -1", "def get_tkinter_index(self,index):\n if isinstance(index,str):\n if index in self.indexname2index:\n i=self.indexname2index[index]\n else:\n # pass through tkinter to get 'end' etc converted to index\n i=self.index(index)\n else:\n i=index\n return i", "def get_count_by_label(self, label=None):\n if label is None:\n return len(self.data)\n else:\n 
return sum(1 for d in self.data if d.pred == label)", "def get_encoder_class(self,label):\n return len(self.encodeDict[label].classes_)", "def get_step_class_at_index(self, index):\n return self[index][0]", "def get_step_label_at_index(self, index):\n return self[index][1]", "def index(self) -> int:", "def _decode_to_index(self, decoder_output):\n value, index = torch.topk(decoder_output, 1)\n index = index.transpose(0, 1) # S = 1 x B, 1 is the index of top1 class\n if self.use_cuda:\n index = index.cuda()\n return index", "def get_index(tag):\n global kpi_list\n try:\n return kpi_list.index(str(tag))\n except ValueError:\n return -1", "def label_index2node(label_index, labels):\n hi_pairs, med_pairs = labels\n if label_index < len(hi_pairs):\n return hi_pairs[label_index][0]\n else:\n error_msg = \"there is no node with label \"+str(label_index)\n assert label_index-len(hi_pairs) < len(med_pairs), error_msg\n return med_pairs[label_index-len(hi_pairs)][0]", "def getNodeIndex(gltf, idname):\n\n if gltf.get('nodes') is None:\n return -1\n\n index = 0\n for node in gltf['nodes']:\n key = 'id' if node.get('id') != None else 'name'\n if node.get(key) == idname:\n return index\n\n index += 1\n\n return -1", "def index(self, pos):\n for i, n in enumerate(self):\n if i == pos: return n\n raise Exception('Index out of bounds.')", "def _get_labels(self, ind):\n pass", "def voc_label_indices(colormap, colormap2label):\n colormap = np.array(colormap.convert(\"RGB\")).astype('int32')\n idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256\n + colormap[:, :, 2])\n return colormap2label[idx]", "def get_idx_to_target(self, idx):\n metadata = self.data.loc[idx]\n target = metadata['label']\n return target", "def _match_class_pos(self):\n # TODO: add notfitted warnings\n if self.kernel.classes_.shape[0] != 2:\n raise ValueError(\n f\"Number of classes is {self.kernel.classes_.shape[0]}, expected 2.\"\n )\n\n # # get the position of match probabilities\n # classes = list(self.kernel.classes_)\n # return classes.index(1)\n\n return 1", "def encode_label(label: str) -> int:\n\tif not label:\n\t\treturn 0\n\t# part after letter if it has a number, otherwise 1\n\tindex = int(label[1:]) if len(label) > 1 else 1\n\t# A = 1, B = 2, ... 
E = 5\n\toffset = ord(label[0]) - ord(\"A\") + 1\n\t# compute label number\n\treturn (index - 1) * 5 + offset", "def display_label(f_class, catalog): \n # Transform the top n class indexes into class labels LIST.\n return catalog[str(f_class)]", "def axis_index(self, key):\n for i, name in enumerate(self.axis_labels):\n if name == key:\n return i\n raise ValueError(f'Axis not found: {key}')", "def pandas_find_post_label_num(index, dataframe):\n return dataframe.at[index, 'label_number']", "def predict(self, input: ByteTensor) -> IntTensor:\n clause_outputs = self.evaluate_clauses(input)\n class_votes = self.sum_up_class_votes(clause_outputs)\n _, index = class_votes.max(0)\n return index", "def get_index_2_label(self, index):\n return self._index_2_labels.get(index, self._unknown_label)", "def index(self, factor_name):\n return self._factor_names.index(str(factor_name))", "def index(self) -> int:\r\n return self._index", "def get_index(self, gi):\n for i in range(len(self.gradual_items)):\n gi_obj = self.gradual_items[i]\n if (gi.symbol == gi_obj.symbol) and (gi.attribute_col == gi_obj.attribute_col):\n return i\n return -1", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def getBranchIndex(self):\n\n data = self.name.split('-')\n return int(data[2])", "def step_index(self, step):\n return self.steps.index(step)", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def index(self) -> int:\n return self._index", "def get_class_weights(img_paths: List[str], class_to_idx: Dict[str, int], label_names: List[str]):\n labels = list()\n for img_path in img_paths:\n label = os.path.basename(os.path.dirname(img_path))\n labels.append(class_to_idx[label]) \n\n counts = Counter(labels) + Counter([class_to_idx[name] for name in label_names])\n counts = np.array(sorted(counts.items()))[:,1]\n \n return counts.max()/counts", "def get_by_label(self, label, table, verbose=True):\n assert (self.connected)\n \n theId = -1\n GET_BY_LABEL_COMMAND = \"SELECT id,label FROM {0} WHERE samples.label = \\\"{1}\\\"\".format(table, label)\n \n \n self.cursor.execute(GET_BY_LABEL_COMMAND)\n \n for row in self.cursor:\n theId = row[0]\n break\n \n if verbose and theId != -1: \n print(\"Item with id {0} and label '{1}' retrieved.\".format(theId, label))\n elif verbose: \n print(\"No item in the table '{0}' with the label '{1}' was found.\".format(table, label))\n \n return int(theId)", "def category_int_of_label_string(self, label_string):\n if label_string not in self.label_vocab:\n self.label_vocab[label_string] = max(self.label_vocab.values())+1\n return self.label_vocab[label_string]", "def lookup(name):\n for i in range(len(catalog.obj_catalog)):\n if catalog.obj_catalog[i].name == name:\n return i\n return -1", "def labeled_indices(self):\n return self._labeled_indices" ]
[ "0.8939161", "0.8781351", "0.8290658", "0.79299194", "0.75193024", "0.7453748", "0.7260702", "0.7260702", "0.7200521", "0.7144214", "0.6997018", "0.6986183", "0.6928649", "0.68628794", "0.67985356", "0.6562468", "0.64826816", "0.6404319", "0.6269193", "0.6264292", "0.62591416", "0.6208201", "0.61772144", "0.61468256", "0.61286426", "0.6051882", "0.6021557", "0.6006552", "0.6004112", "0.6004112", "0.6004112", "0.5962353", "0.5953318", "0.59454256", "0.59268636", "0.5889849", "0.5873558", "0.587053", "0.5870289", "0.5870201", "0.5864304", "0.5845861", "0.58416283", "0.5815028", "0.5800807", "0.57976604", "0.5782971", "0.5768314", "0.57639515", "0.5759536", "0.5759536", "0.57592577", "0.57573026", "0.5725743", "0.5720887", "0.57201725", "0.5711872", "0.56967115", "0.56920975", "0.5686342", "0.5681812", "0.5674964", "0.56586653", "0.56225765", "0.56050414", "0.5604605", "0.56020975", "0.5599644", "0.5595721", "0.5592775", "0.5586394", "0.55766004", "0.5576394", "0.55730003", "0.55536467", "0.5550737", "0.554476", "0.5539509", "0.5531208", "0.5523268", "0.552018", "0.55118656", "0.55110884", "0.5510772", "0.54968333", "0.5490497", "0.54850674", "0.547481", "0.54733133", "0.54726285", "0.54697645", "0.54697645", "0.54697645", "0.54697645", "0.54697645", "0.546026", "0.5458672", "0.54492354", "0.54489475", "0.54415584" ]
0.9071037
0
Returns ndarrays or Tensors of all data in the current split.
def get_data(self,split=None,numpy=True): if split is not None: split_ = self.split_ self.set_split(split) dataloader = DataLoader(self,batch_size=len(self),shuffle=False, drop_last=False) for i,data_item in enumerate(dataloader): assert(i==0) x = data_item['x'] y = data_item['y'] if numpy: if type(x) == torch.Tensor: x = x.detach().numpy() else: x = np.array(x) if type(y) == torch.Tensor: y = y.detach().numpy() else: y = np.array(y) if split is not None: self.split_ = split_ return {'x':x,'y':y}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_data(self) -> Optional[np.ndarray]:\n if self._data_store is None:\n return None\n return self._data_store[:self._count, :]", "def full_batch(self):\n return self.X_data, self.Y_data", "def getNdArray(self):\n futures = self.client.map(_call_getNdArray, self.vecDask, pure=False)\n arrays = self.client.gather(futures)\n return arrays", "def batch_split(self) -> np.array:\n pass", "def split(self):\n return self.dataset_split", "def getData(trainSize):\r\n return splitData([getReal(), getFake()], trainSize=trainSize)", "def _read_data(self):\n return [np.array([]), np.array([])]", "def data(self) -> List[ndarray]:\n return self._data", "def read_data_from_file(split=False):\n with open(constants.DATA_FILENAME, 'rb') as f:\n data = []\n while(True):\n try:\n temp = pickle.load(f)\n\n if type(temp) is not list:\n temp = np.ndarray.tolist(temp)\n\n data = data + temp\n except EOFError:\n break\n if split:\n X_train = []\n Y_train = []\n\n for i in range (0, len(data)):\n X_train.append(data[i][0]) # image\n Y_train.append(data[i][1]) # corresponding joystick output\n\n return np.array(X_train), np.array(Y_train)\n else:\n return np.array(data)", "def _read_examples(self, split: base.Split) -> tf.data.Dataset:\n if split == base.Split.TEST:\n return tf.data.Dataset.range(self._num_test_examples)\n if split == base.Split.TRAIN:\n return tf.data.Dataset.range(self._num_train_examples)\n if split == base.Split.VAL:\n return tf.data.Dataset.range(self._num_validation_examples)", "def _load_data(self):\n raw_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(self._dataset_split), \"rb\"))\n if self._dataset_split == MetaSplit.TRAIN and self._config[\"train_on_val\"]:\n valid_data = self._load(\n tf.gfile.Open(self._get_full_pickle_path(MetaSplit.VALID), \"rb\"))\n for key in valid_data:\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n raw_data[key] = np.concatenate([raw_data[key],\n valid_data[key]], axis=0)\n if self._verbose:\n tf.logging.info(str([key, raw_data[key].shape]))\n\n if self._verbose:\n tf.logging.info(\n str([(k, np.shape(v)) for k, v in six.iteritems(raw_data)]))\n\n return raw_data", "def get_data():\n iris = datasets.load_iris()\n xall = np.asarray(iris[\"data\"], dtype=np.float64)\n yall = np.asarray(iris[\"target\"], dtype=np.float64)\n xall = np.vstack([xall, (7, 2.0, 4.5, 1)])\n yall = np.append(yall, n_classes)\n X, Xval, y, yval = train_test_split(\n xall, yall, test_size=0.2, shuffle=True, random_state=12345\n )\n y = tf.one_hot(y, n_classes)\n yval = tf.one_hot(yval, n_classes)\n return X, y, Xval, yval", "def vars(self) -> np.ndarray:\n if isinstance(self.data, pd.DataFrame) is False:\n return np.array([])\n else:\n return np.array(self.data.index)", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)", "def get_all_data(self):\n\t\ttemp = self.get_temp()\n\t\taccel = self.get_accel_data()\n\t\tgyro = self.get_gyro_data()\n\t\treturn [accel, gyro, temp]", "def all_gather(data):\n world_size = dist.get_world_size()\n if world_size == 1:\n return [data]\n\n buffer = pickle.dumps(data) #write data into Bytes and stores in buffer\n np_buffer = np.frombuffer(buffer, dtype=np.int8)\n tensor = paddle.to_tensor(np_buffer, dtype='int32') # uint8 doese not have many ops in paddle\n\n # obtain Tensor size of each rank\n local_size = paddle.to_tensor([tensor.shape[0]])\n size_list = []\n dist.all_gather(size_list, local_size)\n max_size = max(size_list)\n\n # 
receiving tensors from all ranks, \n # all_gather does not support different shape, so we use padding\n tensor_list = []\n if local_size != max_size:\n padding = paddle.empty(shape=(max_size - local_size, ), dtype='int32')\n tensor = paddle.concat((tensor, padding), axis=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.astype('uint8').cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def split(self):\n\n split_fun = [Function(self.F_base) for i in range(self.nvdofs)]\n\n for i in range(self.nvdofs):\n split_fun[i].dat.data[:] = self.dat.data.reshape(-1, self.nvdofs)[:,i]\n return split_fun", "def numpy(self):\n return self.data", "def getSensors(self):\n sensors = array([])\n sensors = r_[sensors, self._getTotalDemandSensor()]\n# sensors = r_[sensors, self._getDemandSensor()]\n# sensors = r_[sensors, self._getPriceSensor()]\n\n# sensors = r_[sensors, self._getBusVoltageSensor()]\n\n# sensors = r_[sensors, self._getBusVoltageMagnitudeSensor()]\n# sensors = r_[sensors, self._getBusVoltageLambdaSensor()]\n# sensors = r_[sensors, self._getBranchFlowSensor()]\n\n# logger.info(\"State: %s\" % sensors)\n\n return sensors", "def get_data(self, img_size=None, split=False, model_class=None):\n X, y = self.import_data(img_size)\n X = self.preprocess_data(X, model_class)\n\n if split:\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n return X_train, X_test, y_train, y_test\n\n else:\n return X, y", "def tondarray(self):\r\n return self.data;", "def getObservation(self):\n sensors = self.env.getSensors()\n if self.sensor_limits:\n sensors = self.normalize(sensors)\n return sensors", "def get_variables(self) -> np.array:\n pass", "def get_data(self):\n return self.X_train, self.X_test, self.y_train, self.y_test", "def get_data(self):\n return self._fullInput, self._fullOutput", "def get_dataset(self, split):\r\n def generator():\r\n while True:\r\n idx = self.get_batch_idx(split)\r\n yield self.idx_to_data(idx)\r\n return tf.data.Dataset.from_generator(\r\n generator,\r\n output_types=(dict(self.dtypes_input), self.dtype_target),\r\n output_shapes=(self.shapes_input, self.shape_target))", "def get_dataset(self):\n return self._X, self._y", "def get_data():\n transform = Compose([paddle.vision.Resize(32),\n Normalize(mean=[127.5], std=[127.5], data_format='CHW'),\n paddle.vision.transforms.Transpose()])\n train_data = paddle.vision.datasets.Cifar10(mode='train', transform=transform)\n l = len(train_data)\n return paddle.io.random_split(train_data, [l // 2, l - l // 2])", "def gather_all(self):\n size = tf.reduce_min(self._current_size)\n max_size = tf.reduce_max(self._current_size)\n tf.Assert(size == max_size, [\n \"Not all environment have the same size. 
min_size:\", size,\n \"max_size:\", max_size\n ])\n\n if size == self._max_length:\n return tf.nest.map_structure(lambda buf: buf.value(), self._buffer)\n else:\n return tf.nest.map_structure(lambda buf: buf[:, :size, ...],\n self._buffer)", "def features(self) -> List[np.ndarray]:\n return None", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def all_gather(data):\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n if type(data) is torch.Tensor:\n data = data.cpu()\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.LongTensor([tensor.numel()]).to(\"cuda\")\n size_list = [torch.LongTensor([0]).to(\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = [int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.ByteTensor(size=(max_size,)).to(\"cuda\"))\n if local_size != max_size:\n padding = torch.ByteTensor(size=(max_size - local_size,)).to(\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data = pickle.loads(buffer)\n if type(data) is torch.Tensor:\n data = data.to(\"cuda\")\n data_list.append(data)\n\n return data_list", "def getBatchData(self):\n if self.is_stored:\n if self.storage == \"memory\":\n dat = self.raw_data\n return dat['X_data'], dat['y_data']\n\n elif self.storage == \"disk\":\n batch_file = 'batch' + str(self.batch_id) + '.pkl'\n dat = pkl.load(open(self.disk_storage_path + batch_file, \"rb\"))\n return dat['X_data'], dat['y_data']\n\n elif self.storage == \"numpy\":\n batch_file = 'batch' + str(self.batch_id) + '.npy'\n X_data = np.load(self.disk_storage_path + \"X_\" + batch_file)\n y_data = np.load(self.disk_storage_path + \"y_\" + batch_file)\n return X_data, y_data\n elif self.storage == \"disk_raw\":\n batch_dir = self.disk_storage_path + \"batch\" + str(self.batch_id)\n for i in range(0, len(self.y_data)):\n raise NotImplementedError\n else:\n pass", "def _split_inputs_outputs(self, data):\n\n\t\tinputs = []\n\t\toutputs = []\n\n\t\tfor point in data:\n\t\t\tinputs.append(point[0])\n\t\t\toutputs.append(point[1])\n\n\t\treturn np.array(inputs), np.array(outputs)", "def getObservation(self):\n res = zeros(4)\n all = self.env.getSensors()\n res[0] = all[3]\n res[1] = all[1]\n res[2] = all[3] and all[1]\n res[3] = not all[3] and not all[1]\n return res", "def split_data(self):\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=7)\n\n return X_train, X_test, y_train, y_test", "def get_partions(self) -> Union[ndarray, Tuple[ndarray, ndarray]]:\n if self.fragmented:\n return (self[self._begin:], self[:self._end])\n else:\n return self[self._begin:self._end]", "def get_sensors(self) -> tuple:\n return self.sensors", "def getAll(self):\n return self.dataBuffer", "def getSensors(self):\n return [float(self.current_state),]", "def values(self):\n vals = []\n narrays = self.VTKObject.GetNumberOfArrays()\n 
for i in range(narrays):\n a = self.VTKObject.GetAbstractArray(i)\n if a.GetName():\n vals.append(a)\n return vals", "def get_eval_batch(self) -> jnp.ndarray:\n\n return self.dataset[-self.eval_batch_size:, ...]", "def get_data(self):\n return [self.axes]", "def split_data(X:np.ndarray, y:np.ndarray) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray):\n \n X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.20, random_state=42, stratify=y)\n \n return X_train, X_val, y_train, y_val", "def tensors(self):\n return [x[0] for x in self.__normalizeData__(self.__tensors__)]", "def fetch_samples(self):\n return torch.cat(self.samples,dim=0).reshape(-1,self.parameters.numel())", "def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 80,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 2,\n }]", "def _read_all(self) -> list[float]:\n with self.lock:\n return [sensor.read() for sensor in self.sensors]", "def _get_one_split ( split_indices, number_of_split ):\n \n # Given the split indices, get the `number_of_split` element of the indices.\n return ( np.delete ( np.concatenate ( split_indices ), split_indices [ number_of_split ] ), # Drops the test from the train\n split_indices [ number_of_split ],) # Gets the train\n # End get_one_split", "def _get_one_split ( split_indices, number_of_split ):\n \n # Given the split indices, get the `number_of_split` element of the indices.\n return ( np.delete ( np.concatenate ( split_indices ), split_indices [ number_of_split ] ), # Drops the test from the train\n split_indices [ number_of_split ],) # Gets the train\n # End get_one_split", "def split_data(data, squeeze=False):\n vdata = np.atleast_2d(data)\n nr_freqs = int(vdata.shape[1] / 2)\n part1 = vdata[:, 0:nr_freqs]\n part2 = vdata[:, nr_freqs:]\n if(squeeze):\n part1 = part1.squeeze()\n part2 = part2.squeeze()\n return part1, part2", "def get_inputs_train():\n x = tf.constant(extract_pandas_data(x_train))\n y = tf.constant(y_train.values)\n return x, y", "def output_tensors(self):\r\n return self._output_tensors", "def load_data(self,split='train'):\n return load_arrow_data(self.config,split)", "def get_numpy(x):\n return x.cpu().data.numpy() #if use_cuda else x.data.numpy()", "def allgather(self, tensor, name, shape, dtype, context):\n assert isinstance(tensor, nd.NDArray), type(tensor)\n assert isinstance(name, str), type(name)\n assert isinstance(shape, tuple), type(shape)\n assert isinstance(dtype, str), type(dtype)\n assert isinstance(context, mx.context.Context), type(context)\n total_tensor = self.get_ndarray(context=context,\n name=name,\n shape=shape,\n dtype=dtype)\n total_tensor[:] = 0 # reset array before all-reduce is very important\n total_tensor[self.rank * self.batch_size:self.rank * self.batch_size +\n self.batch_size] = tensor\n hvd.allreduce_(total_tensor, average=False) # all-reduce in-place\n return total_tensor", "def get_samples(\n self,\n num_samples=0,\n idx=None,\n split='val',\n as_list=True,\n deterministic=False,\n as_tuple=False,\n simple_IDs=False):\n assert(idx is not None or num_samples > 0)\n\n if split == 'train':\n assert(self.mode in ['train_noval', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._trn_idx[0:num_samples]\n else:\n idx = np.random.choice(self._trn_idx, size=num_samples, replace=False)\n\n images, labels, IDs = self._get_train_samples(idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, labels, IDs\n else:\n return map(np.asarray, (images, 
labels, IDs))\n\n elif split == 'val':\n assert(self.mode in ['val', 'val_notrain', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._val_idx[0:num_samples]\n else:\n idx = np.random.choice(self._val_idx, size=num_samples, replace=True)\n\n images, gt_labels, IDs = self._get_val_samples(idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, gt_labels, IDs\n else:\n return map(np.asarray, (images, gt_labels, IDs))\n\n elif split == 'val_with_preds':\n assert(self.mode in ['val', 'val_notrain', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._val_idx[0:num_samples]\n else:\n idx = np.random.choice(self._val_idx, size=num_samples, replace=True)\n\n images, gt_labels, pred_labels, IDs = self._get_val_samples_with_preds(\n idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, gt_labels, pred_labels, IDs\n else:\n return map(np.asarray, (images, gt_labels, pred_labels, IDs))\n\n elif split == 'val_with_pred_paths':\n assert(self.mode in ['val', 'val_notrain', 'train_with_val'])\n if idx is None:\n if deterministic:\n idx = self._val_idx[0:num_samples]\n else:\n idx = np.random.choice(self._val_idx, size=num_samples, replace=True)\n\n images, gt_labels, pred_label_paths, IDs = self._get_val_samples_with_pred_paths(\n idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, gt_labels, pred_label_paths, IDs\n else:\n return map(np.asarray, (images, gt_labels, pred_label_paths, IDs))\n\n elif split == 'test':\n if idx is None:\n if deterministic:\n idx = self._tst_idx[0:num_samples]\n else:\n idx = np.random.choice(self._tst_idx, size=num_samples, replace=False)\n\n images, IDs = [], []\n for l in idx:\n if self.opts['in_memory']:\n image = self._images_test[l]\n else:\n image = self._load_sample(self._img_tst_path[l], preprocess=False, as_tuple=as_tuple)\n images.append(image)\n if simple_IDs is True:\n IDs.append(self._tst_IDs_simpl[l])\n else:\n IDs.append(self._tst_IDs[l])\n\n if as_list:\n return images, IDs\n else:\n return map(np.asarray, (images, IDs))\n\n elif split == 'test_with_preds':\n if idx is None:\n if deterministic:\n idx = self._tst_idx[0:num_samples]\n else:\n idx = np.random.choice(self._tst_idx, size=num_samples, replace=False)\n\n images, pred_labels, IDs = self._get_test_samples_with_preds(idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, pred_labels, IDs\n else:\n return map(np.asarray, (images, pred_labels, IDs))\n\n elif split == 'test_with_pred_paths':\n if idx is None:\n if deterministic:\n idx = self._tst_idx[0:num_samples]\n else:\n idx = np.random.choice(self._tst_idx, size=num_samples, replace=False)\n\n images, pred_label_paths, IDs = self._get_test_samples_with_pred_paths(\n idx, as_tuple=as_tuple, simple_IDs=simple_IDs)\n\n if as_list:\n return images, pred_label_paths, IDs\n else:\n return map(np.asarray, (images, pred_label_paths, IDs))\n\n else:\n return None, None", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n 
for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def get_all(self):\n try:\n return self.current_data\n except:\n print('No data received from sensor')", "def flatten(self):\n return DataArray([s for s in self.unstructured()])", "def use(self,dataset):\n features = []\n outputs = np.zeros((len(dataset),1))\n for xy in dataset:\n x,y = xy\n features += [x]\n\n for test,out in zip(features,outputs):\n out[0] = self.tree.apply(test)\n \n return outputs", "def read_buffer(self):\n data = []\n\n # loop until the buffer was completely read out\n while True:\n new = [float(n) for n in self.read(\":FETC:ARR? MAX\").split(\",\") if n]\n data += new\n\n if len(new) < self.batch_size:\n break\n\n return data", "def get_idx_dataset(self, split):\r\n def generator():\r\n while True:\r\n batch_idx = self.get_batch_idx(split)\r\n yield tf.constant(batch_idx, dtype=tf.int32)\r\n return tf.data.Dataset.from_generator(\r\n generator,\r\n output_types=tf.int32,\r\n output_shapes=[None])", "def data(self, train=True):\n data = self.train_data if train else self.val_data\n return data.data, data.targets", "def provide_data(self):\n return [(k, v.shape) for k, v in self.data]", "def _get_batch_data(batch, ctx):\n data, label = batch\n return (mx.gluon.utils.split_and_load(data, ctx),\n mx.gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def read(self) -> np.ndarray:\n return self[self._head]", "def getGridSensorData(self):\r\n\r\n gridData = []\r\n\r\n gridData.append(self.gridSensorDataX)\r\n gridData.append(self.gridSensorDataY)\r\n gridData.append(self.gridSensorDataZ)\r\n gridData.append(self.gridSensorDataRotationX)\r\n gridData.append(self.gridSensorDataRotationY)\r\n gridData.append(self.gridSensorDataRotationZ)\r\n\r\n return gridData", "def reconstructed_data(self):\n try:\n data = np.sum(\n np.array([\n self.partial_reconstructed_data(i)\n for i in range(self._max_level)\n ]),\n axis=0)\n except MemoryError:\n data = np.array(self.partial_reconstructed_data(0))\n for i in range(1, self._max_level):\n data = np.sum([data,\n np.array(self.partial_reconstructed_data(i))], axis=0)\n return data", "def labeledTensors(self):\n return self.__normalizeData__(self.__tensors__)", "def get_all_features(self) :\n raise NotImplementedError", "def GetArrays(self):\n self.__init_from_composite()\n return self._Arrays", "def sensors(self):\n return self._sensors", "def get_output_shape(self):\n return []", "def get_outputs(self):\n outputs = Interaction.get_outputs(self)\n outputs.update(np.atleast_1d(self._stores))\n return outputs", "def get_data_train(self):\n return self.get_data(self.file_train, self.batch)", "def variables(self):\n return self.dataset.data_vars", "def get_data(self):\n return self.train_edges, self.train_labels, self.test_edges, self.test_labels", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def get_iter_data(dataset):\n num_samples = dataset.num_examples\n\n handle = dataset.open()\n features = []\n targets = []\n for i in xrange(num_samples):\n data = dataset.get_data(handle)\n features.append(data[0])\n targets.append(data[1])\n\n dataset.close(handle)\n\n targets_arr = targets[0]\n for i in xrange(1, num_samples):\n targets_arr = np.vstack((targets_arr, targets[i]))\n\n return features, targets_arr", "def numpy(self) -> np.ndarray:\n return self.tensor.numpy()", "def getAllData(self):\r\n return self.data", "def __array__(self):\n return 
np.asarray(self.data)", "def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')", "def dataset_input_fn(self, train_batch_size, split):\n self._create_tf_datasets(split, train_batch_size)\n next_el = self.iterator.get_next()\n obj_ids, translations, rotations, labels, is_augmented = next_el\n points, segment_ids = self._input_fn(obj_ids, translations,\n rotations, train_batch_size,\n train_batch_size * 6, True)\n return (points, segment_ids, labels, is_augmented)", "def load_data(split):\n with open(split,'r') as splitfile:\n reader = [line.split() for line in splitfile]\n x_sp = np.zeros((len(reader),2048)) # size of frozen vectors\n y_sp = np.zeros((len(reader)))\n for counter, row in enumerate(reader):\n path = row[0]\n ml = path.split('/')[-1]\n ml2 = ml.split('_')\n targ = int(ml2[1])\n tot = int(ml2[2])\n nontarg = tot - targ\n if targ > nontarg:\n ans = 1\n elif nontarg > targ:\n ans = 0\n elif targ == nontarg:\n ans = 2\n y_sp[counter] = ans\n feat_vector = [float(x) for x in row[1:]]\n x_sp[counter] = feat_vector\n \n x_split = x_sp.reshape((len(reader),2048))\n y_split = y_sp.reshape((len(reader)))\n return x_split, y_split", "def getSensors(self):\n return self.listener.sensors", "def get_data(datapath,n=10):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n input_path = datapath + \"/maze_data_test_59/inputs.npy\"\n target_path = datapath + \"/maze_data_test_59/solutions.npy\"\n data = np.load(input_path)\n target = np.load(target_path)\n a = data[:n]\n a = torch.from_numpy(a)\n input = a.to(device, dtype=torch.float)\n b = target[:n]\n t = torch.from_numpy(b)\n t = t.to(device, dtype=torch.float)\n target = t\n return input, target", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def data(self) -> np.ndarray:\n return self._data", "def get(self):\r\n return self.data_array", "def values(self):\n arr = self.view(np.ndarray).copy()\n return arr", "def array(self):\n aa = list(map(np.asarray, self.loader_array.flat))\n return np.stack(aa, axis=0).reshape(self.output_shape)", "def to_fits_array(self):\n return self.data", "def get(self):\n return np.hstack((self.data[:, self.cur:], self.data[:, :self.cur])) #Concatena los datos en horizontal", "def _datasets(self):\n return self._flat_data._datasets", "def get_datasets(self): # noqa: N805\n return vars(self)", "def _get_data(self, position):\n index = self._indexes[position]\n basename = self._waves[index].with_suffix(\".npy\").name\n return tuple(np.load(self._path / x / basename) for x in self._variables)", "def outdim(self):\n return len(self.getSensors())", "def making_x_train_data_list_for_kfold(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n train_data_list = making_dataset_list_train(data, split_num)\n x_train_data_list = making_dataset_list_x(train_data_list)\n return translate_pandas_to_numpy(x_train_data_list)" ]
[ "0.62569696", "0.618584", "0.6073904", "0.6042331", "0.5926986", "0.5832924", "0.5821185", "0.5761303", "0.5756968", "0.57559574", "0.57297504", "0.57242715", "0.57101953", "0.570451", "0.5688236", "0.56806684", "0.56590205", "0.564544", "0.56339717", "0.55949557", "0.5587822", "0.5539013", "0.5523314", "0.55133456", "0.5511322", "0.5509223", "0.5505799", "0.54871887", "0.5483199", "0.54404867", "0.5437536", "0.54359573", "0.54137814", "0.54011637", "0.5387084", "0.5373262", "0.53718376", "0.5369375", "0.5365694", "0.53602743", "0.53455997", "0.5341386", "0.53388304", "0.5334904", "0.53163445", "0.5284122", "0.52759665", "0.52735865", "0.5268404", "0.5268404", "0.52658945", "0.52657455", "0.5261651", "0.5259461", "0.5258104", "0.5251916", "0.5249405", "0.5244505", "0.5243992", "0.52220285", "0.5218976", "0.5217487", "0.5210305", "0.5210068", "0.52065545", "0.5205669", "0.5201273", "0.51941836", "0.5185035", "0.51828265", "0.51798934", "0.5179286", "0.51737404", "0.51732063", "0.51717025", "0.51656497", "0.51627773", "0.51613265", "0.5153882", "0.5153801", "0.51523215", "0.51503396", "0.5137963", "0.5136027", "0.51358956", "0.5135446", "0.5133455", "0.5132282", "0.51291287", "0.51269615", "0.5125949", "0.5122495", "0.51210445", "0.5120168", "0.5118934", "0.51134026", "0.5113021", "0.5105385", "0.51039714", "0.5102865" ]
0.5634289
18
Partitions the full data into a list of ndarrays/Tensors.
def get_n_folds(self,split=None,N=5,numpy=True,perm=None): data = self.get_data(split,numpy) X = data['x'] y = data['y'] size = len(y) if perm is None: perm = np.random.permutation(size) elif len(perm) != size: raise Exception("Permutation provided is wrong length: "+\ str(len(perm))+" vs "+str(size)) X = X[perm,:] y = y[perm,:] x_folds = np.split(X,N,axis=0) y_folds = np.split(y,N,axis=0) return [{'x':x_folds[i],'y':y_folds[i]} for i in range(N)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _batchify(data: nd.NDArray, batch_size):\n # Work out how cleanly we can divide the dataset into bsz parts.\n nbatch = len(data) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data[0: nbatch * batch_size]\n # Evenly divide the data across the bsz batches.\n data = data.reshape(batch_size, -1).transpose()\n # if torch.cuda.is_available():\n # data = data.cuda()\n return data", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def flatten(self):\n return DataArray([s for s in self.unstructured()])", "def pmap_dataset(ds, n_devices):\n n_data = len(ds[0])\n if n_data % n_devices:\n new_len = n_devices * (n_data // n_devices)\n warning_str = (\"Dataset of length {} can not be split onto {} devices.\"\n \"Truncating to {} data points.\".format(\n n_data, n_devices, new_len))\n warnings.warn(warning_str, UserWarning)\n ds = (arr[:new_len] for arr in ds)\n return jax.pmap(lambda x: x)(batch_split_axis(ds, n_devices))", "def _deserialize(self, data):\n\n firstInd = 0\n deserialized_data = []\n for shp in self._data_shape_list:\n if len(shp) > 1:\n shift = np.prod(shp)\n elif len(shp) == 0:\n shift = 1\n else:\n shift = shp[0]\n tmp_array = data[firstInd:firstInd+shift]\n tmp_array = tmp_array.reshape(shp)\n deserialized_data.append(tmp_array)\n firstInd += shift\n return deserialized_data", "def all_gather(data):\n world_size = dist.get_world_size()\n if world_size == 1:\n return [data]\n\n buffer = pickle.dumps(data) #write data into Bytes and stores in buffer\n np_buffer = np.frombuffer(buffer, dtype=np.int8)\n tensor = paddle.to_tensor(np_buffer, dtype='int32') # uint8 doese not have many ops in paddle\n\n # obtain Tensor size of each rank\n local_size = paddle.to_tensor([tensor.shape[0]])\n size_list = []\n dist.all_gather(size_list, local_size)\n max_size = max(size_list)\n\n # receiving tensors from all ranks, \n # all_gather does not support different shape, so we use padding\n tensor_list = []\n if local_size != max_size:\n padding = paddle.empty(shape=(max_size - local_size, ), dtype='int32')\n tensor = paddle.concat((tensor, padding), axis=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.astype('uint8').cpu().numpy().tobytes()[:size]\n data_list.append(pickle.loads(buffer))\n\n return data_list", "def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: # returns list of Xy (read more about function annotations) ????\n return list(\n zip(np.array_split(X, num_partitions), \n np.array_split(y, num_partitions))\n )", "def batch(self, data, size):\n\n return [data[x : x + size] for x in range(0, len(data), size)]", "def rows_from_data (data):\n return data.tolist() # use numpy.ndarray conversion function", "def _read_data(self):\n return [np.array([]), np.array([])]", "def data(self) -> List[ndarray]:\n return self._data", "def batchify(data, batch_size, args):\n # Work out how cleanly we can divide the dataset into batch_size parts (i.e. 
continuous seqs).\n nbatch = data.size(0) // batch_size\n # Trim off any extra elements that wouldn't cleanly fit (remainders).\n data = data.narrow(0, 0, nbatch * batch_size)\n # Evenly divide the data across the batch_size batches.\n data = data.view(batch_size, -1)\n if args.cuda:\n data = data.cuda()\n return data", "def making_x_train_data_list_for_kfold(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n train_data_list = making_dataset_list_train(data, split_num)\n x_train_data_list = making_dataset_list_x(train_data_list)\n return translate_pandas_to_numpy(x_train_data_list)", "def get_data():\n data = [np.array([32.,595.]),\n np.array([30.,599.]),\n np.array([18.,622.]),\n np.array([51.,606.]),\n np.array([38.,578.])]\n return data", "def split_data(self):\r\n print('split data')\r\n np.random.shuffle(self.dataList)\r\n l = len(self.dataList)/self.fold\r\n self.dataList = [self.dataList[i*l: (i+1)*l] for i in range(self.fold-1)] + [self.dataList[(self.fold-1)*l:]] # each element in the list is splitted data list\r", "def batchify(data, batch_size):\n n_batch = data.shape[0] // batch_size\n data = data[:n_batch * batch_size]\n data = data.reshape((batch_size, n_batch)).T\n return data", "def provide_data(self):\n return [(k, v.shape) for k, v in self.data]", "def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')", "def flatten_data(X):\n\n return X.reshape((-1, X.shape[-1]))", "def flatten_data(data):\r\n return list(gen_flatten_data(data))", "def _chunk_data(X, slices):\n\n # from object array to list\n slices = [sl for sl in slices if len(sl)]\n selected_times = np.hstack([np.ravel(sl) for sl in slices])\n start = np.min(selected_times)\n stop = np.max(selected_times) + 1\n slices_chunk = [sl - start for sl in slices]\n X_chunk = X[:, :, start:stop]\n return X_chunk, slices_chunk", "def batch_split(self) -> np.array:\n pass", "def dimension_preprocess(self, data, padding=True):\r\n\r\n assert len(data.shape) == 2, \"Data dimension expected to be ( xline, samp_point)\"\r\n if padding:\r\n if data.shape[0] < self.rows:\r\n padding = np.ones((self.rows - data.shape[0], data.shape[1]))\r\n data = np.concatenate((data, padding), axis=0)\r\n if data.shape[1] < self.cols:\r\n padding = np.ones((data.shape[0], self.cols - data.shape[1]))\r\n data = np.concatenate((data, padding), axis=1)\r\n x_chunks, y_chunks = self.get_chunks(data)\r\n images = []\r\n for x in x_chunks:\r\n for y in y_chunks:\r\n images.append(\r\n data[x[0]:x[1], y[0]:y[1]]\r\n )\r\n images = np.array(images)\r\n\r\n return images", "def split_data(data, squeeze=False):\n vdata = np.atleast_2d(data)\n nr_freqs = int(vdata.shape[1] / 2)\n part1 = vdata[:, 0:nr_freqs]\n part2 = vdata[:, nr_freqs:]\n if(squeeze):\n part1 = part1.squeeze()\n part2 = part2.squeeze()\n return part1, part2", "def getNdArray(self):\n futures = self.client.map(_call_getNdArray, self.vecDask, pure=False)\n arrays = self.client.gather(futures)\n return arrays", "def all_gather(data):\n world_size = get_world_size()\n if world_size == 1:\n return [data]\n\n if type(data) is torch.Tensor:\n data = data.cpu()\n # serialized to a Tensor\n buffer = pickle.dumps(data)\n storage = torch.ByteStorage.from_buffer(buffer)\n tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n # obtain Tensor size of each rank\n local_size = torch.LongTensor([tensor.numel()]).to(\"cuda\")\n size_list = [torch.LongTensor([0]).to(\"cuda\") for _ in range(world_size)]\n dist.all_gather(size_list, local_size)\n size_list = 
[int(size.item()) for size in size_list]\n max_size = max(size_list)\n\n # receiving Tensor from all ranks\n # we pad the tensor because torch all_gather does not support\n # gathering tensors of different shapes\n tensor_list = []\n for _ in size_list:\n tensor_list.append(torch.ByteTensor(size=(max_size,)).to(\"cuda\"))\n if local_size != max_size:\n padding = torch.ByteTensor(size=(max_size - local_size,)).to(\"cuda\")\n tensor = torch.cat((tensor, padding), dim=0)\n dist.all_gather(tensor_list, tensor)\n\n data_list = []\n for size, tensor in zip(size_list, tensor_list):\n buffer = tensor.cpu().numpy().tobytes()[:size]\n data = pickle.loads(buffer)\n if type(data) is torch.Tensor:\n data = data.to(\"cuda\")\n data_list.append(data)\n\n return data_list", "def flatten(X):\n N = X.shape[-1]\n flat = np.zeros((N, 3072))\n for idx, i in enumerate(range(N)):\n # if not idx:\n # print(X[:,:,:,i].reshape(3072))\n flat[i] = X[:,:,:,i].reshape(3072)\n return flat", "def _data_reshape(self, data):\n data_offset = [int(size / 2) for size in data.shape[1:]]\n data_diff = [int(size / 2) for size in self.shape]\n data_diff_min = data_diff\n data_diff_max = []\n for i, elem in enumerate(data_diff):\n if self.shape[i] % 2 == 0:\n data_diff_max.append(elem)\n else:\n data_diff_max.append(elem + 1)\n data = data[:, (data_offset[0] - data_diff_min[0]):(data_offset[0] + data_diff_max[0]),\n (data_offset[1] - data_diff_min[1]):(data_offset[1] + data_diff_max[1]),\n (data_offset[2] - data_diff_min[2]):(data_offset[2] + data_diff_max[2])]\n\n if data.shape[1] == 1:\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n return data", "def provide_data(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._data]", "def train_datas(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(self.size))\r\n np.random.shuffle(indices)\r\n\r\n epoch_size = self.size // batch_size * batch_size\r\n self._train_datas = self._train_datas[indices][:epoch_size] # [epoch_size, ...]\r\n self._train_labels = self._train_labels[indices][:epoch_size] # [epoch_size, ...]\r\n \r\n datas = []\r\n for i in range(self.size // batch_size):\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[i*batch_size:(i+1)*batch_size], \r\n self._train_labels[i*batch_size:(i+1)*batch_size]])\r\n return datas", "def split(self, parts: int) -> List['MultiDimensionalSliceCollection']:\n array_parts = split_array(self.objects, parts)\n return [MultiDimensionalSliceCollection(objects_array=p) for p in array_parts]", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def reconstructed_data(self):\n try:\n data = np.sum(\n np.array([\n self.partial_reconstructed_data(i)\n for i in range(self._max_level)\n ]),\n axis=0)\n except MemoryError:\n data = np.array(self.partial_reconstructed_data(0))\n for i in range(1, self._max_level):\n data = np.sum([data,\n np.array(self.partial_reconstructed_data(i))], axis=0)\n return data", "def _batchify(self, 
data_containers: Dict, batch_size):\n\n X = Variable(torch.LongTensor(data_containers['X'])).to(self.device)\n Y = Variable(torch.FloatTensor(data_containers['Y'])).to(self.device)\n\n data_size = X.size()[0]\n num_batches = data_size // batch_size\n\n return [\n (X[bi * batch_size: (bi + 1) * batch_size],\n Y[bi * batch_size: (bi + 1) * batch_size].unsqueeze(1))\n for bi in range(num_batches + 1)\n ]", "def convert_train(ndata, ndim):\r\n print ('Converting training data ... ')\r\n x = np.zeros([ndata, ndim])\r\n y = np.zeros([ndata])\r\n \r\n for i in range(0, len(flist) - 2):\r\n batchn = filepath + flist[i]\r\n temp = read(batchn)\r\n x[i * 10000:(i + 1) * 10000] = temp['data']\r\n y[i * 10000:(i + 1) * 10000] = temp['labels']\r\n \"\"\"\r\n i=0\r\n batchn = filepath + flist[i]\r\n\r\n temp = read(batchn)\r\n\r\n x[i * 10000:(i + 1) * 10000] = temp['data']\r\n\r\n y[i * 10000:(i + 1) * 10000] = temp['labels']\r\n \"\"\"\r\n return x, y", "def dataset_to_tensors(dataset):\r\n tensor_num = len(dataset[0])\r\n tensors = [torch.LongTensor([sample[i] for sample in dataset]) for i in range(tensor_num)]\r\n return tensors", "def gather_list_batch(data, indices):\n\n assert isinstance(indices, (tuple, list)) or (isndarray(indices) and len(indices.shape) == 1)\n\n if isndarray(data):\n return data[indices]\n\n assert len(data) > 0 and len(indices) > 0\n\n sample = np.array(data[0]) # Try to convert the first element to a typical nd array.\n output = np.empty((len(indices), ) + sample.shape, dtype=sample.dtype)\n for i, j in enumerate(indices):\n output[i] = data[j]\n return output", "def provide_data(self):\r\n # import pdb; pdb.set_trace()\r\n # for k, v in self.data:\r\n # print k,v\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data]", "def array(self):\n aa = list(map(np.asarray, self.loader_array.flat))\n return np.stack(aa, axis=0).reshape(self.output_shape)", "def as_list_of_lists(self):\n return self._matrix_data", "def flatten(self):\n return [e for es in self.array for e in es]", "def _prep_data(self, data, func_input_dtype):\n if func_input_dtype in (None, 'DataArray'):\n return data\n if func_input_dtype == 'Dataset':\n # TODO: add logic that creates a single Dataset comprising all of\n # the DataArray objects in `data`.\n raise NotImplementedError(\"func_input_dtype of `Dataset` not yet \"\n \"implemented.\")\n if func_input_dtype == 'numpy':\n self.coords = data[0].coords\n return [d.values for d in data]", "def data_array(self) -> xr.Dataset:\n\n xr_data = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n parallel=True)\n\n if not all(x in list(xr_data.coords) for x in self.DIMS):\n xr_data = xr_data.rename({\n 'latitude': 'lat',\n 'longitude': 'lon',\n })\n\n if self.subset_dict is not None:\n print(f'Cutting data using {self.subset_dict}')\n xr_data = self.cut(xr_data)\n\n if self.season is not None:\n xr_data = xr_data.where(xr_data.time.dt.season == self.season,\n drop=True)\n\n if self.rescale_longitude is True:\n xr_data = xr_data.assign_coords(lon=(((xr_data.lon + 180) % 360) -\n 180)).sortby('lon')\n\n return xr_data", "def making_x_val_data_list_for_kfold(data:pandas.core.frame.DataFrame, split_num:int) -> list:\n val_data_list = making_dataset_list_val(data, split_num)\n x_val_data_list = making_dataset_list_x(val_data_list)\n return translate_pandas_to_numpy(x_val_data_list)", "def getData(trainSize):\r\n return splitData([getReal(), getFake()], trainSize=trainSize)", "def batch_dataset(x, batch_size):\r\n\tsize_modulo = len(x) % 
batch_size # hack to ensure data is batches successfully\r\n\tif size_modulo != 0:\r\n\t\tx = x[:-size_modulo]\r\n\tpartitioned = np.split(x, batch_size)\r\n\treturn partitioned", "def full_batch(self):\n full_X, full_y = pad_sort_data(self.full_X, self.full_y)\n return full_X, full_y", "def prepare_full_data(raw_data):\n users_id = np.asarray(raw_data[0], dtype='int32')\n items_id = np.asarray(raw_data[1], dtype='int32')\n ratings = np.asarray(raw_data[3], dtype=theano.config.floatX)\n return [users_id, items_id, ratings]", "def making_dataset_list_x(data_list:list) -> list:\n list_size = len(data_list)\n for i in range(list_size):\n data_list[i] = data_list[i].drop([\"W\",\"D\",\"L\"],axis=1)\n return data_list", "def load_partition(idx: int):\r\n assert idx in range(10)\r\n (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()\r\n return (\r\n x_train[idx * 5000 : (idx + 1) * 5000],\r\n y_train[idx * 5000 : (idx + 1) * 5000],\r\n ), (\r\n x_test[idx * 1000 : (idx + 1) * 1000],\r\n y_test[idx * 1000 : (idx + 1) * 1000],\r\n )", "def partition(data, indecies):\n\tsplitdata = [data[:indecies[0]]]\n\tsplitdata += [data[indecies[i-1]:indecies[i]] for i in range(1,len(indecies))]\n\tsplitdata.append(data[indecies[-1]:])\n\treturn splitdata", "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def split_data(data):\n data_values = []\n target_drugs = []\n cell_lines = []\n states = []\n replicates = []\n \n for i in data.columns:\n target_drug = i.split()[-2]\n cell_line = i.split()[-1].split(\"_\")[0]\n state = i.split()[-1].split(\"_\")[1]\n replicate = i.split()[-1].split(\"_\")[2]\n \n data_values.append(list(data[i]))\n target_drugs.append(int(target_drug))\n cell_lines.append(cell_line)\n states.append(state)\n replicates.append(replicate)\n \n data_values = np.array(data_values)\n target_drugs = np.array(target_drugs)\n cell_lines = np.array(cell_lines)\n states = np.array(states)\n replicates = np.array(replicates)\n \n return data_values, target_drugs, cell_lines, states, replicates", "def partition_mnist():\n (x_train, y_train), testset = tf.keras.datasets.mnist.load_data()\n partitions = []\n # We keep all partitions equal-sized in this example\n partition_size = math.floor(len(x_train) / NUM_CLIENTS)\n for cid in range(NUM_CLIENTS):\n # Split dataset into non-overlapping NUM_CLIENT partitions\n idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size\n partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))\n return partitions, testset", "def listify(data):\n\n def _convert_to_list(tensor):\n tensor = tensor.detach().cpu()\n if tensor.dtype == torch.bfloat16:\n # As of Numpy 1.21.4, NumPy does not support bfloat16 (see\n # https://github.com/numpy/numpy/blob/a47ecdea856986cd60eabbd53265c2ca5916ad5d/doc/source/user/basics.types.rst ).\n # Until Numpy adds bfloat16, we must convert float32.\n tensor = tensor.to(torch.float32)\n return tensor.tolist()\n\n return recursively_apply(_convert_to_list, data)", "def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]", "def reshape(data):\n return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size))", "def flatten_npar(np_array):\n \n itr = len(np_array)\n start = np_array[0]\n \n for i in range(1,itr):\n start = np.hstack((start,np_array[i]))\n \n return(np.array(start))", "def load_data(self):\n raw_data = np.genfromtxt(self.data_file, delimiter=',')\n self.n_clusters = int(raw_data[-1][-1] + 1)\n 
self.n_points = len(raw_data) // self.n_clusters\n \n # group data according to label\n data = [raw_data[raw_data[:,-1] == i][:,:-1] \\\n for i in range(self.n_clusters)]\n\n # take only a subset of the data\n if self.split:\n assert 0 <= self.split <= 1, \"Split must be in [0, 1)\"\n\n # update dataset info and print to stdout\n self.n_points = int(self.split * len(data[0]))\n subsampled = self.__len__() - int(self.ood is not None) * self.n_points\n print(f\"INFO: Subsampled {subsampled}/{len(raw_data)} points\")\n \n return [cluster[:self.n_points] for cluster in data]\n return data", "def full_batch(self):\n return self.X_data, self.Y_data", "def split_db_original(x, components):\n cm = components[1]\n ap = []\n for itera in cm:\n ap.append(x[:, itera].tolist())\n ap_np = np.transpose(np.array(ap))\n\n return ap_np", "def val_split(a: Iterable, partitions: int, range_max: int, range_min: int = 0,\n size: bool = True) -> List[np.ndarray]:\n if size:\n n = int(np.ceil(range_max / partitions))\n splits = partitions\n else:\n n = partitions\n splits = (range_max - range_min) // partitions\n\n it = iter(a)\n it_current = next(it)\n ret_val = [[] for _ in range(n)]\n\n try:\n if isinstance(it_current, (tuple, list, np.ndarray)):\n it_current, it_value = it_current\n for i in range(n):\n for j in range(splits):\n split_current = (partitions + 1) * i + j\n while it_current <= split_current:\n ret_val[i].append([it_current, it_value])\n it_current, it_value = next(it)\n continue\n return list(map(np.array, ret_val))\n for i in range(n):\n for j in range(splits):\n split_current = (partitions + 1) * i + j\n while it_current <= split_current:\n ret_val[i].append(it_current)\n it_current = next(it)\n continue\n except StopIteration:\n return list(map(np.array, ret_val))", "def prepare_batches(self, data):\n batches = []\n start, end = 0, 100\n if len(data) > 100:\n while True:\n data_batch = data[start:end]\n if not data_batch:\n break\n temp = end + 100\n start, end = end, temp\n if data_batch:\n batches.append(data_batch)\n else:\n batches.append(data)\n return batches", "def dataset_as_arrays(dataset):\r\n scores = []\r\n lenghts = []\r\n embeddings = []\r\n for row in dataset:\r\n embeddings += [vec for vec in row[0]]\r\n scores.append(float(row[1]))\r\n lenghts.append(row[0].shape[0])\r\n \r\n embeddings = numpy.array(embeddings)\r\n scores = numpy.array(scores)\r\n lenghts = numpy.array(lenghts)\r\n return embeddings, scores, lenghts", "def flatten_array(self):\n numel = self.xyz_array[:, :, 0].size # Number of elements in dataset\n self.flat_array = np.zeros([self._len_z, numel]) # Create array to hold flattened array\n\n # Loop through each dimension (dataset) and flatten it into new array\n for dim in range(self._len_z):\n self.flat_array[dim, :] = np.ravel(self.xyz_array[:, :, dim])", "def divide_with_stride(arr: np.ndarray) -> List[np.ndarray]:\n\n result_list: List[np.ndarray] = []\n # slice by z axis\n for z in range(0, z_len := arr.shape[0], 16):\n if z + 31 >= z_len:\n z = z_len - 16\n z_arr: np.ndarray = arr[z:z+16]\n\n # slice by y axis\n for y in range(0, y_len := arr.shape[1], 16):\n y_arr: np.ndarray = z_arr[:, y:y+16]\n\n # slice by x axis\n for x in range(0, x_len := arr.shape[2], 16):\n x_arr: np.ndarray = y_arr[:, :, x:x+16]\n if len(set(x_arr.shape)) == 1 and x_arr.shape[0] == 16:\n result_list.append(x_arr)\n \n return result_list", "def partition_data(self):\n\n _header_ = self._header_ + 'partition_data(): '\n\n if self.verbose:\n print(_header_ + 'Partitioning data 
...')\n\n network = self._useful_network()\n\n if self.nidx_train:\n # The only reason that allows .nidx to not be empty would be that a training Data was copied over\n # hence, the training node indices are retained and need to be excluded\n print(_header_ + 'Excluding %d training nodes transfered from training dataset ...' % len(self.nidx_train))\n nidx = set(self.nidx2lidx.keys()) - set(self.nidx_train)\n self.nidx_exclude += self.nidx_train\n self.nidx_train = []\n else:\n nidx = set(self.nidx2lidx.keys())\n\n for l in nidx:\n if l in network:\n if self.node_labels[l]:\n self.nidx_train.append(l)\n else:\n self.nidx_exclude.append(l)\n\n if self.verbose:\n print(_header_ + 'Found %d nodes' % len(self.nidx2lidx))\n print(' %d nodes with labels of interest' % len(self.nidx_train))\n print(' %d nodes can be used to predict' % len(self.nidx_pred))\n print(' %d nodes cannot be mapped due to lack of mappable links' % len(self.nidx_exclude))\n\n return self", "def getSplittedDatasetInNumpy(self, xFold_step, xFold_type, depth_first=False, onehot=False):\n if not self.__read_in_dataset:\n (x_train, y_train), (x_eval, y_eval), (x_test, y_test) = self.loadDataset()\n\n self.__read_in_images, self.__read_in_labels = self.convertDatasetToNumpy(x_train, y_train, x_eval, y_eval,\n x_test, y_test, self._read_in_shape, self.__read_in_size,\n self.__num_classes, depth_first, onehot)\n self.__read_in_dataset = True\n\n return self.prepare_dataset(xFold_step, xFold_type)", "def train_datas_debug(self, batch_size):\r\n if not isinstance(batch_size, int):\r\n raise ValueError('In Dataset, batch_size should be int, get '\r\n '{}'.format(type(batch_size)))\r\n if batch_size <= 0:\r\n raise ValueError('In Dataset, batch_size should larger equal to '\r\n '1, get {}'.format(batch_size))\r\n \r\n indices = list(range(batch_size))\r\n \r\n datas = []\r\n # for label, we have box and landmark which is 0.\r\n datas.append([self._train_datas[:batch_size], \r\n self._train_labels[:batch_size]])\r\n return datas", "def all_gather_list(data, group=None, max_size=16384):\n SIZE_STORAGE_BYTES = 4 # int32 to encode the payload size\n\n enc = pickle.dumps(data)\n enc_size = len(enc)\n\n if enc_size + SIZE_STORAGE_BYTES > max_size:\n raise ValueError(\n 'encoded data exceeds max_size, this can be fixed by increasing buffer size: {}'.format(enc_size))\n\n rank = get_rank()\n world_size = get_world_size()\n buffer_size = max_size * world_size\n\n if not hasattr(all_gather_list, '_buffer') or \\\n all_gather_list._buffer.numel() < buffer_size:\n all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)\n all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()\n\n buffer = all_gather_list._buffer\n buffer.zero_()\n cpu_buffer = all_gather_list._cpu_buffer\n\n assert enc_size < 256 ** SIZE_STORAGE_BYTES, 'Encoded object size should be less than {} bytes'.format(\n 256 ** SIZE_STORAGE_BYTES)\n\n size_bytes = enc_size.to_bytes(SIZE_STORAGE_BYTES, byteorder='big')\n\n cpu_buffer[0:SIZE_STORAGE_BYTES] = torch.ByteTensor(list(size_bytes))\n cpu_buffer[SIZE_STORAGE_BYTES: enc_size + SIZE_STORAGE_BYTES] = torch.ByteTensor(list(enc))\n\n start = rank * max_size\n size = enc_size + SIZE_STORAGE_BYTES\n buffer[start: start + size].copy_(cpu_buffer[:size])\n\n all_reduce(buffer, group=group)\n\n try:\n result = []\n for i in range(world_size):\n out_buffer = buffer[i * max_size: (i + 1) * max_size]\n size = int.from_bytes(out_buffer[0:SIZE_STORAGE_BYTES], byteorder='big')\n if size > 0:\n 
result.append(pickle.loads(bytes(out_buffer[SIZE_STORAGE_BYTES: size + SIZE_STORAGE_BYTES].tolist())))\n return result\n except pickle.UnpicklingError:\n raise Exception(\n 'Unable to unpickle data from other workers. all_gather_list requires all '\n 'workers to enter the function together, so this error usually indicates '\n 'that the workers have fallen out of sync somehow. Workers can fall out of '\n 'sync if one of them runs out of memory, or if there are other conditions '\n 'in your training script that can cause one worker to finish an epoch '\n 'while other workers are still iterating over their portions of the data.'\n )", "def device_reshape(self, x: JaxArray) -> JaxArray:\n assert hasattr(x, 'ndim'), f'Expected JaxArray, got {type(x)}. If you are trying to pass a scalar to ' \\\n f'parallel, first convert it to a JaxArray, for example np.float(0.5)'\n if x.ndim == 0:\n return np.broadcast_to(x, [self.ndevices])\n assert x.shape[0] % self.ndevices == 0, f'Must be able to equally divide batch {x.shape} among ' \\\n f'{self.ndevices} devices, but does not go equally.'\n return x.reshape((self.ndevices, x.shape[0] // self.ndevices) + x.shape[1:])", "def flatten_layers(data):\n return data.reshape((data.shape[0], data.shape[1], -1))", "def batches(data, batch_size) -> list:\n rv = []\n for idx, line in enumerate(data):\n if idx != 0 and idx % batch_size == 0:\n yield rv\n rv = []\n rv.append(line)\n yield rv", "def get_training_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n img_set = os.listdir(path)\n n = len(img_set)\n for i in range(n):\n try:\n img = img_set[i]\n img_arr = cv2.imread(os.path.join(path, img))\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n data.append([resized_arr, class_num])\n if i % 100 == 0:\n print(\"Processing images: {}/{}\".format(i + 1, n))\n except Exception as e:\n print(e)\n return np.array(data)", "def to_tvm_ndarray(a: List[np.ndarray]) -> List[tvm.nd.NDArray]:\n assert a is not None, \"Empty result cannot be converted to TVM NDArray\"\n return [tvm.nd.array(x) for x in a]", "def all_data(self) -> Optional[np.ndarray]:\n if self._data_store is None:\n return None\n return self._data_store[:self._count, :]", "def flatten(self) -> np.ndarray:\n\n return self.data.copy()", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def loadNumpyAnnotations(self, data):\n print('Converting ndarray to lists...')\n assert(type(data) == np.ndarray)\n print(data.shape)\n assert(data.shape[1] == 7)\n N = data.shape[0]\n ann = []\n for i in range(N):\n if i % 1000000 == 0:\n print('{}/{}'.format(i,N))\n ann += [{\n 'image_id' : int(data[i, 0]),\n 'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],\n 'score' : data[i, 5],\n 'category_id': int(data[i, 6]),\n }]\n return ann", "def tensors(self):\n return [x[0] for x in self.__normalizeData__(self.__tensors__)]", "def flatten(self):\n if self.data:\n def flat(l):\n ans=[]\n for i in l:\n if type(i)==list:\n ans.extend(flat(i))\n else:\n ans.append(i)\n return ans\n return flat(self.data)\n else:\n return []", "def get_data(self):\n return [\n self.offset, self.diag, self.orig_freq_diag, self.orig_freq_dims\n ]", "def 
get_data(data_file_path):\n data_file = open(data_file_path, 'r').readlines()\n data = []\n n = -1\n dim = -1\n for i in range(len(data_file)):\n line_elems = [float(x) for x in data_file[i].split()]\n if i == 0:\n n = int(line_elems[0])\n dim = int(line_elems[1])\n else:\n data.append(np.array(line_elems))\n return data, n, dim", "def split_and_load(batch_data, num_gpus):\n return [batch_data[i].data[0] for i in range(num_gpus)], \\\n [batch_data[i].label[0].as_in_context(mx.gpu(i)) for i in range(num_gpus)]", "def getFloatArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def _reshape_batch(inputs, size, batch_size):\n batch_inputs = []\n for length_id in range(size):\n batch_inputs.append(np.array([inputs[batch_id][length_id]\n for batch_id in range(batch_size)], dtype=np.int32))\n return batch_inputs", "def get_x_data(\n params, dimension, fold, subject_list,\n):\n X = []\n input_file_path = os.path.join(\n params[\"orig_path\"], \"ae_output_{}\".format(params[\"modality\"])\n )\n for i in range(len(subject_list)):\n x_sub_data_path = os.path.join(\n input_file_path,\n str(dimension),\n \"fold_{}\".format(fold),\n \"X_{}.npy\".format(subject_list[i]),\n )\n if not os.path.exists(x_sub_data_path):\n x_sub_data = build_x_data(\n dimension, fold, subject_list, i, params, out_file=input_file_path,\n )\n else:\n x_sub_data = np.load(x_sub_data_path)\n X.append(x_sub_data)\n X = np.array(X)\n return X\n # interTVA data has already been run on taskFMRI, on frioul", "def split_to_batches(self, train_data, batch_size):\n num_of_training_examples = len(train_data)\n for i in range(0, num_of_training_examples, batch_size):\n x, y = zip(*train_data[i: i+batch_size])\n yield np.vstack(x), np.vstack(y)", "def preprocess_the_dataset(x_train):\n # initialize empty lists\n x_real,x_pencil=[],[]\n for image in x_train[:8000]:\n real_image = resize_128(image)\n # make it pencil drawn\n pencil_image = convert_image(real_image)\n x_real.append(real_image/127.5-1)\n x_pencil.append(pencil_image/127.5-1)\n return x_real,x_pencil", "def _finalize_data(self):\n\n if isinstance(self.node_data, np.ndarray): # SR workflow\n self.node_data = da.from_array(self.node_data)\n elif isinstance(self.node_data, list): # vr workflow\n struct_data = np.empty(len(self.node_data), dtype=self.data.dtype)\n datavals = np.array(self.node_data)\n for cnt, varname in enumerate(self.data.dtype.names):\n struct_data[varname] = datavals[:, cnt]\n self.node_data = da.from_array(struct_data)\n if isinstance(self.data, np.ndarray):\n self.data = da.from_array(self.data)", "def split_array(arr, num_of_splits):\n # TODO Replace this function with gluon.utils.split_data() once targeting MXNet 1.7\n size = arr.shape[0]\n if size < num_of_splits:\n return [arr[i:i + 1] for i in range(size)]\n slice_len, rest = divmod(size, num_of_splits)\n div_points = [0] + [(slice_len * index + min(index, rest) + slice_len + (index < rest))\n for index in range(num_of_splits)]\n slices = [arr[div_points[i]:div_points[i + 1]] for i in range(num_of_splits)]\n return slices", "def pre_process_data(input_path: list, cuts: int, shape: int = 32, normalize: bool = True) -> list:\n images = []\n images_uncut = []\n 
for files_path in input_path:\n\n files = os.listdir(files_path) # TODO paths\n for f in files:\n file_path = f'{files_path}/{f}'\n im_uncut = cv2.imread(file_path)\n im_uncut = cv2.cvtColor(im_uncut, cv2.COLOR_RGB2GRAY)\n images_uncut.append(cv2.resize(im_uncut, (shape * cuts, shape * cuts)))\n x = np.array(images_uncut)\n\n if normalize:\n x_mean = np.mean(x, axis=(0, 1, 2))\n x_std = np.std(x, axis=(0, 1, 2))\n x = (x - x_mean) / (x_std + 1e-9)\n\n for im in x:\n height = im.shape[0]\n width = im.shape[1]\n frac_h = height // cuts\n frac_w = width // cuts\n i = 0\n image = []\n for h in range(cuts):\n for w in range(cuts):\n crop = im[h * frac_h:(h + 1) * frac_h, w * frac_w:(w + 1) * frac_w]\n crop_rehaped = cv2.resize(crop, (shape, shape))\n image.append([crop_rehaped, i, number_to_angle(i, cuts), neighbours(i, cuts)])\n i = i + 1\n images.append(image)\n # return np.array(images) # todo back to array\n return images", "def tohost(x):\n n_device, n_batch, *remaining_dims = x.shape\n return x.reshape((n_device * n_batch,) + tuple(remaining_dims))", "def list_from(tensorArray, length):\n arr = tensorArray\n result_list = []\n with tf.name_scope(\"createlist\"):\n for i in range(length):\n result_list.append(arr.read(i))\n return result_list", "def _listOfImagesToNumpy(self, images):\n # build image data array, y_labels\n for i in range(0, len(images)):\n if self.image_size is not None:\n img = images[i].resize(self.image_size)\n else:\n img = images[i]\n img_arr = img_to_array(img)\n if i == 0:\n dims = [len(images)] + list(img_arr.shape)\n X_data = np.zeros(shape=dims)\n X_data[i, :, :, :] = img_arr\n\n return X_data", "def prepare_sFlat_data(notes, track_range = None, enc_shape = (1,), ip_memory = 32, depth = 2, spread = 16):\n track_range = track_range if track_range else [0, 1]\n \n data_in, data_out = [], []\n \n for tr in range(track_range[1] - track_range[0]):\n # trk = tr - track_range[0]\n nt = notes[tr]\n data_in.append([])\n data_out.append([])\n lent = len(notes[tr])\n # for j in range(lent):\n le = len(nt)\n \n chunks_count = le // ip_memory + 1\n \n for i in range(le - ip_memory):\n start, end = i, i + ip_memory\n buf_size = ip_memory if end < le else le - start # only reason due to logic below else not needed\n buffer = numpy.zeros((ip_memory, depth,))\n # print(\"buff shape : \", buffer.shape)\n buffer[:buf_size, :] = nt[start : start + buf_size]\n\n data_in[tr].append(buffer)\n \n data_out[tr].append((nt[end] if end < le else notes[0][0]))\n \n # if track_range[1]- track_range[0] == 1: #is scalar, no track\n # data_in, data_out = data_in[0], data_out[0]\n \n\n return numpy.array(data_in), numpy.array(data_out)", "def _special_handle_slice(cls, op, X, W):\n tensor_list = []\n # slice add starts, ends, axes, steps\n append_inputs = {\n \"starts\": op.starts,\n \"ends\": op.ends,\n \"axes\": op.axes,\n \"steps\": op.steps,\n }\n for tmp_name, append_input in append_inputs.items():\n node_name = op.name + \":\" + tmp_name\n tensor_list.append(\n numpy_helper.from_array(np.array(append_input), node_name))\n return tensor_list", "def _serialize(self, data):\n data = [np.array(j) for j in data]\n self._data_shape_list = [j.shape for j in data]\n serialized_data = [j.ravel() for j in data]\n serialized_data = np.hstack(serialized_data)\n return serialized_data" ]
[ "0.6259908", "0.62205553", "0.60901254", "0.6081688", "0.6013226", "0.6012804", "0.600478", "0.5999807", "0.59407836", "0.5916167", "0.59141505", "0.5829531", "0.5814003", "0.5805772", "0.58050174", "0.5766101", "0.5750351", "0.5743768", "0.5734869", "0.57283336", "0.57187784", "0.57168555", "0.5709373", "0.56988317", "0.56969655", "0.5693854", "0.56605345", "0.56540835", "0.5643301", "0.5641633", "0.5636813", "0.5622327", "0.5584038", "0.5584038", "0.55707383", "0.5566152", "0.55501294", "0.5546736", "0.5545102", "0.5532558", "0.55310684", "0.5528217", "0.5506252", "0.5505505", "0.54848677", "0.5474343", "0.5469661", "0.5466549", "0.54579204", "0.5454622", "0.5441314", "0.5433917", "0.5415823", "0.5415327", "0.54046404", "0.5397303", "0.53958327", "0.5390964", "0.5388822", "0.5388136", "0.5382278", "0.5371022", "0.53694564", "0.5366867", "0.5366535", "0.53640443", "0.535751", "0.53558195", "0.53557956", "0.53545535", "0.5349874", "0.53476006", "0.5340833", "0.5328932", "0.5326501", "0.53249705", "0.5324576", "0.53117764", "0.5311186", "0.53047365", "0.52986306", "0.5282975", "0.52788985", "0.5278781", "0.5275934", "0.5274326", "0.5267588", "0.526461", "0.5262115", "0.52602965", "0.5253008", "0.5250066", "0.5247323", "0.52441406", "0.52393687", "0.5237266", "0.52348673", "0.5234228", "0.5232679", "0.52280873", "0.52266437" ]
0.0
-1
Applies a function mapping to each element in the feature data.
def apply_fn(self,fn):
    self.check_Data()
    for split,data_ in self.processed_data.items():
        x = data_['x']
        x = np.array([fn(xi) for xi in x])
        data_['x'] = x
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def map(self, function=lambda value: value):\n for j, value in enumerate(self):\n self[j] = function(value)", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map(self, function):\n pass", "def map(self, function):\n return FunctionalWrapper(map(function, self.data))", "def map(self, map_function, *map_arguments) -> None:\n\n elements = []\n self.__get_sorted_elements(self.__root, elements)\n\n for element in elements:\n map_function(element, *map_arguments)", "def list_map(data, function):\n return list(map(function, data))", "def convert(self, function=pointwise_mi):\n self.normalise()\n feat_prob = Counter()\n for feat_set in self.itervalues():\n for feat in feat_set:\n feat_prob[feat] += feat_set[feat]\n \n for feat_set in self.itervalues():\n code_prob = sum(feat_set.values())\n for feat in feat_set:\n feat_set[feat] = function(code_prob, feat_prob[feat], feat_set[feat])\n return self", "def apply(self, fn, column_label):\n return [fn(v) for v in self[column_label]]", "def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)", "def map(self, func):\n return _(map(func, self._))", "def map(self, func):\n return List(map(func, self))", "def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])", "def map(function, iterable):\n\n return [function(x) for x in iterable]", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", "def map(self, fn, *iterables, **kwargs):\n fn = self._prepare_fn(fn)\n return self._self.map(fn, *iterables, **kwargs)", "def map_my(self, func: Callable[[Union[float, int]], int]) -> None:\n def list_func(lst: List[valueType]) -> List[valueType]:\n \"\"\"\n To apply the function/operation defined by users to every item in the list.\n :param lst: A list object like [element1, [element2, element3], element4].\n :return: A list that store the result of items after user-defined operation.\n \"\"\"\n tmp = [] # type: List[valueType]\n for e in lst:\n if isinstance(e, (list, set, tuple)):\n tmp.append(list_func(list(e)))\n else:\n if isinstance(e, (float, int)):\n tmp.append(func(e))\n else:\n raise Exception\n return tmp\n\n for head_node in self.hashTable:\n for node in head_node.singlyLinkedList:\n node.values = list_func(node.values)", "def map(iterable, function):\n for x in iterable:\n yield function(x)", "def applyMapping(self):\n pass", "def mapf( f, C ):\n return (f(x) for x in C)", "def _map_fn(self):\n raise NotImplementedError", "def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(map(func,x))\n return res", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def map(self, func, *sequences):\n return self.mapper().map(func, *sequences)", "def map(self, func: Callable[[T], V]) -> 'List[V]':\n return [func(v) for v in self.array]", "def self_map(self, func: Callable[[dd.Series], Any], **kwargs: Any) -> List[Any]:\n return [func(df, **kwargs) for df in self.data]", "def mapfn(k, v):\n for row in v:\n # completar\n pass", "def foreach(function):\n return partial(map, function)", "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def map():", "def map_(func, some_list):\n \n result = []\n \n for arg in some_list:\n result.append(func(arg))\n \n return result", "def map_functions(x, functions):\n res = []\n for func in 
functions:\n res.append(func(x))\n return res", "def apply_function_vector(function_vector, x_vector):\n function_index = 0\n element_index = 1\n \n def d():\n for e in zip(function_vector, x_vector):\n print(e[1])\n d()\n \n return list(map(lambda fx_set: fx_set[function_index](fx_set[element_index]), zip(function_vector, x_vector)))", "def maplist(f, xs):\n return list(map(f, xs))", "def map_values_c(fun):\n return partial(map_values, fun)", "def map(self, func: Callable[[Trajectory, Any], Tuple[Trajectory, Any]]) -> Data:\n trajs, labels = [], []\n for traj, label in zip(self.trajs, self.labels):\n traj, label = func(traj, label)\n trajs.append(traj)\n labels.append(label)\n return Data(trajs, labels)", "def apply(df, f):\n return [f(row) for row in df]", "def Map(context, funcname, *nodesets):\n (prefix, local) = ExpandQName(funcname, namespaces=context.processorNss)\n func = (g_extFunctions.get(expanded) or\n CoreFunctions.CoreFunctions.get(expanded, None))\n if not func:\n raise Exception('Dynamically invoked function %s not found.'%funcname)\n flist = [f]*len(nodesets)\n lf = lambda x, f, *args: apply(f, args)\n retlist = apply(map, (lf, flist) + nodesets)\n\n proc = context.processor\n result_nodeset = []\n for ret in retlist:\n proc.pushResult()\n proc.writers[-1].text(Conversions.StringValue(ret))\n frag = proc.popResult()\n context.rtfs.append(frag)\n result_nodeset.append(frag.childNodes[0])\n return result_nodeset", "def applyFuncOnValues(self, func):\r\n self._value = func(self._value)", "def apply_mapping_func(self, mfunc, domain_taxon_set, range_taxon_set=None):\n self.forward = {}\n self.reverse = {}\n self.domain_taxon_set = domain_taxon_set\n if range_taxon_set is None:\n self.range_taxon_set = TaxonSet()\n else:\n self.range_taxon_set = range_taxon_set\n for dt in self.domain_taxon_set:\n rt = mfunc(dt)\n if rt not in self.range_taxon_set:\n self.range_taxon_set.add(rt)\n self.forward[dt] = rt\n try:\n self.reverse[rt].add(dt)\n except KeyError:\n self.reverse[rt] = set([dt])", "def process(dataset, f):\n logger.info('processing dataset ({0})'.format(len(dataset.samples)))\n for sample in dataset.samples:\n sample.proc = f(sample.image)", "def forEach(self, func):\n for x in range(self._width):\n for y in range(self._height):\n func(self.data[x, y], x, y)", "def apply(L, f):\n\n result = []\n for i in L:\n result.append(f(i))\n\n return result", "def MapDataList(ea, length, func, wordsize=1):\n PutDataList(ea, map(func, GetDataList(ea, length, wordsize)), wordsize)", "def simple_map_2(f, l):\n # Same as above without comprehension:\n mapped_l = []\n for item in l:\n mapped_l.append( f(item) ) # the extra blanks are just for readability\n return mapped_l", "def apply(self, func):\n ret = [func(self)]\n for _, node in self.children.items():\n ret.extend(node.apply(func))\n return ret", "def mapf(f: Callable[[D_], R_], C: Iterable[D_]) -> Iterator[R_]:\n return (f(x) for x in C)", "def apply(self, function, *args, **kwargs):\n pass", "def map(self, fn, inv_fn):\r\n\t\treturn MapProjectedList(self, [fn], [inv_fn])", "def apply_(self, function):\n self.sequences = [function(seq) for seq in self.sequences]\n return self", "def map(self, func):\n if self.is_right(): return self.right.map(func)\n if self.is_left(): return self.left.map(func)", "def apply(cls, func):\n raise NotImplementedError", "def apply(self, f):\n for v in self.vertices:\n v.x, v.y, v.z = f(v.coords())", "def adem_basis_elt_2_map(*, Sq_fn, basis_elt):\r\n return [Sq_fn(Sq) for Sq in basis_elt]", "def 
imap(self, func: Callable[[T], V]) -> '_[V]':\n return _(map(func, self.array))", "def _map(self, p_input:Element, p_output:Element):\r\n \r\n self._sl_model.eval()\r\n\r\n # Input pre processing\r\n input = self.input_preproc(p_input)\r\n\r\n # Make prediction\r\n output = self.forward(input)\r\n\r\n # Output post processing\r\n output = self.output_postproc(output)\r\n\r\n # Set list to Element\r\n p_output.set_values(output)", "def _process_data(rdd_entry, feature_list):\n events = []\n for event in rdd_entry:\n events.append(event[RDD_EVENT])\n return IptablesIngestor.vectorize_events(events, feature_list)", "def map(iteratee, *seqs):\n return _map(fnc.iteratee(iteratee), *seqs)", "def lmap(f: Callable, *xs) -> list:\n return list(map(f, *xs))", "def _predict(self, feature_map_tensor_list):\n pass", "def map_all(f: Callable[[GT], GS], *args, **kwargs) -> Callable[[GT], GS]:\n\n def _map_all(arr: GT) -> GS:\n return f(arr, *args, **kwargs)\n\n return _map_all", "def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass", "def _transform_fn(features, mode):\n\t\tprint('Before feature transform_fn')\n\t\tfor k in features:\n\t\t\tprint(features[k].shape)\n\t\tcontext_features, example_features = feature_lib.encode_listwise_features(\n\t\t\t\tfeatures,\n\t\t\t\tinput_size=input_size,\n\t\t\t\tcontext_feature_columns=context_feature_columns,\n\t\t\t\texample_feature_columns=example_feature_columns,\n\t\t\t\tmode=mode)\n\t\tprint('After feature transform_fn')\n\t\tfor k in example_features:\n\t\t\tprint(k)\n\t\t\tprint(example_features[k].shape)\n\t\tfor k in context_features:\n\t\t\tprint(k)\n\t\t\tprint(context_features[k].shape)\n\t\treturn context_features, example_features", "def process_data(self, data):\n\n for feat, vals in zip(data.names, data.feats):\n for val in vals:\n self.fvals[feat][val][1] = Literal(feature=feat, value=val)", "def apply(self, func, *args, **kwargs):\n pass", "def map_bound(self, func):\n\n def iter_all():\n for x in self:\n yield from func(x)\n\n return List(iter_all())", "def transform(self, X): # pylint: disable=invalid-name\n\n if self.target is not None:\n # If we have a target, each element of X takes the keyword argument\n if self.iterate:\n return [self.function(**dict(list(self.kwargs.items())\n + list({self.target: i}.items())))\n for i in X]\n else:\n return self.function(**dict(list(self.kwargs.items())\n + list({self.target: X}.items())))\n else:\n # Each element of X takes first position in function()\n if self.iterate:\n return [self.function(i, **self.kwargs) for i in X]\n else:\n return self.function(X, **self.kwargs)", "def map(self, func, *args, **kwds):\r\n if not self.extensions:\r\n # FIXME: Use a more specific exception class here.\r\n raise RuntimeError('No %s extensions found' % self.namespace)\r\n response = []\r\n for e in self.extensions:\r\n self._invoke_one_plugin(response.append, func, e, args, kwds)\r\n return response", "def multi_mapping(func_name, arg_value_pairs, module_name = \"__main__\"):\n func, arg_names = get_function_args(module_name = module_name, function_name = func_name)\n \n return list(map(lambda arg_value_pair: call_func_dynamically(function_name = func_name, \n argument_names = arg_names, \n arg_value_pair = arg_value_pair) ,\n arg_value_pairs))", "def apply(self, func, *args):\n pass", "def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list", "def map(self, f_list: List[Callable[[np.ndarray], int]], axis: int = 0, 
chunksize: int = 1000, selection: np.ndarray = None) -> List[np.ndarray]:\n\t\tif hasattr(f_list, '__call__'):\n\t\t\traise ValueError(\"f_list must be a list of functions, not a function itself\")\n\n\t\tresult = []\n\t\tif axis == 0:\n\t\t\trows_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[0]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[0]:\n\t\t\t\trows_per_chunk = min(self.shape[0] - ix, rows_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :][:, selection]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[ix:ix + rows_per_chunk, :]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + rows_per_chunk] = np.apply_along_axis(f_list[i], 1, chunk)\n\t\t\t\tix = ix + rows_per_chunk\n\t\telif axis == 1:\n\t\t\tcols_per_chunk = chunksize\n\t\t\tfor i in range(len(f_list)):\n\t\t\t\tresult.append(np.zeros(self.shape[1]))\n\t\t\tix = 0\n\t\t\twhile ix < self.shape[1]:\n\t\t\t\tcols_per_chunk = min(self.shape[1] - ix, cols_per_chunk)\n\t\t\t\tif selection is not None:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk][selection, :]\n\t\t\t\telse:\n\t\t\t\t\tchunk = self[:, ix:ix + cols_per_chunk]\n\t\t\t\tfor i in range(len(f_list)):\n\t\t\t\t\tresult[i][ix:ix + cols_per_chunk] = np.apply_along_axis(f_list[i], 0, chunk)\n\t\t\t\tix = ix + cols_per_chunk\n\t\treturn result", "def map(self, f):\n self.append(Mapper(f))\n return self", "def Map(\r\n data,\r\n map_fct: Callable,\r\n info: List[Dict] = None,\r\n lazy: bool = True,\r\n workers: int = 1,\r\n buffer_len: int = 3,\r\n *arg: list,\r\n **kwargs: Dict\r\n) -> Union[MapAbstract, DataAbstract, np.ndarray, list]:\r\n\r\n if lazy:\r\n return MapAbstract(data, map_fct, *arg, info=info, **kwargs)\r\n else:\r\n return DataAbstract(\r\n MapAbstract(data, map_fct, *arg, info=info, **kwargs),\r\n workers=workers,\r\n buffer_len=buffer_len,\r\n )[:]", "def _apply(self, x, **kwargs):\n return reduce(lambda x_i, tr: tr._apply(x_i), self.transforms, x)", "def map_values(function, dictionary):\n return {k: function(dictionary[k]) for k in dictionary}", "def apply(self, vec):\n raise NotImplementedError", "def map_values(fun, a_dict):\n return dict((k, fun(v)) for (k, v) in a_dict.items())", "def num_func_mapper(nums, funs):\n pass", "def apply_features(self, features):\n # feature_values is a multi-dimensional list\n # 1st dimension: Feature (class)\n # 2nd dimension: token\n # 3rd dimension: values (for this token and feature, usually just one value, sometimes more,\n # e.g. \"w2vc=975\")\n features_values = [feature.convert_window(self) for feature in features]\n\n for token in self.tokens:\n token.feature_values = []\n\n # After this, each self.token.feature_values will be a simple list\n # of feature values, e.g. 
[\"w2v=875\", \"bc=48\", ...]\n for feature_values in features_values:\n assert isinstance(feature_values, list)\n assert len(feature_values) == len(self.tokens)\n\n for token_idx in range(len(self.tokens)):\n self.tokens[token_idx].feature_values.extend(feature_values[token_idx])", "def map_all(self, f):\n return Document((name, Column(imap(f, col))) for name, col in self)", "def apply(self,i,x):\n #applies the ith map to the point x\n y = self.A[i,:,:] @ x + self.b[i,:]\n return y", "def map_fn(fn, elems, dtype=None, parallel_iterations=None, back_prop=True,\n swap_memory=False, infer_shape=True, name=None):\n if not callable(fn):\n raise TypeError(\"fn must be callable.\")\n\n if isinstance(elems, sparse_tensor.SparseTensor):\n raise TypeError(\n \"To perform a map on the values of a sparse tensor use either \"\n \" SparseTensor(input.indices, fn(input.values), input.dense_shape) or \"\n \" SparseTensor(input.indices, map_fn(fn, input.values), \"\n \"input.dense_shape)\")\n\n in_graph_mode = not context.executing_eagerly()\n # Set the default number of parallel_iterations depending on graph/eager mode.\n if in_graph_mode and not parallel_iterations:\n parallel_iterations = 10\n elif not in_graph_mode and not parallel_iterations:\n parallel_iterations = 1\n\n if not in_graph_mode and parallel_iterations > 1:\n logging.log_first_n(logging.WARN, \"Setting parallel_iterations > 1 has no \"\n \"effect when executing eagerly. Consider calling map_fn\"\n \" with tf.contrib.eager.defun to execute fn in \"\n \"parallel.\", 1)\n parallel_iterations = 1\n\n input_is_sequence = nest.is_sequence(elems)\n input_flatten = lambda x: nest.flatten(x) if input_is_sequence else [x]\n def input_pack(x):\n return nest.pack_sequence_as(elems, x) if input_is_sequence else x[0]\n\n if dtype is None:\n output_is_sequence = input_is_sequence\n output_flatten = input_flatten\n output_pack = input_pack\n else:\n output_is_sequence = nest.is_sequence(dtype)\n output_flatten = lambda x: nest.flatten(x) if output_is_sequence else [x]\n def output_pack(x):\n return (nest.pack_sequence_as(dtype, x)\n if output_is_sequence else x[0])\n\n elems_flat = input_flatten(elems)\n\n with ops.name_scope(name, \"map\", elems_flat):\n # TODO(akshayka): Remove the in_graph_mode check once caching devices are\n # supported in Eager\n if in_graph_mode:\n # Any get_variable calls in fn will cache the first call locally\n # and not issue repeated network I/O requests for each iteration.\n varscope = vs.get_variable_scope()\n varscope_caching_device_was_none = False\n if varscope.caching_device is None:\n # TODO(ebrevdo): Change to using colocate_with here and in other\n # methods.\n varscope.set_caching_device(lambda op: op.device)\n varscope_caching_device_was_none = True\n\n elems_flat = [\n ops.convert_to_tensor(elem, name=\"elem\") for elem in elems_flat]\n\n dtype = dtype or input_pack([elem.dtype for elem in elems_flat])\n dtype_flat = output_flatten(dtype)\n\n # Convert elems to tensor array. 
n may be known statically.\n static_shape = elems_flat[0].shape\n if static_shape.ndims is not None and static_shape.ndims < 1:\n if len(elems_flat) == 1:\n raise ValueError(\"elems must be a 1+ dimensional Tensor, not a scalar\")\n else:\n raise ValueError(\n \"elements in elems must be 1+ dimensional Tensors, not scalars\"\n )\n n = (tensor_shape.dimension_value(static_shape[0])\n or array_ops.shape(elems_flat[0])[0])\n\n # TensorArrays are always flat\n elems_ta = [\n tensor_array_ops.TensorArray(dtype=elem.dtype,\n size=n,\n dynamic_size=False,\n infer_shape=True)\n for elem in elems_flat]\n # Unpack elements\n elems_ta = [\n elem_ta.unstack(elem) for elem_ta, elem in zip(elems_ta, elems_flat)]\n\n i = constant_op.constant(0)\n\n accs_ta = [\n tensor_array_ops.TensorArray(dtype=dt,\n size=n,\n dynamic_size=False,\n infer_shape=infer_shape)\n for dt in dtype_flat]\n\n def compute(i, tas):\n \"\"\"The loop body of map_fn.\n\n Args:\n i: the loop counter\n tas: the flat TensorArray accumulator list\n\n Returns:\n (i + 1, tas): the updated counter + updated TensorArrays\n\n Raises:\n TypeError: if dtype and packed_fn_values structure do not match\n ValueType: if dtype and packed_fn_values lengths do not match\n \"\"\"\n packed_values = input_pack([elem_ta.read(i) for elem_ta in elems_ta])\n packed_fn_values = fn(packed_values)\n nest.assert_same_structure(dtype or elems, packed_fn_values)\n flat_fn_values = output_flatten(packed_fn_values)\n tas = [ta.write(i, value) for (ta, value) in zip(tas, flat_fn_values)]\n return (i + 1, tas)\n\n _, r_a = control_flow_ops.while_loop(\n lambda i, _: i < n, compute, (i, accs_ta),\n parallel_iterations=parallel_iterations,\n back_prop=back_prop,\n swap_memory=swap_memory,\n maximum_iterations=n)\n results_flat = [r.stack() for r in r_a]\n\n n_static = tensor_shape.Dimension(tensor_shape.dimension_value(\n elems_flat[0].get_shape().with_rank_at_least(1)[0]))\n for elem in elems_flat[1:]:\n n_static.merge_with(tensor_shape.Dimension(tensor_shape.dimension_value(\n elem.get_shape().with_rank_at_least(1)[0])))\n for r in results_flat:\n r.set_shape(tensor_shape.TensorShape(n_static).concatenate(\n r.get_shape()[1:]))\n\n # TODO(akshayka): Remove the in_graph_mode check once caching devices are\n # supported in Eager\n if in_graph_mode and varscope_caching_device_was_none:\n varscope.set_caching_device(None)\n\n return output_pack(results_flat)", "def map(self, f):\n if self.is_empty():\n pass\n else:\n items = []\n items.append(f(self._first))\n map(f._rest)\n new_lst = LinkedListRec(items)", "def map_feature(x):\n m, n = x.shape\n out = x\n\n # Add quodratic features.\n for i in range(n):\n for j in range(i, n):\n out = hstack((out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1)))\n\n # Add cubic features.\n for i in range(n):\n for j in range(i, n):\n for k in range(j, n):\n out = hstack(\n (out, x[:, i].reshape(m, 1) * x[:, j].reshape(m, 1) * x[:, k].reshape(m, 1)))\n return out", "def construct_feature_mapping_approx(feature_mapping, weights):\n # here is a function that is created on the fly from the input feature\n # mapping and weights\n def prediction_function(xs):\n designmtx = np.matrix(feature_mapping(xs))\n return linear_model_predict(designmtx, weights)\n # we return the function reference (handle) itself. 
This can be used like\n # any other function\n return prediction_function", "def fmap(func, obj):\n if _coconut.hasattr(obj, \"__fmap__\"):\n return obj.__fmap__(func)\n if obj.__class__.__module__ == \"numpy\":\n from numpy import vectorize\n return vectorize(func)(obj)\n return _coconut_makedata(obj.__class__, *(_coconut_starmap(func, obj.items()) if _coconut.isinstance(obj, _coconut.abc.Mapping) else _coconut_map(func, obj)))", "def process(self, data):\n allocating = (self._output is None)\n ind = 0\n for i, (name, feature) in enumerate(self.features):\n if allocating:\n x = feature.compute(data)\n self.feature_indices[name] = (ind, ind+x.size)\n ind += x.size\n\n if self._output is None:\n self._output = x\n else:\n self._output = np.hstack([self._output, x])\n else:\n self._output[self.feature_indices[name][0]:\n self.feature_indices[name][1]] = \\\n feature.compute(data)\n\n return self._output", "def self_map(\n self,\n func: Callable[[dd.Series], Any],\n condition: Optional[List[bool]] = None,\n **kwargs: Any,\n ) -> List[Any]:\n if condition:\n rslt = []\n for cond, data in zip(condition, self.data):\n if not cond:\n rslt.append(func(data, **kwargs))\n else:\n rslt.append(None)\n return rslt\n return [func(srs, **kwargs) for srs in self.data]", "def map(self, filter_func, func, *args, **kwds):\r\n if not self.extensions:\r\n # FIXME: Use a more specific exception class here.\r\n raise RuntimeError('No %s extensions found' % self.namespace)\r\n response = []\r\n for e in self.extensions:\r\n if filter_func(e, *args, **kwds):\r\n self._invoke_one_plugin(response.append, func, e, args, kwds)\r\n return response", "def _map_fn(*args):\n if len(args) == 1:\n # Unpack the single Tensor/dict argument as features. This is required\n # for the input_fn returns no labels.\n args = args[0]\n features, labels = _Inputs._parse_inputs(args)\n new_input_dict = {}\n\n if add_padding:\n padding_mask, features, labels = (\n _PaddingSignals.pad_features_and_labels(\n features, labels, batch_size))\n\n new_input_dict['features'] = features\n if labels is not None:\n new_input_dict['labels'] = labels\n\n else:\n new_input_dict['features'] = features\n if labels is not None:\n new_input_dict['labels'] = labels\n padding_mask = None\n\n new_input_dict['signals'] = _StopSignals(\n stop=stop, batch_size=batch_size, padding_mask=padding_mask).as_dict()\n\n return new_input_dict", "def modify(\n self,\n map_function: Callable[[int, tf.keras.Model], Tuple[tf.keras.Model, T]],\n num_processes: int = None,\n context: Callable[[int], EnsembleContextManager] = None,\n models: Iterable[int] = None,\n ) -> List[T]:\n return self._run_in_processes(\n process_creator=_model_updating_process,\n inner_function=map_function,\n num_processes=num_processes,\n context=context,\n models=models,\n )", "def apply_transform(key, data, transform_list):\n for transform in transform_list:\n method_name = transform[MethodKeys.METHOD]\n method_params = transform[MethodKeys.PARAMETERS]\n\n if method_name == 'compute_and_apply_vocabulary':\n method_params.update({'vocab_filename': key})\n\n data = TransformMethods.get_method(method_name)(data,\n **method_params)\n return data", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def 
map(self, func, args_list):\n for args in args_list:\n self.add_task(func, args)", "def apply_func(output, func):\n new_output = []\n for dict in output:\n mnemonic = copy.deepcopy(dict['mnemonic'])\n values = dict['values']\n new_values = func(values)\n new_output.append({'mnemonic': mnemonic, 'values': new_values})\n return new_output", "def _apply(self, x, **kwargs):\n return self.transform._apply(x, **kwargs)", "def on_apply(self, node):\n if node.inputs[0].is_constant(Primitive):\n fn = node.inputs[0].value\n conv = MAP.get(fn)\n if conv is not None:\n return conv(self, *node.inputs[1:])\n return relay.Call(self.ref(node.inputs[0]),\n [self.ref(i) for i in node.inputs[1:]])" ]
[ "0.73664135", "0.7220546", "0.70397", "0.70174754", "0.685345", "0.684726", "0.68348914", "0.6801942", "0.6742367", "0.6730765", "0.6695242", "0.6670184", "0.6574957", "0.6559686", "0.6507356", "0.6469651", "0.6460323", "0.6443097", "0.64101386", "0.64018846", "0.63930345", "0.62695825", "0.6259695", "0.6241182", "0.62201834", "0.62168753", "0.6201429", "0.61587334", "0.6142086", "0.6111248", "0.6103294", "0.61026883", "0.6086276", "0.6084425", "0.6067976", "0.60646975", "0.6061407", "0.60554487", "0.6038238", "0.6016536", "0.6003265", "0.6001232", "0.59859747", "0.59119785", "0.58927655", "0.5890452", "0.58747", "0.587379", "0.58398795", "0.5838286", "0.58258593", "0.5825525", "0.58052814", "0.5799047", "0.57633096", "0.5761813", "0.57467306", "0.5730222", "0.57280207", "0.5715881", "0.5714104", "0.57115006", "0.5709396", "0.5709238", "0.57087237", "0.57018065", "0.56948155", "0.5694351", "0.56599486", "0.5648943", "0.5646871", "0.5646022", "0.5640928", "0.56321794", "0.56207526", "0.55999726", "0.55905426", "0.5588114", "0.5570073", "0.55658484", "0.55645317", "0.55426216", "0.55258495", "0.55256325", "0.5502097", "0.54929245", "0.5484673", "0.5477174", "0.5473711", "0.5470474", "0.5467108", "0.5467007", "0.5459372", "0.5459372", "0.5459372", "0.5459372", "0.5459372", "0.54582834", "0.54560095", "0.54464066" ]
0.7359208
1
Converts a string of text into a numerical vector of features based on the word embedding LTM.
def vectorize(self,text):
    lv_active = set()
    words = word_tokenize(text)
    for word in words:
        if word in self.tree:
            ancestors = self.tree.word_ancestors(word)
            lv_active.update(ancestors)
    return self.nl.isin(lv_active).values
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def to_vector(text, model, idf, is_tokenized=False):\n if not is_tokenized: text= text.split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in text: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def to_vector(texto,model,idf):\n tokens = normalizer(texto).split() # splits the text by space and returns a list of words\n vec = np.zeros(300) # creates an empty vector of 300 dimensions\n for word in tokens: # iterates over the sentence\n if (word in model) & (word in idf): # checks if the word is both in the word embedding and the tf-idf model\n vec += model[word]*idf[word] # adds every word embedding to the vector\n if np.linalg.norm(vec) > 0:\n return vec / np.linalg.norm(vec) # divides the vector by their normal\n else:\n return vec", "def tokenize(text):\n tokens = nltk.word_tokenize(text)\n lemmatizer = nltk.WordNetLemmatizer()\n \n lemmatized_words = []\n for word in tokens:\n lemmatized_words.append(lemmatizer.lemmatize(word).lower().strip())\n \n return lemmatized_words", "def txt2vec(self, text: str) -> List[int]:\n itr: List[int] = []\n for token in self.tokenize(str(text)):\n itr.append(self._word_lookup(token))\n return itr", "def lemitization(text_vector):\n\n text_vector = postag_doc(text_vector)\n global lemmatizer\n tokenised_document = [lemmatizer.lemmatize(word, pos=map_postags(\n postag)) for word, postag in text_vector]\n return tokenised_document", "def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features", "def tokenize(text):\n \n text.lower() # convert to lowercase\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text) #remove punctuation\n words = word_tokenize(text) # tokenize by individual word\n words = [w for w in words if w not in stopwords.words(\"english\")] #remove stop words\n lemmed = [WordNetLemmatizer().lemmatize(w) for w in words] #lemminization\n \n return words", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n return [lemmatizer.lemmatize(token).lower().strip() for token in tokens]", "def tokenize(text):\n tokens = word_tokenize(text)\n words = [token for token in tokens if re.match(\"[a-zA-Z0-9]\", token)]\n no_stopwords = [word for word in words if word not in stopwords.words(\"english\")]\n lowercase_words = [word.lower() for word in no_stopwords]\n pos_tagged_words = pos_tag(lowercase_words)\n lemmatized_words = [WordNetLemmatizer().lemmatize(word, pos=convert_pos_tag(pos)) for word, pos in pos_tagged_words]\n return lemmatized_words", "def get_text_features() -> np.array:\r\n # Universal sentence encoder model\r\n # Original model by Google could be loaded from: https://tfhub.dev/google/universal-sentence-encoder/4\r\n # In this notebook the model is loaded from a public dataset on Kaggle\r\n # at https://www.kaggle.com/dimitreoliveira/universalsentenceencodermodels\r\n text_model = tf.keras.Sequential(\r\n [KerasLayer(txt_model_path, input_shape=[], dtype=tf.string, # Pretrained model\r\n output_shape=[512], trainable=False),\r\n tf.keras.layers.Layer(512, dtype='float16')] # This layer reduces precision of float numbers\r\n 
)\r\n\r\n # Convert all texts to vectors\r\n features = text_model.predict(data['title'],\r\n batch_size=BATCH_SIZE,\r\n use_multiprocessing=True,\r\n workers=-1)\r\n print('Text features extracted. Shape:', features.shape)\r\n\r\n return features", "def txt2vectors(self, txt, is_html):\n words = txt2words(txt)\n words = [w for w in words if w in self._model]\n if len(words) != 0:\n for w in words:\n yield self._model[w]", "def get_dataset_features(text):\n return model.extract(text)", "def normalize_func(text: str) -> List[str]:\n tokens = nltk.word_tokenize(text) # need to be consistent with the basic tokenize used in other functions\n return [lemmatizer.lemmatize(w.lower(), get_wordnet_pos(w.lower())) for w in tokens]", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def pre_process(text):\n # replace (,.'\") with ''\n text = text.replace(',', '')\n text = text.replace('.', '')\n text = text.replace(\"'\", '')\n text = text.replace(\"\\\"\", '')\n\n # tokenize into words\n tokens = [word for sent in sent_tokenize(text) for word in word_tokenize(sent)]\n\n # remove stopwords\n stop = stopwords.words('english')\n tokens = [token for token in tokens if token not in stop]\n\n # remove words less than three letters\n tokens = [word for word in tokens if len(word) >= 3]\n\n # lower capitalization\n tokens = [word.lower() for word in tokens]\n\n # lemmatize\n lmtzr = WordNetLemmatizer()\n tokens = [lmtzr.lemmatize(word) for word in tokens]\n\n return tokens", "def parse(self, text):\n return self.dict.txt2vec(text)", "def tokenize(text):\n #Clean data, remove all character except character and number,such as punctuation etc.\n text = re.sub(r'[^a-zA-Z0-9]', ' ', text.lower())\n tokens = word_tokenize(text)\n tokens = [WordNetLemmatizer().lemmatize(word) for word in tokens if word not in ST_english]\n return tokens", "def lstm_infer_vector(lstm_model, txt, stopwords,word_indices, maxlen=10, taillemax=300) :\n \n txt_prep = gensim.utils.simple_preprocess(txt, deacc=True)\n txt_wo_uw = remove_unknown_words(txt_prep, word_indices)\n txt_wo_ws = remove_stopwords(txt_wo_uw, stopwords)\n \n if len(txt_wo_ws)>taillemax:\n sentence = txt_wo_ws[-taillemax:]\n \n if len(txt_wo_ws)<maxlen :\n #cas du texte trop court\n sentence = txt_wo_ws\n X = np.zeros((1, maxlen, len(word_indices)), dtype=np.bool)\n y = np.zeros((1, len(word_indices)), dtype=np.bool)\n for t, word in enumerate(sentence):\n X[0, t, word_indices[word]] = 1\n preds = lstm_model.predict(X, verbose=0)[0]\n else :\n \n for current_part in range(len(txt_wo_ws)/maxlen):\n sentence = txt_wo_ws[current_part*maxlen:(current_part+1)*maxlen]\n X = np.zeros((1, maxlen, len(word_indices)), dtype=np.bool)\n y = np.zeros((1, len(word_indices)), dtype=np.bool)\n for t, word in enumerate(sentence):\n X[0, t, word_indices[word]] = 1\n preds = lstm_model.predict(X, verbose=0)[0]\n \n\n return preds", "def doc2vec(self, text: str) -> np.array:\n # tfidf_matrix = self.tfidf.transform([text])\n # vectors = []\n # for token in self.tokenize(text):\n # if token in self.word2vec and token in self.feature_names:\n # tfidf_score = tfidf_matrix[0, self.feature_names.index(token)]\n # vectors.append(self.word2vec[token] * tfidf_score)\n vectors = [self.word2vec[token] 
for token in self.tokenize(text) if token in self.word2vec]\n if not vectors:\n return np.zeros(300)\n return np.mean(vectors, axis=0)", "def transform(self, strings):\n\n logger.debug(\"Converting {} strings into lists of \"\n \"sentences.\".format(len(strings)))\n\n tokenized_strings = []\n for text in strings:\n tokenized_strings.append(text_to_wordlist(text, remove_stopwords=True))\n\n # Pre-allocate a 2D numpy array, for speed\n feature_vecs = np.zeros((len(tokenized_strings), self.num_features),\n dtype=\"float32\")\n\n # Loop through the strings\n for counter, word_list in enumerate(tokenized_strings):\n\n # Call the function (defined above) that makes average feature vectors\n feature_vecs[counter] = self._make_feature_vec(word_list)\n\n # For DEBUG only\n if np.isnan(feature_vecs[counter][0]):\n import ipdb;ipdb.set_trace()\n\n\n return feature_vecs", "def lem_and_stem(text, stopwords):\n\n\tlemmatizer = WordNetLemmatizer()\n\tstemmer = PorterStemmer()\n\tprocessed_text = []\n\tfor token, pos in text:\n\t\tpos = map_pos_tag(pos)\n\t\tif not (pos == wn.NOUN):\n\t\t\tcontinue\n\t\tif token not in stopwords and len(token) > 3:\n\t\t\tprocessed_token = stemmer.stem(lemmatizer.lemmatize(token, pos=pos))\n\t\t\tif processed_token not in stopwords:\n\t\t\t\tprocessed_text.append(processed_token)\n\treturn processed_text", "def word_to_vector_list(self, word, numeric=False, xsampa=False, normalize=True):\n if xsampa:\n word = self.xsampa.convert(word)\n segs = self.word_fts(word, normalize or xsampa)\n if numeric:\n tensor = [x.numeric() for x in segs]\n else:\n tensor = [x.strings() for x in segs]\n return tensor", "def tokenize(text):\n tokens=word_tokenize(text)\n lemmatizer=WordNetLemmatizer()\n \n clean_tokens=[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens\n pass", "def tokenize(text):\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n \n clean_tokens =[]\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def makeFeatureVec(words, model, num_features):\n featureVec = np.zeros((num_features,),dtype=\"float32\")\n num_words = 0.\n index2word_set = set(model.wv.index2word)\n for word in words:\n if word in index2word_set:\n num_words += 1\n featureVec = np.add(featureVec,model[word]) \n featureVec = np.divide(featureVec,num_words)\n return featureVec", "def __call__(self, text):\r\n if self.use_pos_tagging:\r\n return [self.wnl.lemmatize(t, self.pos(t)) for t in word_tokenize(self.clean(text))]\r\n else:\r\n return [self.wnl.lemmatize(t) for t in word_tokenize(self.clean(text))]", "def lemmatize(text):\n\n lem = WordNetLemmatizer()\n return ' '.join(list(map(lambda x: lem.lemmatize(x, 'v'),\n text.split())))", "def text_to_vecs(self):\n # convert word strings into word vectors\n sent_vec = []\n for w in self.sentence:\n if w in self.word_vectors.getVocab():\n sent_vec.append( self.word_vectors.getWordVectors()[w] )\n else:\n sent_vec.append( self.word_vectors.getOOVWordVector() )\n \n assert(len(self.sentence) == len(sent_vec)) \n self.sent_vec = sent_vec", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n \n return clean_tokens", "def preprocess_lines(movie_line):\n\ttokens = tokenizer.tokenize(movie_line)\n\twords = 
[word for word in tokens if word not in stopwords_set]\n\tstemmed_terms = [porter_stemmer.stem(word) for word in words]\n\tlemmatized_terms = [wordnet_lemmatizer.lemmatize(word) for word in stemmed_terms]\n\treturn lemmatized_terms", "def _make_feature_vec(self, word_list):\n\n # Pre-initialize an empty numpy array (for speed)\n feature_vec = np.zeros((self.num_features,), dtype=\"float32\")\n\n # index2word is a list that contains the names of the words in\n # the model's vocabulary. Convert it to a set, for speed.\n index2word_set = set(self.w2v_model.index2word)\n\n # Loop over each word in the word_list and, if it is in the model's\n # vocabulary, add its feature vector to the total\n nwords = 0\n for word in word_list:\n # NOTE: Careful there, if all words are in caps in the article,\n # this function will return nan values and blow up the forest.\n word = word.lower()\n if word in index2word_set:\n nwords += 1\n feature_vec = np.add(feature_vec, self.w2v_model[word])\n\n # Divide the result by the number of words to get the average\n feature_vec = np.divide(feature_vec, nwords)\n return feature_vec", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for token in tokens:\n clean_token = lemmatizer.lemmatize(token).lower().strip()\n clean_tokens.append(clean_token)\n\n return clean_tokens", "def test_lemmatization():\n normalizer = TextNormalizer(stopwords=False, lemmatize=True)\n X = normalizer.transform([[\"start running better old friend\"]])\n assert X[\"corpus\"][0] == [\"start\", \"run\", \"well\", \"old\", \"friend\"]", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def tokenize(text):\n text = text.lower()\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text)\n words = word_tokenize(text)\n words = [w for w in words if w not in stopwords.words(\"english\")]\n stemmed = [WordNetLemmatizer().lemmatize(w) for w in words]\n return(stemmed)", "def text_prepare(txt):\n print(txt)\n txt = re.sub(r\"[^\\w\\s]\", \" \", str(txt).lower().strip())\n txt = txt.split()\n nltk.corpus.stopwords.words(\"english\")\n txt = [word for word in txt if word not in nltk.corpus.stopwords.words(\"english\")]\n lem = nltk.stem.wordnet.WordNetLemmatizer()\n txt = [lem.lemmatize(word) for word in txt]\n txt = \" \".join(txt)\n return txt", "def text2vec(doc_tok, model, dim=300):\n doc_embedding = np.zeros(dim)\n valid_words = 0\n for word in doc_tok:\n if word in model:\n valid_words += 1\n doc_embedding += model.query(word)\n else:\n continue\n if valid_words > 0:\n return doc_embedding / valid_words\n else:\n return 
doc_embedding", "def _get_word2vec_features(x, word2vec, all_words_per_tweet, max_tweet_len):\n\n features = np.zeros((len(x), max_tweet_len, word2vec.vector_size))\n\n for i, tweet_words in enumerate(all_words_per_tweet):\n tweet_repr = np.array(\n [word2vec.wv[r] if r in word2vec.wv.vocab else np.zeros(word2vec.vector_size) for r in tweet_words])\n features[i][:len(tweet_repr), :word2vec.vector_size] = tweet_repr\n\n return features", "def tokenize(text):\n # This experiment convinced me to lemmatize only rather than lemmatize and\n # stem. I also got this nifty URL detector there.\n #https://gist.github.com/rajatsharma369007/de1e2024707ad90a73226643c314b118\n\n # initialization\n lemmatizer = WordNetLemmatizer()\n stop = stopwords.words(\"english\")\n\n # Replaced all URLs with 'urlplaceholder'\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|'+\\\n '(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n for url in re.findall(url_regex, text):\n text = text.replace(url, \"urlplaceholder\")\n\n # tokenize and lemmatize\n tokens = word_tokenize(text)\n tokens = [lemmatizer.lemmatize(token).lower().strip() for\n token in tokens if token not in stop]\n\n return tokens", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. \n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def token_by_lemma(text):\n lemmatizer = WordNetLemmatizer()\n word_list = word_tokenize(text)\n\n lemmatized_wrds = [lemmatizer.lemmatize(w) for w in word_list]\n return lemmatized_wrds", "def compute_user_input_embedding(txt, model):\r\n embeddings = []\r\n tokens = txt.split(\" \")\r\n for word in tokens:\r\n embeddings.append(model.wv[word])\r\n sentence_embedding = compute_average(embeddings)\r\n return sentence_embedding", "def vectorize_text(text):\n\n def remove_punctuation(text):\n \"\"\"Removes special characters from text.\"\"\"\n return re.sub('[,.?\";:\\-!@#$%^&*()]', '', text)\n\n def remove_common_words(text_vector):\n \"\"\"Removes 50 most common words in the uk english.\n\n source: http://www.bckelk.ukfsn.org/words/uk1000n.html\n\n \"\"\"\n common_words = set(['the', 'and', 'to', 'of', 'a', 'I', 'in', 'was',\n 'he', 'that', 'it', 'his', 'her', 'you', 'as', 'had', 'with',\n 'for', 'she', 'not', 'at', 'but', 'be', 'my', 'on', 'have', 'him',\n 'is', 'said', 'me', 'which', 'by', 'so', 'this', 'all', 'from',\n 'they', 'no', 'were', 'if', 'would', 'or', 'when', 'what', 'there',\n 'been', 'one', 'could', 'very', 'an', 'who'])\n return [word for word in text_vector if word not in common_words]\n\n text = text.lower()\n text = remove_punctuation(text)\n words_list = text.split()\n words_list = remove_common_words(words_list)\n\n return words_list", "def tokenize(text):\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return 
clean_tokens", "def embed(text: str) -> np.ndarray:\n n = nlp(text)\n return n.vector", "def _featurize_py_func(text):\n label = np.array(text[-1], dtype=np.int32)\n words = word_tokenize(text[:-2])\n chars = np.zeros([max_sentence_length, max_word_length], dtype=np.int32)\n for i, word in enumerate(words):\n ids = [char_to_int.get(char, -1) for char in word]\n chars[i,:len(ids)] = ids\n return chars", "def get_text_feature(texts,\n labels=None,\n nrow_train=None,\n vec='bow',\n lowercase=False,\n analyzer='word',\n single_token=True,\n ngram_range=(1, 1),\n stop_words=None,\n min_df=2,\n binary=True,\n select_k=None):\n from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n from sklearn.feature_selection import SelectKBest, chi2\n\n # keep single char as word\n if single_token:\n token_pattern = r\"\\b\\w+\\b\"\n else:\n token_pattern = r\"(?u)\\b\\w\\w+\\b\"\n\n # choose vec\n if vec is 'bow':\n vec = CountVectorizer(\n lowercase=lowercase,\n analyzer=analyzer,\n ngram_range=ngram_range,\n stop_words=stop_words,\n min_df=min_df,\n token_pattern=token_pattern,\n binary=binary)\n elif vec is 'tfidf':\n vec = TfidfVectorizer(\n lowercase=lowercase,\n analyzer=analyzer,\n ngram_range=ngram_range,\n stop_words=stop_words,\n min_df=min_df,\n token_pattern=token_pattern,\n sublinear_tf=True)\n else:\n raise ValueError('vec must be bow or tfidf!')\n\n # get word vector\n feature = vec.fit_transform(texts)\n feature_names = vec.get_feature_names()\n\n # feature select\n if (labels is not None) and (select_k is not None):\n if nrow_train is not None:\n x_train = feature[:nrow_train, :]\n x_test = feature[nrow_train:, :]\n y_train = labels[:nrow_train]\n\n feature_selector = SelectKBest(chi2, k=select_k)\n x_train = feature_selector.fit_transform(x_train, y_train)\n feature_names = np.array(feature_names)[feature_selector.get_support()]\n\n x_test = feature_selector.transform(x_test)\n\n # combine train test\n import scipy.sparse as sp\n feature = sp.vstack([x_train, x_test])\n\n else:\n feature_selector = SelectKBest(chi2, k=select_k)\n feature = feature_selector.fit_transform(feature, labels)\n feature_names = np.array(feature_names)[feature_selector.get_support()]\n\n return feature, list(feature_names)", "def featurize(self, data):\n \n features = []\n\n # tokens = data.split()\n\n #Modification 1: Normalization: All lowercase\n #Removing this did not seem to have any performance boost\n #but it did nothing negative either\n data = data.lower()\n\n #Modification 2: Normalization: Tokenizing using NLTK\n #Keep this\n # tokens = word_tokenize(data)\n tokens = data.split()\n\n #Modification 3: Word List: Removing stop words using NLTK\n #Keep this\n stop_words = set(stopwords.words('english'))\n tokens_filtered = []\n\n for t in tokens:\n if t not in stop_words:\n tokens_filtered.append(t)\n\n tokens = tokens_filtered\n\n #Modification 4: Pre-Processing Lemmization using NLTK\n #Surprisingly does not appear to impact performance\n # for t in tokens:\n # t = self.wordnet_lemmatizer.lemmatize(t)\n\n capital = 0\n average_word_length = 5 #It's 4.7, but we'll use 5\n short_words = 0\n long_words = 0\n\n for t in tokens:\n\n #Feature 1: Bag of words\n features.append((t, True))\n\n if(t.isupper()):\n capital += 1\n\n #Feature 3: Long or short word counter, intentionally ignoring length 4\n #and 5 as those are close to average\n #Very important that stop words were removed\n if(len(t) > average_word_length):\n long_words += 1\n elif(len(t) < average_word_length - 1):\n 
short_words += 1\n \n #Feature 2: Lots of capital\n #Remove this. It only appears to be a rough count of sentence number vs.\n #Capturing any sentiment. Does not impact F1 score in given train/dev sets\n # if(capital > 2):\n # features.append((\"LOTS_OF_CAPITAL\", True))\n\n #Feature 3: Long or short words\n # if(long_words > short_words):\n # features.append((\"LOTS_OF_LONG_WORDS\", True))\n\n\n\n return features", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def tokenize(text):\n text = re.sub('[^A-Za-z0-9]', ' ', text)\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n return clean_tokens", "def TransformData(text):\n global COUNT_VECTORIZER\n if COUNT_VECTORIZER is None:\n COUNT_VECTORIZER = CountVectorizer(analyzer = 'word', lowercase = True)\n COUNT_VECTORIZER.fit(text)\n features = COUNT_VECTORIZER.transform(text)\n features_nd = features.toarray() # for easy usage\n global TFIDF\n if TFIDF is None:\n TFIDF = TfidfTransformer(use_idf=False)\n TFIDF.fit(features_nd)\n text_tfidf = TFIDF.transform(features_nd)\n return text_tfidf", "def proc_text(self, text):\n\n lemmas = []\n tokens = []\n doc = self(text)\n for tokObj in doc:\n if self._remove_punct and tokObj.is_punct:\n continue\n lemma = tokObj.lemma_\n text = tokObj.text\n if self._keep_only_alpha_num and not is_alpha_num(text):\n continue\n tok1 = text.lower()\n tok2 = lemma.lower()\n if tok1 in self._stopwords or tok2 in self._stopwords:\n continue\n\n if self._lower_case:\n text = text.lower()\n lemma = lemma.lower()\n\n lemmas.append(lemma)\n tokens.append(text)\n\n return ' '.join(lemmas), ' '.join(tokens)", "def preprocessing(raw_text):\n words_list = tokenize(raw_text)\n words_list = remove_stop_words(words_list)\n words_list = remove_punctuations(words_list)\n words_list = 
lemmatization(words_list)\n return words_list", "def extractFeatures(self, data, tf=False):\n tfidf_training_matrix, tfidf_terms = self.useTfidfVectorizer(data)\n \n if tf:\n tf_vectorizer = CountVectorizer(max_df=0.5, min_df=2, max_features=10000,\n stop_words='english')\n \n tf_training_matrix = tf_vectorizer.fit_transform(data)\n tf_terms = tf_vectorizer.get_feature_names()\n \n return tfidf_training_matrix, tfidf_terms, tf_training_matrix, tf_terms\n \n else:\n return tfidf_training_matrix, tfidf_terms", "def create_text_sequence_feature(fl, sentence, sentence_len, vocab):\n sentence_transformed = transform_sentence(sentence, vocab)\n for word_id in sentence_transformed:\n fl.feature.add().int64_list.value.extend([word_id])\n return fl", "def tokenize(text):\n text = re.sub(r\"[^a-zA-Z0-9]+\", \" \", text.lower())\n\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def process_text(self, text, lemma=False):\n processed_text = TextGraph.nlp(text.lower())\n words = [t.text.strip() if not lemma else t.lemma_ for t in processed_text if not t.is_punct]\n return words", "def process(self, sentence):\n\n # selects onlt alphanumeric words\n words = self.tokenizer.tokenize(sentence)\n\n # lemmatize the words\n words = [self.lemmatizer.lemmatize(word) for word in words]\n\n # lowercase all the words and remove single characters\n words = [word.lower() for word in words if len(word) > 1]\n\n # remove the stopwords using NLTK\n words = [word for word in words if word not in stopwords.words('english')]\n\n return words", "def training_examples_to_vec(test_file, embeddings_file, num_words, word_dim):\n x = []\n ignore_words = stopwords.words('english')\n lemmatizer = WordNetLemmatizer()\n stemmer = SnowballStemmer('english')\n word_idx, word_vectors = hf.create_indices_for_vectors(embeddings_file, return_vectors=True)\n with open(test_file, 'r') as f:\n for line in f:\n stemmedWords = set([])\n long_string = line.split(' ')\n total_words = int(len(long_string) / 2)\n total_example_vec = np.empty([num_words, word_dim], dtype=np.float32)\n if total_words - 1 <= num_words:\n continue\n count = 0\n\n for i in range(1, total_words):\n word = long_string[2 * i].split(\"'\")[0]\n\n if (word in ignore_words) or (len(word) <= 3):\n continue\n\n if not word.isalpha():\n continue\n\n try:\n stem = stemmer.stem(word)\n lemma = lemmatizer.lemmatize(word)\n except UnicodeDecodeError:\n continue\n\n if stem in stemmedWords:\n continue\n\n try:\n idx_num = word_idx[word]\n except KeyError:\n\n try:\n idx_num = word_idx[lemma]\n except KeyError:\n\n try:\n idx_num = word_idx[stem]\n except KeyError:\n continue\n\n word_vec = word_vectors[idx_num]\n total_example_vec[count] = word_vec\n stemmedWords.add(stem)\n count += 1\n if count >= num_words:\n break\n x.append(total_example_vec)\n return x", "def word2vec(self, words):\n with torch.no_grad():\n words = torch.LongTensor(self.doc2token(words))\n result = self.model.embedding(words).numpy()\n return result", "def lemmatize(text, nlp):\n\n return [word.lemma_ for word in nlp(text)]", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = 
[]\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def tokenize(text):\n \n #regular expression to avoid pucntuations or any special character\n tokenizer = nltk.RegexpTokenizer(r\"\\w+\")\n \n #tokenizing text\n tokens = tokenizer.tokenize(text)\n \n #initiating lemmatizer\n lemmatizer = WordNetLemmatizer()\n \n #iteratating through each token\n clean_tokens = []\n for tok in tokens:\n \n #stop words are irrelevant in this context of classifying response\n if (tok.lower() not in stopwords.words(\"english\")):\n \n # lemmatizing, normalizing case, and removing leading/trailing white space\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n \n return clean_tokens", "def instance2fv(self, text):\n if isinstance(text, unicode):\n text = text.encode('utf8')\n\n arr = np.zeros((self.n_feats,), dtype='uint32')\n\n # Convert the text to a sequence of ascii values\n ords = map(ord, text)\n\n # Count the number of times we enter each state\n state = 0\n statecount = defaultdict(int)\n for letter in ords:\n state = self.tk_nextmove[(state << 8) + letter]\n statecount[state] += 1\n\n # Update all the productions corresponding to the state\n for state in statecount:\n for index in self.tk_output.get(state, []):\n arr[index] += statecount[state]\n\n # The returned vector is the TFxIDF vector. The IDF for the\n # linguini system is actually the inv-lang-freq, and this is\n # pre-computed from the training data. We also normalize to len 1\n # at this stage.\n retval = arr * self.ilf\n return retval", "def training(string):\n print(\"Training...\")\n vec = create_vector(string)\n print(\"Selecting features...\")\n feature_list = select_features(vec)\n print(\"Done!\")\n return feature_list", "def tokenize(t):\n tweet_tok = TweetTokenizer(strip_handles=True, reduce_len=True)\n tokens = tweet_tok.tokenize(t)\n wnl = WordNetLemmatizer()\n stems = []\n for item in tokens:\n stems.append(wnl.lemmatize(item))\n return stems", "def str_to_nmslib_vect(tokenizer, text):\n lst = unique(get_token_ids(tokenizer, text))\n lst.sort()\n return toks_to_str(lst)", "def preprocess_training_text(text, accented_chars=True, \n convert_num=False, extra_whitespace=True, \n lemmatization=True, lowercase=True, punctuations=True,\n remove_html=True, remove_num=True, special_chars=True, \n stop_words=True):\n \n\n \"\"\"preprocess text with default option set to true for all steps\"\"\"\n if remove_html == True: #remove html tags\n text = strip_html_tags(text)\n if extra_whitespace == True: #remove extra whitespaces\n text = remove_whitespace(text)\n if accented_chars == True: #remove accented characters\n text = remove_accented_chars(text)\n if lowercase == True: #convert all characters to lowercase\n text = text.lower()\n \n \n doc = nlp(text) #tokenise text\n\n\n clean_text = []\n for token in doc:\n flag = True\n edit = token.text\n # print(\"Word: \", edit, \" Type: \", token.pos_)\n # remove stop words\n if stop_words == True and token.is_stop and token.pos_ != 'NUM': \n flag = False\n # remove punctuations\n if punctuations == True and (token.pos_ == 'PUNCT') and flag == True: \n flag = False\n \n # remove 'X' characters:\n if token.pos_ == 'X':\n flag = False\n # remove special characters\n if 
special_chars == True and token.pos_ == 'SYM' and flag == True: \n flag = False\n # remove numbers\n if remove_num == True and (token.pos_ == 'NUM' or token.text.isnumeric()) \\\n and flag == True:\n flag = False\n # convert number words to numeric numbers\n if convert_num == True and token.pos_ == 'NUM' and flag == True:\n edit = w2n.word_to_num(token.text)\n # convert tokens to base form\n elif lemmatization == True and token.lemma_ != \"-PRON-\" and flag == True:\n edit = token.lemma_\n # append tokens edited and not removed to list \n if edit != \"\" and flag == True:\n clean_text.append(edit)\n \n # Convert back to string:\n new_text = ' '.join(clean_text)\n regex = re.compile('[^a-zA-Z]')\n new_text = regex.sub(' ', new_text)\n words = re.findall(r'\\w+.', new_text)\n return ' '.join(words)", "def convert_str_list_to_vector(self, string_list: Tuple[str]) -> numpy.ndarray:\n if len(string_list) != 4:\n logger.error(\"convert_str_list_to_vector got a too short or long string list: {}. We return a zero-vector!\",\n string_list)\n return numpy.zeros(shape=(self.word2vec_embedding_size +\n self.word2vec_embedding_size / 2 +\n self.word2vec_embedding_size / 3 +\n self.word2vec_embedding_size / 4,),\n dtype=\"float32\"\n )\n ret = numpy.zeros(shape=(0,), dtype=\"float32\")\n for i, token in enumerate(string_list):\n logger.trace(\"Process the {}. token \\\"{}\\\"\", (i + 1), string_list[i])\n ret = numpy.concatenate([ret,\n numpy.average(\n numpy.reshape(\n self.word2vec_dict.get(string_list[i],\n numpy.negative(\n numpy.ones(\n shape=(self.word2vec_embedding_size,),\n dtype=\"float32\")\n )),\n (int(self.word2vec_embedding_size / (i + 1)), (i + 1))\n ),\n axis=1)],\n axis=0)\n return ret", "def lemmatisation(self, \n text: str\n ) -> Union[str, List[str]]:\n lemmatiser = WordNetLemmatizer()\n\n def lemma_sans_kw(w: str\n ) -> str:\n return (\n lemmatiser.lemmatize(w) if w not in self.target_words else w\n )\n \n if not self.tokenise:\n return ' '.join(\n lemma_sans_kw(w) for w in word_tokenize(text)\n )\n return [lemma_sans_kw(w) for w in text]", "def tokenize(text: str):\n lemmatizer = WordNetLemmatizer()\n stop_words = stopwords.words(\"english\")\n\n # Replace urls\n url_regex = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, 'urlplaceholder')\n\n # Normalize and remove punctuation\n text = re.sub(r\"[^a-zA-Z0-9]\", \" \", text.lower())\n\n # Tokenize\n tokens = word_tokenize(text)\n\n # Lemmatize and remove stop words\n clean_tokens = [lemmatizer.lemmatize(word) for word in tokens if word not in stop_words]\n # clean_tokens = [lemmatizer.lemmatize(tok, pos='v').lower().strip() for tok in clean_tokens]\n\n return clean_tokens", "def vectorize(self, source_text, target_text, use_dataset_max_lengths=True):\r\n \r\n data = super().vectorize(source_text, target_text, use_dataset_max_lengths)\r\n \r\n mltm_x_vector = self.mltm_vectorizer.vectorize(source_text.lower())\r\n mltm_x_vector = mltm_x_vector.astype(np.float32)\r\n \r\n data[\"x_source_mltm_vector\"] = mltm_x_vector\r\n return data", "def svmlight_to_vectors(txt):\n\n MAXENT_LOG.info(\"Attempting to convert {} to a vector file.\".format(txt))\n\n ntf = NamedTemporaryFile(mode='w', delete=False)\n ntf.close()\n\n p = ProcessCommunicator('{} import-svmlight --input \"{}\" --output \"{}\"'.format(mallet_bin, txt, ntf.name),\n stdout_func=MAXENT_LOG.info, stderr_func=MAXENT_LOG.warn, shell=True)\n\n\n 
if p.wait() == 0:\n MAXENT_LOG.debug(\"Successfully created temporary vector file {}\".format(ntf.name))\n return ntf.name\n else:\n raise ClassifierException(\"SVMLight Conversion did not complete successfully.\")", "def process_text(text):\n text = text.translate(translator)\n tokens = word_tokenize(text)\n# if stem:\n stemmer = PorterStemmer()\n tokens = [stemmer.stem(t) for t in tokens]\n \n return tokens", "def preprocess(text):\n\tX = []\n\tsent_detector = nltk.data.load('tokenizers/punkt/english.pickle')\n\tfor t in text:\n\t\tsents = sent_detector.tokenize(t)\n\t\tresult = ''\n\t\tfor s in sents:\n\t\t\ttokens = word_tokenize(s)\n\t\t\tresult += ' ' + ' '.join(tokens)\n\t\tX.append(result)\n\treturn X", "def tokenize(text):\n \n tokens = word_tokenize(text)\n \n STOPWORDS = list(set(stopwords.words('english')))\n # remove short words\n tokens = [token for token in tokens if len(token) > 2]\n # remove stopwords\n tokens = [token for token in tokens if token not in STOPWORDS]\n \n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n return clean_tokens", "def tokenize(text):\n\n # Replace URLs\n url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n \n detected_urls = re.findall(url_regex, text)\n for url in detected_urls:\n text = text.replace(url, \"urlplaceholder\")\n\n # Remove non alphanumeric characters\n text = re.sub(pattern=r'[^A-Za-z0-9]+',repl=' ', string=text.lower().strip())\n \n # Tokenize words\n tokens = word_tokenize(text)\n \n # Remove stop words\n stop_words = set(stopwords.words('english'))\n filtered_tokens = [w for w in tokens if not w in stop_words]\n \n lemmatizer = WordNetLemmatizer()\n \n clean_tokens = []\n for token in filtered_tokens:\n new_token = lemmatizer.lemmatize(token)\n clean_tokens.append(new_token)\n \n return clean_tokens", "def tokenize(text):\n text = text.translate(str.maketrans('', '', string.punctuation))\n tokens = word_tokenize(text)\n lemmatizer = WordNetLemmatizer()\n\n clean_tokens = []\n for tok in tokens:\n clean_tok = lemmatizer.lemmatize(tok).lower().strip()\n clean_tokens.append(clean_tok)\n\n s = stopwords.words('english')\n result = []\n for token in clean_tokens:\n if token not in s:\n result.append(token)\n\n return result", "def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n 
#here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList", "def basic_clean(text):\n wnl = nltk.stem.WordNetLemmatizer()\n stopwords = stopwords.words('english') + ADDITIONAL_STOPWORDS\n text = (unicodedata.normalize('NFKD', text)\n .encode('ascii', 'ignore')\n .decode('utf-8', 'ignore')\n .lower())\n words = re.sub(r'[^\\w\\s]', '', text).split()\n return [wnl.lemmatize(word) for word in words if word not in stopwords]", "def lemmatize_words(text: str, lemmatizer=WordNetLemmatizer()) -> str:\n return ' '.join(lemmatizer.lemmatize(word) for word in text.split())", "def extract_features(sentence, vocabulary):\n n_tokens = len(sentence)\n n_features = n_feature_functions + len(vocabulary)\n X = sp.lil_matrix((n_tokens, n_features), dtype=bool)\n\n for i in xrange(n_tokens):\n for j, f in enumerate(FEATURE_FUNCTIONS):\n X[i, j] = f(sentence, i)\n\n # Vocabulary feature\n try:\n X[i, n_feature_functions + vocabulary[sentence[i][0].lower()]] = 1\n except KeyError:\n pass\n\n return X", "def token2features(sent, i, add_neighs=True):\n \n def add_lexicon_feats(tpl, lookupLexiconDict, usedTags):\n if tpl in lookupLexiconDict:\n for cls in lookupLexiconDict[tpl]:\n if cls not in usedTags:\n ftrs.append(cls) #<--------------------\n usedTags[cls]=1\n else:\n usedTags[cls]+=1\n \n \n ftrs = []\n # bias\n ftrs.append(\"BIAS\")\n # position features\n if i == 0:\n ftrs.append(\"SENT_BEGIN\")\n if i == len(sent)-1:\n ftrs.append(\"SENT_END\")\n\n # the word itself\n word = unicode(sent[i])\n ftrs.append(\"WORD=\" + word)\n word_lcase = word.lower()\n ftrs.append(\"LCASE=\" + word_lcase)\n # some features of the word\n if word.isalnum():\n ftrs.append(\"IS_ALNUM\")\n if word.isnumeric():\n ftrs.append(\"IS_NUMERIC\")\n if word.isdigit():\n ftrs.append(\"IS_DIGIT\")\n if word.isupper():\n ftrs.append(\"IS_UPPER\")\n if word.islower():\n ftrs.append(\"IS_LOWER\")\n\n # USE LEXICONS################################################## !\n maxTries=5\n usedTags = {}\n \n #look front up to 5 places \n if type(sent[0])== str: lSent = map(str.lower, sent)\n else: lSent = map(unicode.lower, sent)\n while(maxTries!=0):\n\n if len(lSent)-i>=maxTries:\n tpl = tuple(lSent[i:maxTries+i])\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n maxTries-=1\n \n #also look backwards: lexicons\n if i>=1:\n tpl 
= tuple(lSent[i-1:i+1]) # size 2\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n if i<len(lSent) : \n tpl = tuple(lSent[i-1:i+2]) # size 3\n add_lexicon_feats(tpl, lookupLexiconDict, usedTags)\n \n #analyze and add bias towards max used classification \n if usedTags:\n usedTags = list(usedTags.iteritems())\n maxused = max(usedTags, key=operator.itemgetter(1))\n minused = min(usedTags, key=operator.itemgetter(1)) \n if minused[1]!=maxused[1]:\n ftrs.append('BIAS='+maxused[0])\n \n\n #R ************************************************\n if len(word) > 15:\n ftrs.append(\"IS_LENGTHY\")\n if word[0].upper():\n ftrs.append(\"IS_FIRST_UPPER\")\n if word.__contains__(\"http\"):\n ftrs.append(\"IS_HYPERLINK\")\n if any(x.isupper() for x in word):\n ftrs.append(\"IS_MIXEDCASE\")\n if word.isupper():\n ftrs.append(\"ALL_UPPERCASE\")\n if word.__contains__(\"@\"):\n ftrs.append(\"IS_TAG\")\n if word.__contains__(\"#\"):\n ftrs.append(\"IS_HASHTAG\")\n if word in stop_words:\n ftrs.append(\"IS_STOPWORD\")\n if word in ['ing','ly','ed','ious','ies','ive','es','s','ment']:\n ftrs.append(\"CONTAINS_SUFFIX\")\n ftrs.append( nltk.pos_tag([word])[0][1] )\n\n # previous/next word feats\n if add_neighs:\n if i > 0:\n for pf in token2features(sent, i-1, add_neighs = False):\n ftrs.append(\"PREV_\" + pf)\n if i < len(sent)-1:\n for pf in token2features(sent, i+1, add_neighs = False):\n ftrs.append(\"NEXT_\" + pf)\n \n \n \n # return it!\n return ftrs", "def readVector(text):\n items = text.split()\n if int(items[0])+1 != len(items):\n raise ValueError(\"Invalid number of items\")\n return [float(v) for v in items[1:]]", "def generate_vector(text, tf=None):\n if not _trained:\n print(\"Make sure to train parameterizer first\")\n exit(1)\n if tf is None:\n tf = term_frequency.generate_vector(text)\n vector = []\n for i in range(len(tf)):\n vector.append(tf[i] * _idfs[i])\n return vector", "def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))", "def compute_label_feature(text, token_to_idx):\n tokens = list(text.strip().lower())\n feats = [token_to_idx[token] for token in tokens]\n return feats", "def text_to_wordlist(text, remove_html_related=True, remove_non_letter=True,\n to_lowercase=True, remove_stopwords=False, use_lem=False):\n if remove_html_related:\n text = url_removal(text)\n # Remove HTML using BeautifulSoup\n text = BeautifulSoup(text, 'lxml').get_text()\n\n # Remove non-letters using regex\n if remove_non_letter:\n text = non_letter_removal(text)\n # Convert words to lower case and split them\n if to_lowercase:\n text = text.lower()\n\n words = text.split()\n # get tagged before possible stopword removal\n tagged_words = pos_tag(words)\n\n # Optionally remove stop words (false by default)\n if remove_stopwords:\n tagged_words = stopword_removal_from_taggedwords(tagged_words)\n\n # Optionally get part of speech tag of words then lemmatize them\n if use_lem:\n words = lemmatize_tagged_words(tagged_words)\n # Return a list of words and tagged words\n return(words, tagged_words)", "def create_vectorizer(ds):\n vectorize_layer = TextVectorization(\n standardize=clean_text,\n split=\"whitespace\",\n max_tokens=MAX_WORDS - 1,\n output_mode=\"int\",\n output_sequence_length=MAX_LEN,\n )\n vectorize_layer.adapt(ds.map(lambda text, label: text))\n return vectorize_layer", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if 
word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def text_to_w2v_input(text, tokenizer=None, remove_stopwords=False):\n\n # NOTE: Punkt is a sentence tokenizer\n if not tokenizer:\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\n # Split text into sentences\n raw_sentences = tokenizer.tokenize(text.decode('utf8').strip())\n\n tokenized_sentences = []\n for raw_sentence in raw_sentences:\n if raw_sentence:\n tokenized_sentences.append(\n text_to_wordlist(raw_sentence, remove_stopwords))\n\n return tokenized_sentences", "def vectorize_text(df: pd.DataFrame):\n # Creating a stop_words list set that are common to many questions.\n common_phrases = [\n 'read the sentence from the passage',\n 'which of the following best describes',\n 'which is the best one sentence * for the section',\n 'which sentence from the passage provides the most evidence'\n 'select the sentence that does not support the central idea of the article',\n 'supports the main idea',\n 'select the paragraph from the section that explains how that shows the ',\n 'that is most relevant to be included in the summary of the article',\n 'according to the article',\n 'which of these is not one',\n ]\n stop_words = stopwords.words('english')\n [stop_words.extend(x.split()) for x in common_phrases]\n\n ct_vectorizer = CountVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n dtype='uint8')\n\n tfidf_vectorizer = TfidfVectorizer(token_pattern='\\\\w{3,}',\n max_df=.3,\n min_df=.001,\n stop_words=list(set(stop_words)),\n strip_accents='ascii', # Faster than unicode.\n ngram_range=(1, 3), # Enable uni, bi, trigrams.\n lowercase=True,\n sublinear_tf=True, # Replace tf with 1 + log(tf).\n smooth_idf=True, # Default 1 doc for each term.\n dtype=np.float32)\n\n # Count & tf-idf vectorization learns vocab and transforms data into matrices.\n ct_vec = ct_vectorizer.fit_transform(np.array(df.text))\n tfidf = tfidf_vectorizer.fit_transform(np.array(df.text))\n # print(\"Shape of ct_vec:\", ct_vec.shape)\n # print('Size of ct_vec:', sys.getsizeof(ct_vec))\n # print(\"Shape of tfidf:\", tfidf.shape)\n # print('Size of tfidf:', sys.getsizeof(tfidf), '\\n')\n\n ct_names = ct_vectorizer.get_feature_names()\n tf_names = tfidf_vectorizer.get_feature_names()\n\n df_cv = pd.concat(\n [df, pd.DataFrame(ct_vec.toarray(), columns=ct_names)],\n axis=1)\n df_tfidf = pd.concat(\n [df, pd.DataFrame(tfidf.toarray(), columns=tf_names)],\n axis=1)\n\n return (\n df_cv,\n ct_vec,\n ct_names,\n df_tfidf,\n tfidf,\n tf_names\n )", "def get_sentences(text, nlp):\n\n # get sentences from text\n sentences = [sentence for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n processed_sentences = [convert_to_string(remove_junk(tokenize_text(sentence, nlp))) for sentence in\n text.replace('!', '.').replace('?', '.').split('.')]\n\n # convert the sentences into a list of document vectors\n sentence_vector_list = [nlp(sentence).vector for sentence in processed_sentences]\n\n return sentences, sentence_vector_list", "def extract_features_temporal(self, text, expDateStr = None, onsetDateStr = None, refExpDateStr = None, textType='vaers'):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n# id = 0\n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence 
to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.initialization_text_data(text, sentences, taggedSentences, textType)\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n docFeature = self.extract_temporal_info(featObjList, expDateStr, onsetDateStr, refExpDateStr)\n \n return docFeature", "def getVector(text):\n url = cfg.use_vectoriser\n res = requests.post(url, json={'text': text, 'access_key': cfg.vectoriser_access_key})\n res_dictionary = res.json()\n return res_dictionary['vectors']", "def tokens_from_string(self, text):\n\n if self.level == \"character\":\n return list(text)\n elif self.level == \"word\":\n return nltk.word_tokenize(text)\n else:\n print(\"error: invalid level\")", "def parse_input(input_data, dictionary, model):\n vec_text = TextBlob(input_data).words.lower().lemmatize()\n vec_bow = dictionary.doc2bow(vec_text)\n return model[vec_bow]", "def clean_stopwords_lemmatize(text):\n tokens = clean_stopwords(text)\n tokens = lemmatize_tokens(tokens)\n # count = Counter(tokens)\n # c = count.most_common(15)\n # b = [str(i[0]) for i in c]\n # keywords = [t for t in tokens if t in b]\n news = ['ESPN', 'espn', 'foxsports', 'fox', 'cnn', 'yahoo', '•', '-', '●']\n keywords = [k for k in tokens if not k in news]\n return keywords", "def extract(self, document):\n f_num = len(self.feature_list)\n feature_vector = np.zeros((f_num,))\n words = document.split()\n for i in xrange(len(words)):\n for n in self.ns:\n ngram = self.try_get_ngram(words, n, i)\n if ngram and ngram in self.ngrams:\n self.add_ngram(feature_vector, ngram)\n return feature_vector" ]
[ "0.70021975", "0.6557391", "0.65419817", "0.65404963", "0.6515521", "0.64831823", "0.6474055", "0.64408255", "0.64127994", "0.6378021", "0.63599265", "0.63383466", "0.6331677", "0.6309186", "0.63056415", "0.6245646", "0.6237467", "0.62303", "0.62136185", "0.61973566", "0.6189876", "0.61872435", "0.61831295", "0.61788756", "0.61539006", "0.61421084", "0.61379063", "0.6108796", "0.60876167", "0.60579544", "0.6038214", "0.6024395", "0.6023434", "0.60128266", "0.60105896", "0.60007256", "0.599773", "0.598994", "0.5988629", "0.5986467", "0.5986299", "0.59802616", "0.59756", "0.5974153", "0.597276", "0.5970416", "0.5963332", "0.5960208", "0.5959896", "0.59595436", "0.5958243", "0.5947317", "0.59362084", "0.5923561", "0.5916421", "0.59072936", "0.590715", "0.5906617", "0.5878011", "0.5868159", "0.58483505", "0.58423924", "0.5839385", "0.5838956", "0.5838956", "0.5837959", "0.5831707", "0.5830739", "0.5827296", "0.58239746", "0.58209914", "0.5819262", "0.5817126", "0.5808531", "0.58082616", "0.5798102", "0.5796893", "0.5795046", "0.57903016", "0.57753676", "0.577383", "0.57622755", "0.57556015", "0.57342964", "0.57260036", "0.5725989", "0.5725327", "0.57205135", "0.5716604", "0.57118464", "0.57111543", "0.5709845", "0.57051045", "0.57020336", "0.5684961", "0.5678745", "0.56745934", "0.5669707", "0.5636148", "0.5628829", "0.56273043" ]
0.0
-1
Calls super method and adds an MLTM vector to the data dict.
def vectorize(self, source_text, target_text, use_dataset_max_lengths=True): data = super().vectorize(source_text, target_text, use_dataset_max_lengths) mltm_x_vector = self.mltm_vectorizer.vectorize(source_text.lower()) mltm_x_vector = mltm_x_vector.astype(np.float32) data["x_source_mltm_vector"] = mltm_x_vector return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_data(self, v, m, x, pos=1):\n if x is not None:\n if v in self.variables:\n if m in self.models:\n self.data.update({self.__gen_key(m, v, pos): x})\n self.pos.update({self.__gen_key(m, v, pos): pos})\n else:\n pass\n else:\n pass\n else:\n pass", "def update_tlm(self):", "def load_ltm(self, **kwargs):\r\n ltm = kwargs['ltm']\r\n\r\n self.ltm = LTM(ltm)\r\n self.ltm.label_ltm(self.hwlist)", "def _calculate_custom_data(self):\n self.data['vms'] = Vms(self.vms, self.url)", "def add_time_point(self,time, mdv_instance):\n\n self.mdvtc[time] = mdv_instance", "def add(self, **kwargs):\n for key, value in kwargs.items():\n if key not in self.keys:\n self.keys.append(key)\n # making 'key' new method of class = an empty listh\n setattr(self, key, [])\n if isinstance(value, torch.Tensor):\n value = value.detach().numpy()\n # value = np.nan_to_num(value) #was converting list to arrays :S\n if isinstance(value, np.float32):\n value = float(value)\n if isinstance(value, np.int64):\n value = int(value)\n # calling method key of class and since a list\n getattr(self, key).append(value)\n # appending new val", "def build_and_add(self,*args):\n lattice = lattice_class.lattice(*args)\n self.lattices.append()\n self.meshfns.append(['in_memory'])", "def add_vector_fields(attributes, data):\n for attrib in attributes:\n if attrib['similarity'] == 'Semantic USE':\n value = data.get(attrib['name'])\n if value is not None:\n newVal = {}\n newVal['name'] = value\n newVal['rep'] = getVector(value)\n data[attrib['name']] = newVal\n elif attrib['similarity'] == 'Semantic SBERT':\n value = data.get(attrib['name'])\n if value is not None:\n newVal = {}\n newVal['name'] = value\n newVal['rep'] = getVectorSemanticSBERT(value)\n data[attrib['name']] = newVal\n elif attrib['similarity'] == 'Array SBERT':\n value = data.get(attrib['name'])\n if value is not None:\n newVal = {}\n newVal['name'] = value\n newVal[\"rep\"] = []\n array = getVectorSemanticSBERTArray(value)\n for element in array:\n temp = {}\n temp['rep'] = element\n newVal[\"rep\"].append(temp)\n\n data[attrib['name']] = newVal\n return data", "def add_modifications(self, dset):\n dset_keys = [\"entity_ids\", \"comp_ids\"]\n for key in dset_keys:\n self.model_dict[key] = dset[key]\n self.model_dict[\"code_version\"] = np.array(mbgdml_version)\n self.model_dict[\"md5\"] = np.array(self.md5)", "def put_vector(self, term, vector):\n self.terms.append(term)\n self.vectors.append(vector.vector)\n self.real_vectors.append(vector)\n return self.dict.update({term: vector})", "def __init__(self, M,):\n #transpose because we are given column vectors not row vectors\n self.M = np.transpose(np.matrix(M))", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n norms = {}\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self, \"_normalization\", np.pi ** 2 * self.N * norms[self.nstep]\n )\n\n raise NotImplementedError(\"Need to implement dispersion Lüscher counter terms.\")", "def init_m_aux(self):\n self.t_T.set_value(np.array([1.]).astype(theano.config.floatX))\n self.t_fista_X.set_value(self.t_A.get_value())", "def __init__(self):\r\n\r\n super(Metallized, self).__init__()\r\n\r\n # Initialize public scalar attributes.\r\n self.spec_sheet = 0\r\n if self.hazard_rate_type < 3: # MIL-HDBK-217\r\n self.reference_temperature = 358.0", "def 
set_tme_load_m(self, tme):\n self.tlm = tme[:]", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self,\n \"_normalization\",\n 2 * np.pi * np.log(self.N)\n if self.spherical\n else 2 * np.pi * np.log(self.N) - 4 * (CATALAN - np.pi / 2 * np.log(2)),\n )", "def mtf(self, mtf):\n\n self._mtf = mtf", "def __init__(self,vector):\n self._vector = vector", "def add_to_dict(param_dict):\n ### Sample - Int\n sample_s = param_dict['ml_args'].sample_s\n ### Sample - Mr\n sample_Mr = param_dict['ml_args'].sample_Mr\n ## Sample volume\n # Units (Mpc/h)**3\n volume_sample = { '18': 37820 / 0.01396,\n '19': 6046016.60311 ,\n '20': 2.40481e7 ,\n '21': 8.79151e7 }\n vol_mr = volume_sample[sample_s]\n ##\n ## Choice of Centrals and Satellites\n cens = int(1)\n sats = int(0)\n ## Other constants\n # Speed of light - In km/s\n speed_c = ac.c.to(u.km/u.s).value\n ## Number of CPU's to use\n cpu_number = int(cpu_count() * param_dict['cpu_frac'])\n ##\n ## Plotting constants\n plot_dict = { 'size_label':23,\n 'size_title':25,\n 'color_ham' :'red',\n 'color_dyn' :'blue'}\n ##\n ## Catalogue Prefix string\n catl_str_fig = param_dict['ml_args'].catl_alg_comp_fig_str()\n ##\n ## Saving to `param_dict`\n param_dict['sample_s' ] = sample_s\n param_dict['sample_Mr' ] = sample_Mr\n param_dict['vol_mr' ] = vol_mr\n param_dict['cens' ] = cens\n param_dict['sats' ] = sats\n param_dict['speed_c' ] = speed_c\n param_dict['cpu_number' ] = cpu_number\n param_dict['plot_dict' ] = plot_dict\n param_dict['catl_str_fig'] = catl_str_fig\n\n return param_dict", "def __init__(self, data, **kwargs):\n super(MmhcEstimator, self).__init__(data, **kwargs)", "def __init__(self, m, vectors = None, dimensions = None, shape = None):\n\n if len(m) == 0:\n\n if dimensions is None or shape is None:\n raise ValueError(\"Empty input: could not determine number of dimensions and/or shape of tight-binding blocks\")\n\n if not isinstance(dimensions,int):\n raise ValueError(\"Dimensions keyword argument should be integer\")\n\n a,b = shape\n if not isinstance(a,int) or not isinstance(b,int):\n raise ValueError(\"Shape keyword argument should be a tuple of two integers\")\n\n self.dims = dimensions\n self.shape = (a,b)\n self.__m__ = {}\n\n else:\n #TODO: Move all this mess to __setitem__\n\n if isinstance(m,dict):\n if not vectors is None:\n warn(\"Vectors keyword argument is ignored\")\n\n else:\n if vectors is None:\n m = {tuple():m}\n else:\n m = dict(zip((tuple(i) for i in vectors), m))\n\n t = None\n\n for k,v in m.items():\n\n if isinstance(v, (list,tuple)):\n tt = numpy.ndarray\n else:\n tt = type(v)\n\n if t is None:\n t = tt\n\n if not t == tt:\n raise ValueError(\"Inconsistent types along the input dict: {} and {}\".format(str(t),str(tt)))\n\n self.__m__ = {}\n\n if t == numpy.ndarray or t == numpy.core.memmap:\n\n for k,v in m.items():\n\n if isinstance(k,int):\n k = (k,)\n\n self.__m__[k] = numpy.array(v, dtype = numpy.complex)\n elif t == TightBinding:\n\n for k,v in m.items():\n\n if not isinstance(k, int):\n raise ValueError(\"The keys in the input should be ints, found {} instead\".format(str(type(k))))\n\n for kk, vv in v.__m__.items():\n self.__m__[(k,)+kk] = numpy.array(vv, dtype = numpy.complex)\n\n else:\n raise ValueError(\"Unknown type: 
{}\".format(str(t)))\n\n self.dims = None\n self.shape = None\n d1 = None\n\n for k,v in self.__m__.items():\n\n shape = tuple(v.shape)\n if not len(shape) == 2:\n raise ValueError(\"{} is not a 2D matrix: shape = {}\".format(str(k), str(shape)))\n\n if self.shape is None:\n self.dims = len(k)\n d1 = k\n self.shape = shape\n\n elif not self.dims == len(k):\n raise ValueError(\"Inconsistent dimensions: {} vs {}\".format(str(d1),str(k)))\n\n elif not self.shape == shape:\n raise ValueError(\"Inconsistent matrix size: {} in {} vs {} in {}\".format(str(self.shape), str(d1), str(shape), str(k)))\n\n if not dimensions is None and not dimensions == self.dims:\n raise ValueError(\"Dimensions keyword argument = {:d} does not correspond to the input with {:d} dimensions\".format(dimensions, self.dims))\n\n if not shape is None and not shape == self.shape:\n raise ValueError(\"Shape keyword argument = {} does not correspond to the input with shape {}\".format(repr(shape), repr(self.shape)))", "def create_vector_dict(self):\n return self.MOVE_DATA", "def create_vector_dict(self):\n return self.MOVE_DATA", "def __setitem__(self, *args):\n return _osgAnimation.vectorMatrixKeyframe___setitem__(self, *args)", "def __add__(self, m):\n\n nv=Matrice()\n if self.__mm_type(m):\n ls=len(self)\n nv.generate(ls,self.desc())\n for i in self.desc():\n for j in range(len(self)):\n nv.g_val(self.val(i,j)+m.val(i,j),i,j)\n return nv", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, nsteps=10, timestep=1 * simtk.unit.femtoseconds):\n\n super(HMCIntegrator, self).__init__(timestep)\n\n # Compute the thermal energy.\n kT = kB * temperature\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addPerDofVariable(\"x1\", 0) # for constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow Context updating here, outside of inner loop only.\n #\n self.addUpdateContextState()\n\n #\n # Draw new velocity.\n #\n self.addComputePerDof(\"v\", \"sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Store old position and energy.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n\n #\n # Inner symplectic steps using velocity Verlet.\n #\n for step in range(nsteps):\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()\n\n #\n # Accept/reject step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + 
accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def __add__(self, vector):\n return self.translated(vector)", "def __init__(self,new_U_dict):\n\n self._LDAU_KEYS = ['LDAUTYPE', 'LDAUPRINT', 'MAGMOM', 'LDAUL', 'LDAUJ', 'LDAUU', 'LDAU'] \n\n self.structure_has_been_read = False\n\n # dictionary which contains the U value to apply to various TM elements\n self.new_U_dict = new_U_dict", "def __iadd__(self, m):\n if self.__mm_type(m):\n ls=len(self)\n for i in self.desc():\n for j in range(ls):\n self.g_val(self.val(i,j)+m.val(i,j),i,j)\n return self", "def __init__(self):\n #MdvData. __init__(self,model.target_fragments)\n #self.mdv = {}\n self.mdvtc ={}\n self.mode = \"timecourse\"", "def __init__(self,**kwargs):\n self.attr = ['angle','width','height','m','Fg','Fs','Fd','kf','Ff']\n # attributes of the incline in order: angle,width,height, mass,Fg(gravity force),Fs(statical force), Fd (dynamical force),kf(friction coefficient), Ff(friction force)\n self.data = {param: None for param in self.attr}#initialazing data\n self.given_data = set() #set of data given by user\n self.add_data(**kwargs)", "def __init__(self, dualgan:nn.Module, l_adv:float=1., l_rec:float=1., l_idt:float=0.):\n super().__init__()\n store_attr()", "def _add_vtarg_and_adv(seg, gamma, lam):\n new = np.append(seg[\"new\"], 0)\n vpred = np.append(seg[\"vpred\"], seg[\"nextvpred\"])\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n nonterminal = 1 - new[t + 1]\n delta = rew[t] + gamma * vpred[t + 1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n seg[\"tdlamret\"] = seg[\"adv\"] + seg[\"vpred\"]\n del seg[\"nextvpred\"]", "def set_T_lm(self):\n self.delta_T_lm_array = ( ((self.exh.T_outlet_array -\n self.cool.T_inlet_array) - (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) / np.log((self.exh.T_outlet_array -\n self.cool.T_inlet_array) / (self.exh.T_inlet_array -\n self.cool.T_outlet_array)) )", "def add(self, keys: List[Tuple[int, int]], vectors: np.ndarray, weights: List[float], *args, **kwargs):\n pass", "def __init__(self, X, T, *args, **kvargs):\r\n\r\n print X.shape\r\n print T.shape\r\n print \"start args\"\r\n for arg in args:\r\n print arg\r\n print \"start kvargs\"\r\n for kv, arg in kvargs.items():\r\n print kv, arg\r\n print \"end (kv)args\"\r\n \r\n N, inputs = X.shape\r\n _, targets = T.shape\r\n super(ELM, self).__init__(inputs, targets)", "def get_LDAU(self):\n\n # let's simply use the default as a first step\n LDAU_dict, poscar_need_hack, potcar_need_hack = super(U_Strategy_MaterialsProject_V2, self).get_LDAU()\n\n Na_indices = self.structure.indices_from_symbol('Na')\n\n # hack MAGMOM\n list_oxidizable_site_indices = self.sort_TM_sites_by_Na_distance(Na_indices)\n\n MAGMOM = self.build_magmom(list_oxidizable_site_indices)\n LDAU_dict['MAGMOM'] = MAGMOM \n\n return LDAU_dict, poscar_need_hack, potcar_need_hack", "def __init__(self):\n self.map = {}\n self.vec = []", "def __init__(self, dm1, dm2, cdm):\r\n super(PartialMantel, self).__init__([dm1, dm2, cdm], num_dms=3,\r\n min_dm_size=3)", "def extend(self,data):\n n = float(len(data))\n if n == 0:\n return self\n M2 = 0\n M3 = 0\n M4 = 0\n mean = 0\n vmin = None\n vmax = None\n for x in data:\n mean += x/n \n if vmin is None:\n vmax = x\n vmin = x\n if x < vmin:\n vmin = x\n if x > vmax:\n vmax = x\n for x in data:\n d = x-mean\n M2 += (d**2)\n M3 += (d**3)\n M4 += (d**4)\n x = 
LiveStat(self.name)\n x.vmin = vmin\n x.vmax = vmax\n x.vmean = mean\n x.vm2 = M2\n x.vm3 = M3\n x.vm4 = M4\n x.vcount = int(n)\n x.vcountsq = x.vcount**2\n x.dirty = True\n self.merge(x)\n return self", "def addDataTo(self, other_sim_data):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n TreeLikelihoodBase.addDataTo(self, other_sim_data)", "def addnewmmltdataset(zlen,ms,msp=None,phz=None,phpz=None,nn=None,vv=None):\n # --- Make sure that the data set has the same number of multipole\n # --- components or less, or that both n and v are passed in.\n assert ((len(shape(ms)) == 1) or (shape(ms)[1] <= top.nmsmult) or \\\n (nn is not None and vv is not None)),\\\n \"The shape of the dataset must be consistent with the data already created or both n and v must be specified\"\n\n # --- Now setup the multipole component dataset.\n top.nmmltsets = top.nmmltsets + 1\n\n # --- Make sure that ms is a 2-D array (first dimension is data versus z,\n # --- second is number of multipole components)\n if len(shape(ms)) == 1:\n ms = transpose(array([ms]))\n if msp is not None: msp = transpose(array([msp]))\n if phz is not None: phz = transpose(array([phz]))\n if phpz is not None: phpz = transpose(array([phpz]))\n\n # --- Make sure that the first dimension of the arrays is long enough\n if shape(ms)[0] > top.nzmmltmax+1: top.nzmmltmax = shape(ms)[0] - 1\n\n # --- Change the sizes of the arrays\n gchange(\"Mult_data\")\n\n # --- Set basic parameters\n n0 = shape(ms)[0] # --- Number of data points along z\n n1 = shape(ms)[1] # --- Number of multipole components\n top.nzmmlt[-1] = n0 - 1\n top.dzmmlt[-1] = zlen/(n0 - 1.)\n\n if nn is None and vv is None:\n assert top.nmsmult > 0,'There are no mmlt data sets, so the nn and vv arguments must be specified'\n # --- Assume n and v are ordered correctly and just copy the data in\n top.msmmlt[:n0,:n1,-1] = ms\n if msp is not None: top.msmmltp[:n0,:n1,-1] = msp\n if phz is not None: top.msmmltph[:n0,:n1,-1] = phz\n if phpz is not None: top.msmmltphp[:n0,:n1,-1] = phpz\n\n else:\n # --- Make sure that n and v are lists\n if len(shape(nn)) == 0: nn = list([nn])\n else: nn = list(nn)\n if len(shape(vv)) == 0: vv = list([vv])\n else: vv = list(vv)\n\n # --- Make ms a list of arrays\n ms = list(transpose(ms))\n if msp is not None: msp = list(transpose(msp))\n if phz is not None: phz = list(transpose(phz))\n if phpz is not None: phpz = list(transpose(phpz))\n\n # --- Loop over existing multipole components\n for i in range(top.nmsmult):\n # --- Loop over input multipole components checking if any are the same\n for j in range(len(nn)):\n if nn[j] == top.mmlt_n[i] and vv[j] == top.mmlt_v[i]:\n # --- If so, then copy the data to the appropriate place and\n # --- delete the data from the lists.\n top.msmmlt[:n0,i,-1] = ms[j]\n if msp is not None: top.msmmltp[:n0,i,-1] = msp[j]\n if phz is not None: top.msmmltph[:n0,i,-1] = phz[j]\n if phpz is not None: top.msmmltphp[:n0,i,-1] = phpz[j]\n del nn[j],vv[j],ms[j]\n if msp is not None: del msp[j]\n if phz is not None: del phz[j]\n if phpz is not None: del phpz[j]\n break\n\n # --- Now copy in any left over data, increasing the number of multipole\n # --- components.\n if len(nn) > 0:\n ln = len(nn)\n top.nmsmult = top.nmsmult + ln\n gchange(\"Mult_data\")\n top.mmlt_n[-ln:] = nn\n top.mmlt_v[-ln:] = vv\n top.msmmlt[:n0,-ln:,-1] = transpose(array(ms))\n if msp is not None: top.msmmltp[:n0,-ln:,-1] = transpose(array(msp))\n if phz is not None: top.msmmltph[:n0,-ln:,-1] = transpose(array(phz))\n if phpz is not 
None: top.msmmltphp[:n0,-ln:,-1] = transpose(array(phpz))\n\n return top.nmmltsets", "def __setitem__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___setitem__(self, *args)", "def update_LTM(self, base):\n # Generate binary number corresponding to sign\n # of each input variable.\n b = ((base > 0).astype(int)).astype(str).flatten()\n # Update frequency of relevant area.\n self.LTM[int(''.join(b), 2)] += 1", "def add_vector(self, name, text, tag=None):\n words = self.clean_text_util.clean_text(text)\n \n # max{f(w,d) : w ∈ d)}\n counter = Counter(words)\n _, max_occ = counter.most_common(1)[0] \n\n # remove duplicate word\n words = set(words)\n \n items = []\n for word in words:\n pickle_wordinfo = self.dictionary_db.get(word)\n if not pickle_wordinfo:\n continue\n \n word_info = pickle.loads(pickle_wordinfo)\n\n # tf formula: tf(f,d) = f(f,d)/max{f(w,d) : w ∈ d)} (src Wikipedia)\n tf = counter[word]/float(max_occ)\n\n # create a new vector item entry\n items.append(VectorItem(word, tf))\n\n # sort the vector item by the dictionary index\n items.sort(key=lambda x: x.word_info(self.dictionary_db).index)\n\n # finally, we create a new vector\n vector = Vector(items, tag)\n self.vectors_db.add(name, pickle.dumps(vector))\n\n # add an empty entry to the norm db\n self.vectors_norm_db.add(name, self.vector_tfidf_norm(items))", "def __init__(self, *args):\n _snap.TNGraphMtx_swiginit(self, _snap.new_TNGraphMtx(*args))", "def __init__(self, timestep=1.0 * simtk.unit.femtoseconds):\n\n super(VelocityVerletIntegrator, self).__init__(timestep)\n\n self.addPerDofVariable(\"x1\", 0)\n\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "def translation(self,vect,**kwargs):\n xyz = self.get('x,y,z',**kwargs)\n xyz += vect\n self.update('x,y,z',xyz,**kwargs)", "def add_velocity(self, Mextra=0, period=0, model=1):\n \n if self.npart == 0:\n self.vel = transpose(array([[],[]]))\n return\n \n print(\" Adding velocities...\")\n \n if model==0: vel = zeros((self.npart, 2))\n \n elif model in [1,2]:\n print(\" Setting keplerian velocities...\")\n pos = self.pos - self.center\n radii = norm(pos, axis=1)\n self.v_kep = sqrt(Mextra * G / radii)\n if model==2: Mextra += sum(self.mass)\n v_kep = sqrt(Mextra * G / radii)\n vel = matmul(pos / radii[:, newaxis], array([[0, 1], [-1, 0]])) * v_kep[:, newaxis]\n \n\n elif model==3:\n print(\" Setting velocities from binary period...\")\n if period==0:\n print(\" Incorrect period for setting disk velocities.\")\n print(\" Disk velocities are set to zero.\")\n vel = zeros((self.npart, 2))\n \n else:\n pos = self.pos - self.center\n v_ang = 1 / float(period) \n vel = v_ang * matmul(pos, array([[0, 1], [-1, 0]]))\n \n else:\n print(\"Model must be 0, 1, 2 or 3.\")\n print(\" {:d} was given. 
Exiting.\".format(model))\n exit()\n \n \n self.vel = vel", "def __init__(self, vector):\n\n self.vector = vector\n # labels is a frequency table for the labels of the vectors of all hits\n self.labels = Counter()", "def __init__(self, *args):\n _snap.TUNGraphMtx_swiginit(self, _snap.new_TUNGraphMtx(*args))", "def _scalar_update(self, d_t, **kwargs):\n for key, val in kwargs.items():\n if isinstance(val, GPUArray):\n kwargs[key] = val.get()\n self.solver(d_t, **kwargs)\n self.post()", "def __init__(self, *args, **kwargs):\n self.mV = [] # Elements of the vector\n self.mPrintSpec = '%f' # String formatter for elements\n for x in args:\n self.mV.append(float(x))\n \n for arg in kwargs:\n if arg not in [ 'size', ]: # Add keywords here\n raise ValueError(\"keyword '%s' not recognized\" % arg)\n\n if 'size' in kwargs and kwargs['size'] is not None:\n if kwargs['size'] < len(args):\n raise IndexError('Cannot allocate fewer items ' + \n 'than already specified.')\n else:\n nToAdd = kwargs['size'] - len(args)\n while nToAdd > 0:\n self.mV.append(0.0)\n nToAdd -= 1", "def __init__(self):\n super(GELU, self).__init__()", "def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)", "def __init__(self, m=np.random.normal(M_INIT, .25, 1)[0], b=np.random.normal(B_INIT, .25, 1)[0], \\\n\t\t\t\t\tt=np.random.normal(T_INIT, .25, 1)[0], l=L_INIT*np.random.normal(1.0, .25, 1)[0]):\n\t\t\n\t\tself.shape_slope = m\n\t\tself.z_thick = b\n\t\tself.thick = t\n\t\tself.length = l", "def __add__(self, other: float) -> 'Translation':\n self._vector.setWithArray((self._vector.x + other, self._vector.y + other, self._vector.z + other))\n return self", "def insert_vm_cpu_mhz(data):\n return IMPL.insert_vm_cpu_mhz()", "def lvec(self):\n lv = ROOT.TLorentzVector()\n# if self.pt < 0 or abs(self.eta) > 6:\n# raise Exception(\"Invalid values for TLorentzVector\")\n lv.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n# if abs(lv.Pt()) > 100000 or abs(lv.Eta()) > 100000:\n# raise Exception(\"Invalid values for TLorentzVector\")\n return lv", "def add_datamodel(self, dm):\n\n assert isinstance(dm, self.base_model), 'value must be a {0}'.format(self.base_name)\n\n self[dm.release] = dm", "def addData(self, d):\n self.__populateDict(self._data, d)", "def __init__(self):\n super(MLE, self).__init__()\n\n # Metadata\n self.appliance = None\n self.stats = []\n self.units = None\n self.resistive = False\n self.thDelta = 0\n self.thLikelihood = 0\n self.sample_period = None\n self.sampling_method = None\n # FEATURES:\n self.onpower = {'name': 'gmm', 'model': mixture.GMM(n_components=1)}\n self.offpower = {'name': 'gmm', 'model': mixture.GMM(n_components=1)}\n self.duration = {'name': 'poisson', 'model': poisson(0)}\n\n # Trainings:\n self.onpower_train = pd.DataFrame(columns=['onpower'])\n self.offpower_train = pd.DataFrame(columns=['offpower'])\n self.duration_train = pd.DataFrame(columns=['duration'])\n\n # Constrains\n self.powerNoise = 0 # Background noise in the main\n self.powerPair = 0 # Max diff between onpower and offpower\n self.timeWindow = 0 # To avoid high computation", "def __init__(self):\n super().__init__()\n self.dmdParams = {} # dmd settings container\n self.printTag = 'DMD' # print tag\n self._dynamicHandling = True # This ROM is able to manage the time-series on its own. 
No need for special treatment outside\n self.pivotParameterID = None # pivot parameter\n # variables filled up in the training stages\n self._amplitudes = {} # {'target1': vector of amplitudes,'target2':vector of amplitudes, etc.}\n self._eigs = {} # {'target1': vector of eigenvalues,'target2':vector of eigenvalues, etc.}\n self._modes = {} # {'target1': matrix of dynamic modes,'target2':matrix of dynamic modes, etc.}\n self.__Atilde = {} # {'target1': matrix of lowrank operator from the SVD,'target2':matrix of lowrank operator from the SVD, etc.}\n self.pivotValues = None # pivot values (e.g. time)\n self.KDTreeFinder = None # kdtree weighting model\n self.timeScales = {} # time-scales (training and dmd). {'training' and 'dmd':{t0:float,'dt':float,'intervals':int}}\n self.featureVals = None # feature values", "def __add__(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other)\n if not mv:\n if isinstance(other, np.ndarray):\n obj = self.__array__()\n return obj + other\n newValue = self.value + other.value\n\n return self._newMV(newValue)", "def __init__(self, *args):\n _snap.TFltKdV_swiginit(self, _snap.new_TFltKdV(*args))", "def SetData(self, data_):\n return _hypre.HypreParVector_SetData(self, data_)", "def add_to_dict(self, m, **kwargs):\n m['created'] = self.created if self.created else datetime_to_str()\n if self.message is not None:\n m['message'] = self.message\n if self.name is not None or self.address is not None:\n m['user'] = {'name': self.name}\n if self.address is not None:\n m['user']['address'] = self.address\n # Add any extra values, and they will override instance variables\n for (key, value) in kwargs.items():\n m[key] = value", "def lorentz_transform(self, transformation_matrix):\n\n t = super(GenericVector, self).lorentz_transform(transformation_matrix)\n return GenericVector(\n t.tensor(),\n syms=self.syms,\n config=self.config,\n parent_metric=None,\n name=_change_name(self.name, context=\"__lt\"),\n )", "def _Add_To_Mesh(self, layer, lVertices):\n self._mesh[layer] += lVertices", "def set_tensor_data(self, data: dict) -> None:\n assert isinstance(data,\n dict), f'data should be a `dict` but got {data}'\n for k, v in data.items():\n if k == 'gt_label':\n self.set_gt_label(v)\n elif k == 'prompt':\n self.set_field(v, k, dtype=(str, list))\n else:\n self.set_field(all_to_tensor(v), k, dtype=torch.Tensor)", "def add_velocity(self, temp=0):\n vel = np.random.normal(0, np.sqrt(temp), size=(self.numatom, 3))\n self.contents['VX'] = vel[:, 0]\n self.contents['VY'] = vel[:, 1]\n self.contents['VZ'] = vel[:, 2]", "def _localNormalizeData(self,values,names,feat):\n self.muAndSigmaFeatures[feat] = (0.0,1.0)", "def __init__(self, *args):\n _snap.TFltV_swiginit(self, _snap.new_TFltV(*args))", "def __setitem__(self, key, value):\n if key in self.base_keys() or key == \"flex_data\":\n setattr(self, key, value)\n else:\n valid_key_chars = re.compile(r\"^[A-Za-z_]\\w*$\")\n FlexError.require_condition(\n valid_key_chars.match(key),\n dedent(\n \"\"\"\n flex_data attribute keys must contain only letters,\n numbers, and '_', and cannot start with a number.\n \"\"\"\n ),\n )\n if value is not None:\n # the flex_data attribute may be none if the instance is not\n # yet 'added'. 
Defaults are set at mapping time\n current_flex_data = self.flex_data or {}\n self.flex_data = {**current_flex_data, key: value}", "def update_latent(self, variable_mat, weight_mat, output_mat, y_list):\n new_latent = {k: np.zeros(self.H_mat[k].shape) for k in self.H_mat}\n y_sigma = [np.array([self.margin ** -1.0 if i == 1.0 else 4.0\n for i in y])\n for y in y_list]\n\n new_latent['sigma'] = np.linalg.inv(\n np.diag([self.sigma_h ** -1.0 for _ in range(self.R)])\n + reduce(lambda x, y: x + y,\n [(np.outer(weight_mat['mu'][1:, i],\n weight_mat['mu'][1:, i])\n + weight_mat['sigma'][i][1:, 1:])\n / (np.prod(y_sigma[0]) ** (1.0 / self.sample_count))\n for i in range(self.task_count)])\n )\n\n new_latent['mu'] = np.dot(\n new_latent['sigma'],\n np.dot(variable_mat['mu'].transpose(),\n self.kernel_mat) / self.sigma_h\n + reduce(\n lambda x, y: x + y,\n [(np.outer(weight_mat['mu'][1:, i], output_mat['mu'][i, :])\n - np.repeat(a=np.array([\n [x * weight_mat['mu'][0, i] + y for x, y in\n zip(weight_mat['mu'][1:, i],\n weight_mat['sigma'][i, 1:, 0])]]\n ), repeats=self.sample_count, axis=0).transpose())\n / y_sigma[i]\n for i in range(self.task_count)]\n )\n )\n\n return new_latent", "def addMatrix(self, dataMatrix, labels):\n assert(dataMatrix.shape[0]==len(labels))\n\n # Add any labels not yet seen...\n for l in labels:\n if l not in self.labelToNum.keys():\n num = len(self.numToLabel)\n self.numToLabel.append(l)\n self.labelToNum[l] = num\n\n # Convert the given labels list to a list of numerical labels...\n ls = map(lambda l:self.labelToNum[l],labels)\n\n # Store...\n self.blocks.append((dataMatrix.astype(numpy.double),ls))", "def __init__(self, *args):\n _snap.TMMNet_swiginit(self, _snap.new_TMMNet(*args))", "def __init__(self, *args):\n _snap.TFltTrV_swiginit(self, _snap.new_TFltTrV(*args))", "def add_mass(self, cm_new, m_new, iT_new_ar, align, isLoad=False):\n iT_new = tensor(*iT_new_ar)\n cm = self.cm\n iT = self.iT\n m = self.m\n cm_new_aligned = align @ cm_new\n if isLoad:\n cm_new_aligned = cm_new_aligned + self.dimension\n iT_new_aligned = align @ iT_new @ align.T\n res = mass_combine(m, m_new, cm, cm_new_aligned, iT, iT_new_aligned)\n (self.m, self.cm, self.iT) = res", "def initnewdictitem(self, key, enttype):\n\n self._dentsvertsdata[key] = VertDataCollectorCoord3fNormal3fColor4f(enttype)", "def get_tme_load_m(self):\n return self.tlm[:]", "def train(self, tdict):\n pass", "def lc(self, other) -> 'MultiVector':\n\n other, mv = self._checkOther(other, coerce=True)\n\n newValue = self.layout.lcmt_func(self.value, other.value)\n\n return self._newMV(newValue)", "def __setitem__(self, key, value):\n # if isinstance(value, MutableMapping):\n # self._axl_data[key] = AXLDataModel(value)\n # else:\n # self._axl_data[key] = value\n if isinstance(value, MutableMapping):\n raise TypeError(mutable_mapping_msg)\n self._axl_data[key] = value", "def addnewmmlt(zs,ze,ap=0.,ax=0.,ay=0.,ph=0.,sf=0.,sc=1.,id=None,\n ox=0.,oy=0.,ot=0.,op=0.,aps=0.,ape=0.,ol=0,lb=true,\n ms=None,msp=None,phz=None,phpz=None,nn=None,vv=None,\n time=None,data=None,func=None):\n # --- Make sure either an 'id' or a dataset, 'ms', was passed in.\n assert (id is not None or ms is not None), \\\n \"either an 'id' or a dataset, 'ms', must be passed in\"\n\n # --- Make sure that at least some of the element is in the proper range,\n # --- z >= 0., and if zlatperi != 0, z <= zlatperi.\n assert (zs < ze),\"element start must be less than element end\"\n assert (top.zlatperi == 0.) 
or (ze > 0.),\"element end must be greater than zero if top.zlatperi is nonzero\"\n assert (top.zlatperi == 0.) or (zs < top.zlatperi),\"element start must be less than zlatperi if top.zlatperi is nonzero\"\n\n # --- Get a dict of the input arguments and their values.\n ldict = locals()\n\n # --- Setup the lattice arrays for the insertion of the new element. If\n # --- there are already mmlts, then find the place where the new one is to\n # --- be inserted and shift the existing data to open up a space.\n ie = 0\n # --- Find which element the new one goes before.\n while ie <= top.nmmlt and top.mmltzs[ie] <= zs and top.mmltze[ie] != top.mmltzs[ie]:\n ie = ie + 1\n\n # --- Increase the size of the arrays if the element will go past the end\n # --- or if the array is full (i.e. the last element is used).\n if ie > top.nmmlt or (top.mmltzs[-1] != top.mmltze[-1] or top.mmltid[-1] > 0):\n top.nmmlt = top.nmmlt + 100\n top.nmerr = top.nmerr + 100\n gchange(\"Lattice\")\n\n # --- Setup dictionary relating lattice array with input argument names.\n # --- This is done here so that the references to the lattice arrays\n # --- refer to the updated memory locations after the gchange.\n edict = {'zs':top.mmltzs,'ze':top.mmltze,\n 'ap':top.mmltap,'ax':top.mmltax,'ay':top.mmltay,\n 'ph':top.mmltph,\n 'sf':top.mmltsf,'sc':top.mmltsc,\n 'ox':top.mmltox,'oy':top.mmltoy,'ot':top.mmltot,'op':top.mmltop,\n 'aps':top.mmltas,'ape':top.mmltae,'ol':top.mmltol,'lb':top.mmltlb}\n\n # --- Shift the existing data in the arrays to open up a space for the\n # --- new element.\n if ie <= top.nmmlt:\n top.mmltid[ie+1:] = top.mmltid[ie:-1] + 0\n for e in edict.itervalues():\n e[ie+1:] = e[ie:-1] + 0\n\n # --- Insert the new element. Note that edict correlates the lattice array\n # --- with the input arguments and ldict correlate the arguements with\n # --- their values.\n for (xx,e) in edict.iteritems():\n e[ie] = ldict[xx]\n\n # --- Now setup the multipole component dataset.\n if id is not None:\n # --- If an 'id' was passed in, then just use that.\n top.mmltid[ie] = id\n elif ms is not None:\n top.mmltid[ie] = addnewmmltdataset(ze-zs,ms,msp,phz,phpz,nn,vv)\n\n if (time is not None and data is not None) or func is not None:\n TimeDependentLatticeElement('mmltsc',ie,time,data,func)\n\n # --- resetlat must be called before the data can be used\n top.lresetlat = true\n\n # --- Return the id of the new dataset. 
This allows the user to refer to\n # --- this new dataset without having to knowne its actual number.\n return ie,top.mmltid[ie]", "def __init__(self, inputs, outputs):\n super(ELM, self).__init__(inputs, outputs)", "def data_vector(self) -> np.ndarray:\r\n return np.dot(\r\n self.linear_obj_list[0].mapping_matrix.T, self.w_tilde.dirty_image\r\n )", "def __init__(self):\n super(LogSTFTMagnitudeLoss, self).__init__()", "def loadData(self, **kwargs):\n \n Simulation.loadData(self, **kwargs)\n if crp_flag:\n self.data['T'] = self.data.D*crp.kpc/crp.c_light/3600/24/365.25/1e6\n else: \n self.data['T'] = self.data.D*siu.kpc/siu.c_light/3600/24/365.25/1e6", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def __init__(self, plasma_parent):\n super(LevelNumberDensityHeNLTE, self).__init__(plasma_parent)\n self.calculate = self._calculate_helium_nlte\n self._update_inputs()\n self.initialize_indices = True", "def __init__(self, x, y, data):\n super().__init__(x=x, y=y, data=data, has_analytic_ft=False)\n self._ee = {}\n self._mtf = None\n self._nu_p = None\n self._dnx = None\n self._dny = None", "def __init__(self, **kwargs):\r\n self.item_factors = []\r\n self.user_factors = []\r\n self.interactions = None\r\n self.weights = None\r\n self.user_features = None\r\n self.item_features = None\r\n super(WLightFM, self).__init__(**kwargs)", "def sett(self,M,b):\n self.t=s.expit(z(self.q,M,self.a,b)[0]) # answer labels as estimated by the model", "def addMetaMolecule (self,metaMolecule):\r\n self.metaMolecule = metaMolecule", "def set_variables(self, g_t, m_t):\n self.g_t, self.m_t = g_t, m_t\n return", "def __setitem__(self, key, item):\n assert isinstance(key,list) and isinstance(item,list) and len(key)==2 and len(item)==2\n self._data[self.__ptBin(key[0])][self.__etaBin(key[1])] = item", "def _add_meta(self, *args, **kwargs) -> None:\n raise NotImplementedError", "def __init__(self, temperature=298.0 * simtk.unit.kelvin, collision_rate=91.0 / simtk.unit.picoseconds, timestep=1.0 * simtk.unit.femtoseconds):\n\n # Initialize constants.\n kT = kB * temperature\n gamma = collision_rate\n\n # Create a new custom integrator.\n super(GHMCIntegrator, self).__init__(timestep)\n\n #\n # Integrator initialization.\n #\n self.addGlobalVariable(\"kT\", kT) # thermal energy\n self.addGlobalVariable(\"b\", numpy.exp(-gamma * timestep)) # velocity mixing parameter\n self.addPerDofVariable(\"sigma\", 0)\n self.addGlobalVariable(\"ke\", 0) # kinetic energy\n self.addPerDofVariable(\"vold\", 0) # old velocities\n self.addPerDofVariable(\"xold\", 0) # old positions\n self.addGlobalVariable(\"Eold\", 0) # old energy\n self.addGlobalVariable(\"Enew\", 0) # new energy\n self.addGlobalVariable(\"accept\", 0) # accept or reject\n self.addGlobalVariable(\"naccept\", 0) # number accepted\n self.addGlobalVariable(\"ntrials\", 0) # number of Metropolization trials\n self.addPerDofVariable(\"x1\", 0) # position before application of constraints\n\n #\n # Pre-computation.\n # This only needs to be done once, but it needs to be done for each degree of freedom.\n # Could move this to initialization?\n #\n self.addComputePerDof(\"sigma\", \"sqrt(kT/m)\")\n\n #\n # Allow context updating here.\n #\n self.addUpdateContextState()\n\n #\n # Constrain positions.\n #\n self.addConstrainPositions()\n\n #\n # Velocity perturbation.\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + 
sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Metropolized symplectic step.\n #\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Eold\", \"ke + energy\")\n self.addComputePerDof(\"xold\", \"x\")\n self.addComputePerDof(\"vold\", \"v\")\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x + v*dt\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v + 0.5*dt*f/m + (x-x1)/dt\")\n self.addConstrainVelocities()\n self.addComputeSum(\"ke\", \"0.5*m*v*v\")\n self.addComputeGlobal(\"Enew\", \"ke + energy\")\n self.addComputeGlobal(\"accept\", \"step(exp(-(Enew-Eold)/kT) - uniform)\")\n self.addComputePerDof(\"x\", \"x*accept + xold*(1-accept)\")\n self.addComputePerDof(\"v\", \"v*accept - vold*(1-accept)\")\n\n #\n # Velocity randomization\n #\n self.addComputePerDof(\"v\", \"sqrt(b)*v + sqrt(1-b)*sigma*gaussian\")\n self.addConstrainVelocities()\n\n #\n # Accumulate statistics.\n #\n self.addComputeGlobal(\"naccept\", \"naccept + accept\")\n self.addComputeGlobal(\"ntrials\", \"ntrials + 1\")", "def __setitem__(self, key, val):\n extract = lambda t: t.item() if type(t) is torch.Tensor else t\n\n if type(val) is dict:\n for k, v in val.items():\n self.log_scalar(k, extract(v), 'last')\n else:\n self.log_scalar(key, extract(val), 'last')", "def __init__(self, *args):\n _snap.TFltKd_swiginit(self, _snap.new_TFltKd(*args))" ]
[ "0.62161565", "0.60280395", "0.5786776", "0.5678434", "0.5514574", "0.54189706", "0.538351", "0.53775567", "0.5377541", "0.53738487", "0.53701663", "0.5347109", "0.53244966", "0.52985543", "0.52572984", "0.5225952", "0.5215003", "0.52027744", "0.5197203", "0.51853836", "0.51818174", "0.51566505", "0.51566505", "0.512775", "0.5115434", "0.5106592", "0.5083464", "0.50783163", "0.5076803", "0.5048764", "0.50388616", "0.5035229", "0.4999539", "0.49952996", "0.4988994", "0.49841446", "0.4983472", "0.49758825", "0.49702474", "0.49609908", "0.4956043", "0.49530455", "0.49480146", "0.49359196", "0.4928454", "0.49186364", "0.49173746", "0.4910999", "0.4906026", "0.49017918", "0.48988235", "0.4898447", "0.4894098", "0.48921648", "0.48903552", "0.48774388", "0.48769477", "0.4871115", "0.4867609", "0.48670095", "0.48572612", "0.48516092", "0.48508054", "0.48459855", "0.48436883", "0.48416528", "0.48383763", "0.48369202", "0.48284027", "0.4824193", "0.48166338", "0.48137158", "0.4813475", "0.48083225", "0.48014835", "0.48009714", "0.48002523", "0.47995552", "0.47960868", "0.47926247", "0.4788935", "0.47882903", "0.4784519", "0.47842503", "0.4783229", "0.47793278", "0.47790778", "0.47777474", "0.47752845", "0.47739872", "0.47718984", "0.47681", "0.47667804", "0.476192", "0.4749861", "0.47495398", "0.47367957", "0.4735172", "0.47348568", "0.47289923", "0.47271943" ]
0.0
-1
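The float list a few lines up reads as a per-negative score list running parallel to the negatives array, with the bare "0.0" and "-1" after it looking like the row's document score and rank. A minimal sketch of one way such scores might be used — pairing each negative with its score and sorting to surface the hardest ones — follows; the names negatives, negative_scores, and the sample values are placeholders for illustration, not taken from the dump, and nothing here asserts how the dataset's authors actually consume the scores.

# Illustrative sketch only: assumes the score strings line up one-to-one
# with the negatives and that a higher score means a harder negative.
negatives = ["doc a", "doc b", "doc c"]        # placeholder negative texts
negative_scores = ["0.62", "0.60", "0.58"]     # placeholder score strings

# Pair each negative with its score (converted to float) and sort
# descending so the highest-scoring negatives come first.
ranked = sorted(
    zip(negatives, (float(s) for s in negative_scores)),
    key=lambda pair: pair[1],
    reverse=True,
)
hard_negatives = [text for text, _ in ranked[:2]]
print(hard_negatives)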
the primary entry point method for PyTorch datasets
def __getitem__(self, index): row = self._target_df.iloc[index] vector_dict = self._vectorizer.vectorize(row.source_language, row.target_language) return {"x_source": vector_dict["source_vector"], "x_target": vector_dict["target_x_vector"], "y_target": vector_dict["target_y_vector"], "x_source_length": vector_dict["source_length"], "x_source_mltm_vector": vector_dict["x_source_mltm_vector"]}
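The query/document pair above centers on __getitem__ as the per-example entry point of a PyTorch dataset. A minimal, self-contained sketch of that contract follows — __len__ plus a __getitem__ that returns a dict, consumed through a DataLoader. The toy ToyPairs class and its random tensors are stand-ins for illustration; the row above instead builds its dict from a vectorizer and a DataFrame.

# Minimal sketch of the Dataset contract the pair above illustrates.
import torch
from torch.utils.data import Dataset, DataLoader

class ToyPairs(Dataset):
    def __init__(self, n=8):
        self.x = torch.randn(n, 4)
        self.y = torch.randint(0, 2, (n,))

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        # The primary entry point: called once per index by the DataLoader,
        # returning one example as a dict of tensors.
        return {"x": self.x[index], "y": self.y[index]}

loader = DataLoader(ToyPairs(), batch_size=4, shuffle=True)
for batch in loader:
    print(batch["x"].shape, batch["y"].shape)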
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
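The metadata block above declares a single triplet objective over (query, document, negatives). A hedged sketch of how such a spec might be expanded into training triplets follows; the row dict, its keys, and the sample texts are hypothetical placeholders mirroring the dump's field names, and the metadata itself does not mandate this particular expansion.

# Hypothetical illustration of consuming the "triplet" objective above:
# expand one row into (anchor, positive, negative) text triplets.
row = {
    "query": "example query",
    "document": "relevant document text",
    "negatives": ["unrelated text 1", "unrelated text 2"],
}

triplets = [
    (row["query"], row["document"], neg)  # (anchor, positive, negative)
    for neg in row["negatives"]
]
for anchor, positive, negative in triplets:
    print(anchor, "|", positive, "|", negative)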
[ "def loadData(self):\n batch_size = 256\n \n #if self.conv_sg == True:\n # batch_size = 1 \n \n download = True\n root = self.root + self.dataset\n if self.dataset == \"MNIST\": \n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n trainset = torchvision.datasets.MNIST(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.MNIST(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR10\":\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.4914, 0.4822, 0.4465,), (0.2023, 0.1994, 0.2010,))])\n trainset = torchvision.datasets.CIFAR10(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR10(root, train=False, download=download, transform=transform)\n \n if self.dataset == \"CIFAR100\":\n transform = transforms.Compose([transforms.ToTensor()])\n trainset = torchvision.datasets.CIFAR100(root, train=True, download=download, transform=transform)\n testset = torchvision.datasets.CIFAR100(root, train=False, download=download, transform=transform)\n \n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size = batch_size,\n shuffle=False, num_workers=0, pin_memory = False)\n \n testloader = torch.utils.data.DataLoader(testset, batch_size= batch_size,\n shuffle=False, num_workers=2, pin_memory = False)\n \n return trainloader, testloader", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()", "def main(args):\n data_transform = transforms.Compose([\n transforms.Scale((256, 256)),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n dataset = datasets.ImageFolder(root=args.root_dir, transform=data_transform)\n dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, \n shuffle=False, num_workers=0, pin_memory=True)\n net = get_feature_extractor()\n\n if torch.cuda.is_available():\n net = net.cuda()\n\n features_out = np.zeros((len(dataset), 4096))\n labels_out = np.zeros(len(dataset))\n \n p = progressbar.ProgressBar(widgets=[progressbar.ETA(), ' ', progressbar.Percentage()])\n for i, samples in p(enumerate(dataloader)):\n images, labels = samples\n if torch.cuda.is_available():\n images = images.cuda()\n images = Variable(images)\n features = net(images).cpu().data.numpy()\n features_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = features\n labels_out[i*BATCH_SIZE:i*BATCH_SIZE+BATCH_SIZE] = labels.int().numpy()\n print(i)\n\n with open(os.path.join(args.out, 'features.pickle'),'wb') as f:\n pickle.dump(features_out, f)\n with open(os.path.join(args.out, 'labels.pickle'),'wb') as f:\n pickle.dump(labels_out, f)", "def train_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_train, **self.dl_kwargs)", "def main(args):\n\n print(now(), \"test_model.py main() running.\")\n\n test_log = \"clean_test_log.txt\"\n to_log_file(args, args.output, test_log)\n\n # Set device\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n ####################################################\n # Dataset\n if args.dataset.lower() == \"cifar10\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader 
= torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n elif args.dataset.lower() == \"cifar100\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n\n elif args.dataset.lower() == \"tinyimagenet_first\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"firsthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"firsthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_last\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"lasthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"lasthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_all\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"all\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"all\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n else:\n print(\"Dataset not yet implemented. 
Exiting from test_model.py.\")\n sys.exit()\n\n ####################################################\n\n ####################################################\n # Network and Optimizer\n net = get_model(args.model, args.dataset)\n\n # load model from path if a path is provided\n if args.model_path is not None:\n net = load_model_from_checkpoint(args.model, args.model_path, args.dataset)\n else:\n print(\"No model path provided, continuing test with untrained network.\")\n net = net.to(device)\n ####################################################\n\n ####################################################\n # Test Model\n training_acc = test(net, trainloader, device)\n natural_acc = test(net, testloader, device)\n print(now(), \" Training accuracy: \", training_acc)\n print(now(), \" Natural accuracy: \", natural_acc)\n stats = OrderedDict(\n [\n (\"model path\", args.model_path),\n (\"model\", args.model),\n (\"normalize\", args.normalize),\n (\"augment\", args.train_augment),\n (\"training_acc\", training_acc),\n (\"natural_acc\", natural_acc),\n ]\n )\n to_results_table(stats, args.output, \"clean_performance.csv\")\n ####################################################\n\n return", "def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)", "def load_data(args, dataset_str):\n\n if dataset_str == 'friendster':\n dataset = h5py.File(\"../data/friendster/friendster_25K.h5\")\n adj_list = dataset[\"adjacency\"][:] # Adjacency list\n if args.model_choice == 'gs' or args.model_choice == 'gs_rand':\n graph = defaultdict(set)\n for i in range(len(adj_list)):\n for j in adj_list[i]:\n graph[i].add(j)\n graph[j].add(i)\n adj = graph\n else:\n adj = torch.zeros((len(adj_list), len(adj_list)))\n for i in range(len(adj_list)):\n for j in adj_list[i]:\n adj[i, j] = 1\n features = dataset[\"features\"][:] # Feature matrix\n labels = np.load(\"../data/friendster/age_labels.npy\", allow_pickle=True)\n features = features[:, 1:]\n mu = features.mean(0)\n sigma = features.std(0)\n sigma[sigma == 0] = 1\n features = (features - mu) / sigma\n features = torch.FloatTensor(features)\n elif dataset_str == 'fb':\n edge_list = np.load(\"../data/fb.edgelist.npy\")\n labels = np.load(\"../data/fb.labels.npy\")\n adj = torch.zeros((len(labels)), len(labels))\n for (i,j) in edge_list:\n adj[i, j] = 1\n adj[j, i] = 1\n features = np.load(\"../data/fb.attrs.npy\")\n features = torch.FloatTensor(features)\n # print(labels)\n elif dataset_str == 'protein':\n edge_list = np.loadtxt(\"../data/proteins/edges_protein.txt\")\n labels = np.loadtxt(\"../data/proteins/labels_protein.txt\")\n features = np.load(\"../data/proteins/features_protein.npy\")\n mu = features.mean(0)\n sigma = features.std(0)\n sigma[sigma == 0] = 1\n features = (features - mu) / sigma\n features = torch.FloatTensor(features)\n if args.model_choice == 'gs_rand':\n graph = defaultdict(set)\n for (i, j) in edge_list:\n graph[i].add(j)\n graph[j].add(i)\n graph[8890].add(8890)\n graph[11963].add(11963)\n adj = graph\n\n else:\n adj = torch.zeros((len(labels)), len(labels))\n for (i, j) in edge_list:\n adj[int(i), int(j)] = 1\n adj[int(j), int(i)] = 1\n\n else:\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"../data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, 
graph = tuple(objects)\n test_idx_reorder = parse_index_file(\"../data/ind.{}.test.index\".format(dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n labels = torch.LongTensor(labels)\n labels = torch.max(labels, 1)[1]\n features = normalize(features)\n features = torch.FloatTensor(np.array(features.todense()))\n if not args.model_choice == 'gs' and not args.model_choice == 'gs_rand':\n # print(adj)\n adj = sp.coo_matrix(adj)\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n elif args.dataset != 'friendster' and args.dataset != 'protein':\n adj = sp.coo_matrix(adj)\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n adj = np.array(adj.todense())\n graph = defaultdict(set)\n edges = set()\n for i, v in enumerate(adj):\n for j, u in enumerate(v):\n if u != 0 and frozenset([i, j]) not in edges:\n edges.add(frozenset([i, j]))\n graph[i].add(j)\n graph[j].add(i)\n adj = graph\n labels = torch.LongTensor(labels)\n if args.dataset != 'protein':\n idx_train_full = torch.from_numpy(\n np.loadtxt('../data/idx_train_' + args.dataset + '_' + str(args.trial) + '.txt')).long()\n idx_test = torch.from_numpy(\n np.loadtxt('../data/idx_test_' + args.dataset + '_' + str(args.trial) + '.txt')).long()\n idx_val_full = torch.from_numpy(\n np.loadtxt('../data/idx_val_' + args.dataset + '_' + str(args.trial) + '.txt')).long()\n\n return adj, features, labels, idx_train_full, idx_val_full, idx_test", "def setup(self, ds):\n pass", "def load_torch_data(load_data_func):\n\n def torch_loader(dataset, data_path, batch_size, shuffle=True, cuda_device=None, num_workers=1):\n (train_data, val_data), (train_labels, val_labels), label_names = load_data_func(dataset, data_path)\n\n kwargs = {'num_workers': num_workers, 'pin_memory': True} if cuda_device is not None else {}\n kwargs['drop_last'] = True\n\n if type(train_data) == numpy.ndarray:\n train_dataset = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_labels))\n val_dataset = TensorDataset(torch.from_numpy(val_data), torch.from_numpy(val_labels))\n elif type(train_data) == scipy.sparse.csr.csr_matrix:\n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_trans = TfidfTransformer(norm=None)\n tfidf_trans.fit(train_data)\n train_dataset = SparseDataset(train_data, tfidf_trans.idf_)\n val_dataset = SparseDataset(val_data, tfidf_trans.idf_)\n else:\n train_dataset = torchvision.datasets.ImageFolder(train_data)\n val_dataset = torchvision.datasets.ImageFolder(val_data)\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, **kwargs)\n val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\n return train_loader, val_loader, label_names\n\n return torch_loader", "def __init__(self, args):\n self.train_img_file = os.path.join(args.data_dir, args.train_img_file)\n self.train_lbl_file = os.path.join(args.data_dir, args.train_lbl_file)\n self.test_img_file = os.path.join(args.data_dir, args.test_img_file)\n self.test_lbl_file = os.path.join(args.data_dir, args.test_lbl_file)\n self.batch_size = args.batch_size\n self.num_workers = args.data_workders\n self.shuffle = True\n self.dataset_name = 
args.dataset_name\n self.pin_memory = False #args.cuda\n\n # check dataset files exist\n files = [self.train_img_file, self.train_lbl_file,\n self.test_img_file, self.test_lbl_file]\n for file in files:\n if not os.path.isfile(file):\n msg = \"Data file not found. Please check the path \" +\\\n \"or download files using scripts/download_files.py \"\n raise IOError(msg)", "def dataset(options):\n pass", "def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )", "def dataloaders():\n # train data path\n data_train = '../dataset/train/'\n # set transformations\n train_transforms = transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n \n train_data = datasets.ImageFolder(data_train, transform = train_transforms)\n trainloader = torch.utils.data.DataLoader(train_data, batch_size = 16, shuffle = True)\n \n return trainloader", "def load_data(data_dir):\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n # define your transforms for the training, validation, and testing sets\n data_transforms_training = transforms.Compose([\n transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_transforms_validation = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n data_transforms_test = transforms.Compose([\n transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n # Load the datasets with ImageFolder\n image_datasets_training = datasets.ImageFolder(train_dir, transform=data_transforms_training)\n image_datasets_validation = datasets.ImageFolder(valid_dir, transform=data_transforms_validation)\n image_datasets_test = datasets.ImageFolder(test_dir, transform=data_transforms_test)\n\n # Using the image datasets and the trainforms, define the dataloaders\n dataloaders_training = torch.utils.data.DataLoader(image_datasets_training, shuffle=True, batch_size=128)\n dataloaders_validation = torch.utils.data.DataLoader(image_datasets_validation, shuffle=True, batch_size=128)\n dataloaders_test = torch.utils.data.DataLoader(image_datasets_test, shuffle=True, batch_size=128)\n\n return {\"training_dataloader\": dataloaders_training,\n \"validation_dataloader\": dataloaders_validation,\n \"testing_dataloader\": dataloaders_test,\n \"class_to_idx\": image_datasets_training.class_to_idx}", "def __init__(self, dataset_path):\n super(TorchData, self).__init__()\n self.dataset = h5py.File(dataset_path, 'r')\n self.bg = self.dataset['bg']\n self.vocal = self.dataset['vocal']\n self.mix = self.dataset['mix']\n self.len = self.bg.shape[0]", "def 
main(config_file):\n \n # Load the configuration from json file\n assert os.path.isfile(\n config_file), \"No json configuration file found at {}\".format(config_file)\n config = utils.LoadConfig(config_file)\n\n # use GPU if available\n config.cuda = torch.cuda.is_available()\n\n # Set the random seed for reproducible experiments\n torch.manual_seed(config.general['seed'])\n if config.cuda:\n torch.cuda.manual_seed(config.general['seed'])\n \n #Generate output path if it does not exist\n out_dir = config.general['out_dir']\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n #Save config file\n config.save(os.path.join(out_dir, 'experiment_config.json'))\n\n # Set the logger\n utils.set_logger(os.path.join(out_dir, 'train.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the datasets...\")\n\n # Load data\n train, test = read_and_format_full_dataset()\n train_kaggle, test_kaggle = read_and_format_kaggle_dataset()\n \n #Using kaggle's training data for training\n train, val = split_train_val_partition(train_kaggle, config.data['split_train_percentage'],config.general['seed'])\n \n #Adding data augmentation to training\n # train = MNISTDatasetLabels(train,\n # transform=transforms.Compose([\n # Normalization(),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomPerspective(),\n # transforms.RandomRotation(30)])) \n \n train = MNISTDatasetLabels(train,\n transform=transforms.Compose([\n Normalization(),\n transforms.RandomRotation(15)])) \n \n val = MNISTDatasetLabels(val,\n transform=transforms.Compose([Normalization()])) \n \n test = MNISTDatasetLabels(test,\n transform=transforms.Compose([Normalization()])) \n \n test_kaggle = MNISTDatasetNoLabels(test_kaggle,\n transform=transforms.Compose([Normalization()])) \n \n train_dataloader = DataLoader(train, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n val_dataloader = DataLoader(val, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n test_dataloader = DataLoader(test, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n test_kaggle_dataloader = DataLoader(test_kaggle, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n\n logging.info(\"- done.\")\n \n # Train the model\n logging.info(\"Starting training for {} epoch(s)\".format(config.CNN_train['num_epochs']))\n train_wraper(train_dataloader, val_dataloader, config)\n logging.info(\"- done.\")\n \n #Evaluate the model test set \n # Using Kaggle's test set unknown labels (can have true labels or not (Kaggle's case))\n logging.info(\"Starting the model evaluation on Kaggle's test data\")\n eval_out_kaggle = evaluate_return_labels(test_kaggle_dataloader, config)\n #Save the results\n eval_out_kaggle.to_csv(os.path.join(out_dir, 'test_result_kaggle.csv'),index=False)\n logging.info(\"- done.\")\n \n # Using test set with known labels\n logging.info(\"Starting the model evaluation on test data\")\n eval_out = evaluate_return_labels(test_dataloader, config)\n #Save the results\n eval_out.to_csv(os.path.join(out_dir, 'test_result.csv'),index=False)\n logging.info(\"- done.\")\n \n # Compute metrics\n if 'TrueLabel' in eval_out:\n #Evaluate the model with test set (known labels)\n logging.info(\"Calculating final metrics\")\n # Get unique true labels in dataset\n classes = eval_out.TrueLabel.unique()\n # Sort 
them\n classes.sort()\n # Calculate accuracy\n accuracy_total = accuracy(eval_out)\n # Calculate error rate\n error_rate_total = error_rate(eval_out)\n # Confussion matrix\n c_matrix = confusion_matrix(eval_out, classes)\n plot_confusion_matrix(c_matrix, classes, 'CNN', out_dir)\n # Overall metrics\n metrics_per_class, metrics_overall = confusion_matrix_metrics(c_matrix)\n metrics_overall['accuracy_percent'] = accuracy_total\n metrics_overall['error_rate_percent'] = error_rate_total\n \n metrics_per_class.to_csv(os.path.join(out_dir, 'CNN_results_per_class.csv'))\n metrics_overall.to_csv(os.path.join(out_dir, 'CNN_results_overall.csv'))\n \n logging.info(\"- done.\")", "def __init__(self, dataset: Dataset):\n self.dataset = dataset", "def data_loaders(args):\n\n transform = transforms.Compose([\n transforms.Resize(64),\n transforms.ToTensor(),\n lambda image: (image - 0.5) * 2\n ])\n\n train_mnist = datasets.MNIST(\n root=args.database_root,\n train=True,\n download=True,\n transform=transform\n )\n train_loader = DataLoader(\n dataset=train_mnist,\n batch_size=args.train_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n test_mnist = datasets.MNIST(\n root=args.database_root,\n train=False,\n download=True,\n transform=transform\n )\n test_loader = DataLoader(\n dataset=test_mnist,\n batch_size=args.test_batch_size,\n shuffle=True,\n num_workers=1,\n pin_memory=True\n )\n\n return train_loader, test_loader", "def __init__(self, options):\n print('Prepare the network and data.')\n self._options = options\n # Network.\n self._net = torch.nn.DataParallel(BCNN()).cuda()\n # Load the model from disk.\n #self._net.load_state_dict(torch.load(self._path['model']))\n print(self._net)\n # Criterion.\n self._criterion = torch.nn.CrossEntropyLoss().cuda()\n # Solver.\n self._solver = torch.optim.SGD(\n self._net.parameters(), lr=self._options['base_lr'],\n momentum=0.9, weight_decay=self._options['weight_decay'])\n self._scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self._solver, mode='max', factor=0.1, patience=3, verbose=True,\n threshold=1e-4)\n\n self._train_path = os.path.join(self._options['text_path'],'train.txt')\n self._test_path = os.path.join(self._options['text_path'],'test.txt')\n\n #Dataloader\n transform = T.Compose([\n T.Resize(448), \n T.CenterCrop(448), \n T.ToTensor(), \n T.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)) \n ])\n\n train_data = Data( train_path = self._train_path, aug_path = options['aug_data'], img_transform=transform)\n\n\n\n test_data = Data( train_path = self._test_path, aug_path = options['aug_data'], img_transform=transform)\n\n\n\n self._train_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=self._options['batch_size'],drop_last=True, pin_memory=True,\n shuffle=True,num_workers=4)\n\n self._test_loader = torch.utils.data.DataLoader(dataset=test_data,\n batch_size=self._options['batch_size'],pin_memory=True,\n shuffle=False,num_workers=4)", "def __init__(\n self,\n data: List[List[Dict]],\n class_ind_maps: Dict,\n augmentor: Callable = lambda samples, classes: samples,\n transform: nn.Module = transforms.Lambda(\n lambda np_audio: torch.from_numpy(np_audio)[:, None, :]),\n num_parallel_versions=None,\n add_zero_parallel=False,\n utt_len_sec: float = 3.,\n size: int = None,\n samplerate: int = 16000,\n convert_to_ohe: bool = False,\n mode: str = \"torch\",\n debug: bool = True,\n ):\n # data might be dict {spk_ind : List[spk_utts]} or list of tuples : (utt_path, spk_ind)\n # spec nn.Module 
might be passed into Dataset to calculate features on CPU (for TPU for example)\n assert mode in [\"keras\", \"torch\"]\n print(f\"created ds with : {len(data)}\")\n self.data = data\n self.size = size\n self.mode = mode\n self.debug = debug\n self.transform = transform\n self.class_ind_maps = class_ind_maps\n self.add_zero_parallel = add_zero_parallel\n self.random = num_parallel_versions is not None\n self.num_parallel_versions = num_parallel_versions\n # if isinstance(augmentor,BaseAugmentor):\n # # Wrap to handle self.augmentor(samples,cls_ind) signature\n # self.augmentor = lambda samples,cls_ind : augmentor(samples)\n # else:\n self.augmentor = augmentor\n self.convert_to_ohe = convert_to_ohe\n # Initial min/max output_len in samples\n self.utt_len_samples = int(utt_len_sec * samplerate)\n self.load_audio = get_audio_loader(samplerate=samplerate, raise_error=True)", "def get_dataloader(dataset, datadir, train_bs, test_bs, dataidxs=None, noise_level=0, net_id=None, total=0):\n if dataset in ('mnist', 'femnist', 'fmnist', 'cifar10','cifar100', 'svhn', 'generated', 'covtype', 'a9a', 'rcv1', 'SUSY','tinyimagenet'):\n if dataset == 'mnist':\n dl_obj = MNIST_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'femnist':\n dl_obj = FEMNIST\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'fmnist':\n dl_obj = FashionMNIST_truncated\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n elif dataset == 'svhn':\n dl_obj = SVHN_custom\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n\n\n elif dataset == 'cifar10':\n dl_obj = CIFAR10_truncated\n\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(\n Variable(x.unsqueeze(0), requires_grad=False),\n (4, 4, 4, 4), mode='reflect').data.squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)\n ])\n # data prep for test set\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n AddGaussianNoise(0., noise_level, net_id, total)])\n \n elif dataset == 'cifar100':\n dl_obj = CIFAR100_truncated\n\n normalize = transforms.Normalize(mean=[0.5070751592371323, 0.48654887331495095, 0.4409178433670343],\n std=[0.2673342858792401, 0.2564384629170883, 0.27615047132568404])\n # transform_train = transforms.Compose([\n # transforms.RandomCrop(32),\n # transforms.RandomHorizontalFlip(),\n # transforms.ToTensor(),\n # normalize\n # ])\n transform_train = transforms.Compose([\n # transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(15),\n transforms.ToTensor(),\n normalize\n ])\n # data prep for test set\n transform_test = 
transforms.Compose([\n transforms.ToTensor(),\n normalize])\n\n elif dataset == 'tinyimagenet': \n # random_ids = np.random.randint(1000, size=datasize)\n # train_indices = random_ids\n\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n\n train_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/train\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=train_bs, drop_last=True)\n \n test_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir +\"/test\",\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=test_bs, drop_last=True)\n\n return train_dl, test_dl, None, None\n\n\n else:\n dl_obj = Generated\n transform_train = None\n transform_test = None\n\n\n train_ds = dl_obj(datadir, dataidxs=dataidxs, train=True, transform=transform_train, download=True)\n test_ds = dl_obj(datadir, train=False, transform=transform_test, download=True)\n\n train_dl = data.DataLoader(dataset=train_ds, batch_size=train_bs, shuffle=True, drop_last= dataset in ['cifar100'])\n test_dl = data.DataLoader(dataset=test_ds, batch_size=test_bs, shuffle=False, drop_last=False)\n\n return train_dl, test_dl, train_ds, test_ds", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, 
running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def setup_datasets(self):\r\n\r\n train_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.RandomRotation(degrees=self.random_angle, resample=Image.BILINEAR),\r\n transforms.RandomResizedCrop(\r\n size=self.crop_size, scale=(1-self.random_scale, 1+self.random_scale), ratio=(1, 1)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n val_transform = transforms.Compose(\r\n [\r\n transforms.Resize(self.crop_size),\r\n transforms.CenterCrop(self.crop_size),\r\n transforms.ToTensor(),\r\n transforms.Normalize(\r\n mean=[0.485, 0.456, 0.406],\r\n std=[0.229, 0.224, 0.225]\r\n )\r\n ]\r\n )\r\n\r\n train_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='train2014',\r\n transform=train_transform,\r\n dataset_size_ratio=self.dataset_size_ratio\r\n )\r\n train_subset_dataset = Subset(train_dataset, range(0, len(train_dataset), 5*self.dataset_size_ratio))\r\n val_dataset = CocoDatasetPairs(\r\n root_dir=self.coco_path,\r\n set_name='val2014',\r\n transform=val_transform,\r\n )\r\n\r\n train_loader = DataLoader(\r\n train_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=True,\r\n num_workers=self.num_workers\r\n )\r\n train_subset_loader = DataLoader(\r\n train_subset_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n val_loader = DataLoader(\r\n val_dataset,\r\n batch_size=self.batch_size,\r\n shuffle=False,\r\n num_workers=self.num_workers\r\n )\r\n return train_loader, train_subset_loader, val_loader", "def example1():\n DATASETS_DICT = './data'\n IMG_SIZE = CONFIG['img_size']\n\n # x_train = DataLoader.load(os.path.join(DATASETS_DICT, 'x_train_cats_dogs.npy'))\n # y_train = DataLoader.load(os.path.join(DATASETS_DICT, 'y_train_cats_dogs.npy'))\n # x_train = DataLoader.load(os.path.join(DATASETS_DICT, 'x_cats_dogs_skimage.npy'))\n # y_train = DataLoader.load(os.path.join(DATASETS_DICT, 'y_cats_dogs_skimage.npy'))\n\n # x_train = DataLoader.load(os.path.join(DATASETS_DICT, 'x_rps_skimage.npy'))\n # y_train = DataLoader.load(os.path.join(DATASETS_DICT, 'y_rps_skimage.npy'))\n x_train = DataLoader.load_npy(CONFIG['data']['x_path'])\n y_train = DataLoader.load_npy(CONFIG['data']['y_path'])\n\n x_train = torch.Tensor(x_train).view(-1, IMG_SIZE, IMG_SIZE)\n y_train = torch.Tensor(y_train)\n\n N_TRAIN = CONFIG['n_train']\n N_EVAL = CONFIG['n_eval']\n N_TEST = CONFIG['n_test']\n\n if N_TRAIN + N_EVAL + N_TEST > len(x_train):\n raise Exception('Not enough data!')\n\n\n # resnet50 works with 224, 244 input size\n n_output = 2\n net = ConvNet(n_output)\n optimizer = optim.Adam(net.parameters(), lr=1e-3)\n loss_function = nn.MSELoss()\n\n # split data\n x_eval = x_train[:N_EVAL]\n y_eval = y_train[:N_EVAL]\n\n x_test = x_train[N_EVAL:N_EVAL+N_TEST]\n y_test = y_train[N_EVAL:N_EVAL+N_TEST]\n\n x_train = x_train[N_EVAL+N_TEST:N_EVAL+N_TEST+N_TRAIN]\n y_oracle = y_train[N_EVAL+N_TEST:N_EVAL+N_TEST+N_TRAIN]\n\n # show_grid_imgs(x_train[:16], y_oracle[:16], (4, 4))\n\n EPOCHS = 10\n BATCH_SIZE = 128\n\n print('Start training')\n for epoch in range(EPOCHS):\n for k in tqdm(range(0, len(x_train), BATCH_SIZE)):\n batch_x = x_train[k:k+BATCH_SIZE].view(-1, 1, IMG_SIZE, IMG_SIZE)\n batch_y = y_oracle[k:k+BATCH_SIZE]\n\n net.zero_grad()\n\n out = net(batch_x)\n loss 
= loss_function(out, batch_y)\n loss.backward()\n optimizer.step()\n\n print(f'Epoch: {epoch}. Loss: {loss}')\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n for k in tqdm(range(len(x_test))):\n real_class = torch.argmax(y_test[k])\n net_out = net(x_test[k].view(-1, 1, IMG_SIZE, IMG_SIZE))[0] # returns list\n predicted_class = torch.argmax(net_out)\n\n if predicted_class == real_class:\n correct += 1\n total += 1\n\n print('Accuracy: ', round(correct/total, 3))\n\n torch.save(net, f'{DATASETS_DICT}/cnn_rps_model.pt')", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader", "def run_data (arguments):\n if arguments.define_labels:\n data.define_labels()\n elif arguments.preprocess:\n # Preprocess from data_raw --> data_preprocessed\n data.preprocess()\n elif arguments.annotate:\n # Annotate from data_preprocessed --> data_annotated\n reverse = False # DEBUG\n annotator.annotate(reverse)\n elif arguments.split:\n # Split from data_annotated --> train.txt/valid.txt\n restrict = 100 # Default: Keep 100% of all files\n splitter.train_valid(restrict_to=restrict)", "def generate_dataset():\n if not os.path.exists(\"../data/COVID-19/COVID-19.npy\"):\n print(\"Processing Training Data.\")\n training_data = get_training_data('../data/COVID-19/train')\n print(\"Processing Test Data.\")\n test_data = get_training_data('../data/COVID-19/test')\n\n x_train, y_train, x_test, y_test = [], [], [], []\n\n for feature, label in training_data:\n x_train.append(feature)\n y_train.append(label)\n\n for feature, label in test_data:\n x_test.append(feature)\n y_test.append(label)\n\n # Normalize the data\n x_train = np.array(x_train) / 255\n x_test = np.array(x_test) / 255\n\n # resize data for deep learning\n x_train = x_train.reshape(-1, 3, img_size, img_size)\n y_train = np.array(y_train)\n x_test = x_test.reshape(-1, 3, img_size, img_size)\n y_test = np.array(y_test)\n\n # With data augmentation to prevent overfitting and handling the imbalance in dataset\n dataset = {\"x_train\": x_train, \"y_train\": y_train, \"x_test\": x_test, \"y_test\": y_test}\n np.save(\"../data/COVID-19/COVID-19.npy\", dataset)\n else:\n dataset = np.load(\"../data/COVID-19/COVID-19.npy\", allow_pickle=True).item()\n x_train, y_train, x_test, y_test = dataset[\"x_train\"], dataset[\"y_train\"], dataset[\"x_test\"], dataset[\"y_test\"]\n\n x_train_tensor = torch.from_numpy(x_train)\n x_train_tensor = 
x_train_tensor.type(torch.FloatTensor)\n y_train_tensor = torch.from_numpy(y_train)\n y_train_tensor = y_train_tensor.type(torch.LongTensor)\n x_test_tensor = torch.from_numpy(x_test)\n x_test_tensor = x_test_tensor.type(torch.FloatTensor)\n y_test_tensor = torch.from_numpy(y_test)\n y_test_tensor = y_test_tensor.type(torch.LongTensor)\n\n train_dataset = TensorDataset(x_train_tensor, y_train_tensor)\n test_dataset = TensorDataset(x_test_tensor, y_test_tensor)\n\n return train_dataset, test_dataset", "def __init__(self, data_dir: Path, config: Config):\n self.device = torch.device(\n \"cuda\" if torch.cuda.is_available() else \"cpu\"\n )\n\n training_path_list, ground_truth_path_list = get_file_paths(data_dir)\n\n X_train, X_test, y_train, y_test = self.train_test_split(\n training_path_list,\n ground_truth_path_list,\n test_portion=config.val_split,\n )\n\n train_dataset = TrainDataset(\n config, X_train, y_train, random_augmentation=True\n )\n val_dataset = TrainDataset(\n config, X_test, y_test, random_augmentation=False\n )\n\n self.train_loader = DataLoader(\n train_dataset,\n batch_size=config.train_batch_size,\n shuffle=True,\n pin_memory=True,\n )\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=config.test_batch_size,\n # No shuffle as it won't make any difference\n pin_memory=True,\n )\n\n model = UNet(INPUT_CHANNELS, OUTPUT_CHANNELS, config)\n self.model = DataParallel(model).to(self.device)\n\n if config.loss == \"logit_bce\":\n loss_weight = (\n self._get_loss_weight() if config.balanced_loss else None\n )\n # Using logits directly is numerically more stable and efficient\n self.class_loss_fn = BCEWithLogitsLoss(pos_weight=loss_weight)\n elif config.loss == \"soft_dice\":\n self.class_loss_fn = soft_dice_loss\n\n self.texture_transform = get_texture_transform(config)\n self.shape_loss_fn = ContrastiveLoss(config.temperature)\n\n self.optim = Adam(\n self.model.parameters(),\n lr=config.learn_rate,\n weight_decay=config.weight_decay,\n )\n max_steps = config.epochs * len(self.train_loader)\n self.scheduler = OneCycleLR(\n self.optim,\n max_lr=config.max_learn_rate,\n total_steps=max_steps,\n )\n self.scaler = GradScaler(enabled=config.mixed_precision)\n\n # Used when dumping hyper-params to a file\n self.config = config\n\n # To store best acc achieved so far\n self.best_acc = 0.0", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)", "def main():\n # Manual seed for reproducibility\n torch.manual_seed(363636)\n\n # Global instances\n global args, use_cuda, device\n # Instantiating the parser\n args = parser.parse_args()\n # Global CUDA flag\n use_cuda = args.cuda and torch.cuda.is_available()\n # Defining device and device's map locationo\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n print('chosen device: ', device)\n\n # Defining loss function and printing CUDA information (if available)\n if use_cuda:\n print(\"PyTorch version: \")\n print(torch.__version__)\n print(\"CUDA Version: \")\n print(torch.version.cuda)\n print(\"cuDNN version is: \")\n print(cudnn.version())\n cudnn.benchmark = True\n criterion = nn.CrossEntropyLoss().cuda()\n else:\n criterion = nn.CrossEntropyLoss()\n\n # Dataloaders for CIFAR, ImageNet and MNIST\n if args.dataset == 'CIFAR100':\n\n normalize = transforms.Normalize(mean=[x / 255.0 for x in 
[125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(root=args.data_path, train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.075),\n transforms.ToTensor(),\n normalize,\n Cutout(n_holes=1, length=16),\n ]), download=True),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR100(root=args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.val_batch_size, shuffle=False, **kwargs)\n\n elif args.dataset == 'ImageNet':\n\n traindir = os.path.join(args.data_path, 'train')\n valdir = os.path.join(args.data_path, 'val')\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n\n train_dataset = datasets.ImageFolder(\n traindir,\n transforms.Compose([\n transforms.RandomResizedCrop(args.image_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ]))\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=args.workers, pin_memory=True)\n\n image_size = args.image_size\n val_dataset = datasets.ImageFolder(\n valdir,\n transforms.Compose([\n transforms.Resize(image_size, interpolation=PIL.Image.BICUBIC),\n transforms.CenterCrop(image_size),\n transforms.ToTensor(),\n normalize,\n ]))\n val_loader = torch.utils.data.DataLoader(\n val_dataset, batch_size=args.val_batch_size, shuffle=False,\n num_workers=args.workers, pin_memory=True)\n\n elif args.dataset == 'MNIST':\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_path, train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n val_loader = torch.utils.data.DataLoader(\n datasets.MNIST(args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.val_batch_size, shuffle=True, **kwargs)\n\n elif args.dataset == 'CIFAR10':\n\n normalize = transforms.Normalize(mean=[x / 255.0 for x in [125.3, 123.0, 113.9]],\n std=[x / 255.0 for x in [63.0, 62.1, 66.7]])\n\n kwargs = {'num_workers': args.workers, 'pin_memory': True} if use_cuda else {}\n\n train_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.data_path, train=True, transform=transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, 4),\n transforms.ToTensor(),\n normalize,\n ]), download=True),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n\n val_loader = torch.utils.data.DataLoader(\n datasets.CIFAR10(root=args.data_path, train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n normalize,\n ])),\n batch_size=args.val_batch_size, shuffle=False, **kwargs)\n\n # original grid = [(1.0, 1.0), (1.9, 1.0), (1.7, 1.1), (1.6, 1.1), (1.4, 1.2), (1.2, 1.3), (1.0, 1.4)]\n\n grid = [(args.grid[i], args.grid[i+1]) for i in range(0, len(args.grid), 2)]\n\n for coeff in grid:\n alpha = coeff[0] ** args.phi\n beta = coeff[1] ** args.phi\n grid_search(train_loader, 
val_loader, criterion, alpha, beta)", "def __init__(self):\n\n TEST_RATIO = 0.05\n mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)\n idxs = np.arange(mnist_trainset.train_data.size(0))\n np.random.shuffle(idxs)\n\n #print(torch.min(mnist_trainset.train_labels), torch.max(mnist_trainset.train_labels))\n #print(mnist_trainset.train_labels.size())\n \n # reshape input data to (1, 784) and normalize to range [0., 1.]\n self.train_data = torch.reshape(\n mnist_trainset.train_data[idxs].float(), (-1,1,28,28))/255.\n self.data_size = self.train_data.size(0)\n self.train_len = self.train_data.size(0)\n self.train_label = torch.Tensor([1]).float() # since there is only one class - 'real' image\n\n print('Train images -- {}'.format(self.train_data.size()))", "def __prepare(self, data):\n #print(\"Running Prepare data\")\n #print(data)\n #print(type(data))\n if len(data) > 1:\n if type(data[0]) == np.ndarray:\n return np.concatenate(data)\n else:\n return torch.cat(data).cpu().numpy()\n else:\n return data[0].cpu().numpy()", "def __init__(self):\n #self.NN = Neural_Network()\n y_vals = pd.read_csv('training_data_y.csv')\n x_vals_original = pd.read_csv('training_data_x.csv')\n x_vals_original.columns = ['R1', 'G1', 'B1', 'W1', 'R2', 'G2', 'B2', 'W2', 'R3', 'G3', 'B3', 'W3']\n total_x_train = self.getNewDF_X(x_vals_original)\n total_y_train = self.getNewDF_Y(y_vals) \n #training data is numpy arrays here\n x_arr = np.asarray(total_x_train,dtype=np.float32)\n y_train = np.asarray(total_y_train,dtype=np.float32)\n #convert training data to tensors and scale it\n x_train = torch.tensor((x_arr), dtype=torch.float)\n self.x_train = self.scaleInputTestData(x_train)\n self.y_train = torch.tensor((y_train), dtype=torch.float) / 100", "def main():\n\n downloadData()\n\n parser = argparse.ArgumentParser(description=\"Train model or test model (default)\")\n parser.add_argument(\"--train-model\", action=\"store_true\", default=False)\n parser.add_argument(\"--num-channels\", type=int, help=\"Number of channels in lowest dimension\", default=8)\n\n arg_parser = parser.parse_args()\n\n if arg_parser.train_model:\n encoder, generator = train(channels=arg_parser.num_channels)\n torch.save(encoder.state_dict(), f\"../models/encoder_{arg_parser.num_channels}.model\")\n torch.save(generator.state_dict(), f\"../models/generator_{arg_parser.num_channels}.model\")\n else:\n validate_models(VALIDATE_CHANNELS)", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def main(args):\n # Use CUDA\n use_cuda = args.use_cuda and torch.cuda.is_available()\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n # Fix random seed\n torch.manual_seed(args.seed)\n\n # Generate token-to-index and index-to-token mapping\n tok2id, id2tok = data_loader.build_or_load_vocab(\n args.train, overwrite=False)\n\n print(\"*\" * 5)\n print(args)\n\n # Create DataLoader() objects\n params = {\n \"batch_size\": args.batch_size,\n \"collate_fn\": data_loader.collate_fn,\n \"shuffle\": args.shuffle,\n \"num_workers\": args.num_workers,\n }\n # train_dataset = data_loader.SNLIDataSet(args.train, tok2id)\n # train_loader = torch.utils.data.DataLoader(train_dataset, **params)\n val_dataset = data_loader.SNLIDataSet(args.val, tok2id)\n val_loader = torch.utils.data.DataLoader(val_dataset, **params)\n\n # Initialize model\n if args.model == \"rnn\": # RNN model\n model = RNN(\n vocab_size=const.MAX_VOCAB_SIZE, # Vocabulary size\n emb_dim=const.EMB_DIM, # Embedding dimensions\n 
hidden_dim=args.hidden_dim, # Hidden dimensions\n dropout_prob=args.dropout_prob, # Dropout probability\n padding_idx=const.PAD_IDX, # Padding token index\n num_classes=const.NUM_CLASSES, # Number of class labels\n id2tok=id2tok, # Vocabulary\n ).to(device)\n # Load model weights from disk\n model.load_state_dict(torch.load(const.MODELS + \"rnn.pt\"))\n model.eval()\n elif args.model == \"cnn\": # CNN model\n model = CNN(\n vocab_size=const.MAX_VOCAB_SIZE, # Vocabulary size\n emb_dim=const.EMB_DIM, # Embedding dimensions\n hidden_dim=args.hidden_dim, # Hidden dimensions\n kernel_size=args.kernel_size, # Kernel size\n dropout_prob=args.dropout_prob, # Dropout probability\n padding_idx=const.PAD_IDX, # Padding token index\n num_classes=const.NUM_CLASSES, # Number of class labels\n id2tok=id2tok, # Vocabulary\n ).to(device)\n # Load model weights from disk\n model.load_state_dict(torch.load(const.MODELS + \"cnn.pt\"))\n model.eval()\n else:\n print(\"Invalid model specification, exiting\")\n exit()\n\n # Criterion\n criterion = torch.nn.CrossEntropyLoss()\n # Model parameters\n params = [p for p in model.parameters() if p.requires_grad]\n\n # Inspect correct/incorrect predictions\n if args.inspect:\n right, wrong = eval_model(val_loader, model, device, criterion,\n inspect=True)\n print(\"\\nValidation premises with correct predictions:\\n\")\n for i, item in enumerate(right):\n text = \" \".join([id2tok[idx] for idx in item if idx > 0])\n print(\"#{}\\n {}\".format(i + 1, text))\n print(\"\\nValidation premises with incorrect predictions:\\n\")\n for i, item in enumerate(wrong):\n text = \" \".join([id2tok[idx] for idx in item if idx > 0])\n print(\"#{}\\n {}\".format(i + 1, text))\n return\n\n # Validation\n val_acc, _ = eval_model(val_loader, model, device, criterion)\n print(\"\\n Validation accuracy: {}\".format(val_acc))\n\n print(\"*\" * 5 + \"\\n\")", "def datasets(self):\n pass", "def get_datasets(data):\n train_dataset, test_dataset = None, None\n data_dir = '../data'\n\n if data == 'fmnist':\n transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.2860], std=[0.3530])])\n train_dataset = datasets.FashionMNIST(data_dir, train=True, download=True, transform=transform)\n test_dataset = datasets.FashionMNIST(data_dir, train=False, download=True, transform=transform)\n \n elif data == 'fedemnist':\n train_dir = '../data/Fed_EMNIST/fed_emnist_all_trainset.pt'\n test_dir = '../data/Fed_EMNIST/fed_emnist_all_valset.pt'\n train_dataset = torch.load(train_dir)\n test_dataset = torch.load(test_dir) \n \n elif data == 'cifar10':\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010)),\n ])\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True, transform=transform_train)\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True, transform=transform_test)\n train_dataset.targets, test_dataset.targets = torch.LongTensor(train_dataset.targets), torch.LongTensor(test_dataset.targets) \n \n return train_dataset, test_dataset", "def main():\n # Fix random seed.\n torch.manual_seed(0)\n\n # Create checkpoint directory.\n try:\n os.mkdir('checkpoints')\n except FileExistsError:\n pass\n\n # Make preparations.\n args = get_args()\n logger = get_logger()\n data_train, data_val, data_test = 
get_data(args.batch_size,\n args.num_workers)\n model = get_model()\n\n # Log command arguments.\n logger.info(' '.join(sys.argv))\n logger.info(vars(args))\n\n # Send the model to the GPU, if enabled and available.\n if args.cuda:\n model = model.cuda()\n\n # Create the loss function and optimizer.\n loss_function = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum)\n\n # Load checkpoint, if given.\n if args.checkpoint:\n load_checkpoint(args.checkpoint, model, optimizer)\n\n # Loop epochs.\n for epoch in range(args.num_epochs):\n logger.info(f'Epoch {epoch}:')\n\n mean_loss = train(model, loss_function, optimizer, data_train)\n logger.info(f' - [training] mean loss: {mean_loss:.3f}')\n\n accuracy = evaluate(model, data_val)\n logger.info(f' - [validation] accuracy: {accuracy:.3f}')\n\n torch.save([model.state_dict(), optimizer.state_dict()],\n os.path.join('checkpoints', f'{epoch}.pth'))\n\n # Run final evaluation on the test data.\n logger.info('Test:')\n accuracy = evaluate(model, data_test)\n logger.info(f' - [test] accuracy: {accuracy:.3f}')", "def main(opt):\n ##################################################################################################################\n # Setup\n ##################################################################################################################\n # Device handling (CPU, GPU, multi GPU)\n if opt.device is None:\n device = torch.device('cpu')\n opt.n_gpu = 0\n else:\n opt.n_gpu = len(opt.device)\n os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.device[opt.local_rank])\n device = torch.device('cuda:0')\n torch.cuda.set_device(0)\n # In the case of multi GPU: sets up distributed training\n if opt.n_gpu > 1 or opt.local_rank > 0:\n torch.distributed.init_process_group(backend='nccl')\n # Since we are in distributed mode, divide batch size by the number of GPUs\n assert opt.batch_size % opt.n_gpu == 0\n opt.batch_size = opt.batch_size // opt.n_gpu\n # Seed\n if opt.seed is None:\n opt.seed = random.randint(1, 10000)\n else:\n assert isinstance(opt.seed, int) and opt.seed > 0\n print(f'Learning on {opt.n_gpu} GPU(s) (seed: {opt.seed})')\n random.seed(opt.seed)\n np.random.seed(opt.seed + opt.local_rank)\n torch.manual_seed(opt.seed)\n # cuDNN\n if opt.n_gpu > 1 or opt.local_rank > 0:\n assert torch.backends.cudnn.enabled\n cudnn.deterministic = True\n # Mixed-precision training\n if opt.torch_amp and not torch_amp_imported:\n raise ImportError('Mixed-precision not supported by this PyTorch version, upgrade PyTorch or use Apex')\n if opt.apex_amp and not apex_amp_imported:\n raise ImportError('Apex not installed (https://github.com/NVIDIA/apex)')\n\n ##################################################################################################################\n # Data\n ##################################################################################################################\n print('Loading data...')\n # Load data\n dataset = data.load_dataset(opt, True)\n trainset = dataset.get_fold('train')\n valset = dataset.get_fold('val')\n # Change validation sequence length, if specified\n if opt.seq_len_test is not None:\n valset.change_seq_len(opt.seq_len_test)\n\n # Handle random seed for dataloader workers\n def worker_init_fn(worker_id):\n np.random.seed((opt.seed + itr + opt.local_rank * opt.n_workers + worker_id) % (2**32 - 1))\n # Dataloader\n sampler = None\n shuffle = True\n if opt.n_gpu > 1:\n # Let the distributed sampler shuffle for the distributed 
case\n sampler = torch.utils.data.distributed.DistributedSampler(trainset)\n shuffle = False\n train_loader = DataLoader(trainset, batch_size=opt.batch_size, collate_fn=data.collate_fn, sampler=sampler,\n num_workers=opt.n_workers, shuffle=shuffle, drop_last=True, pin_memory=True,\n worker_init_fn=worker_init_fn)\n val_loader = DataLoader(valset, batch_size=opt.batch_size_test, collate_fn=data.collate_fn,\n num_workers=opt.n_workers, shuffle=True, drop_last=True, pin_memory=True,\n worker_init_fn=worker_init_fn) if opt.local_rank == 0 else None\n\n ##################################################################################################################\n # Model\n ##################################################################################################################\n # Buid model\n print('Building model...')\n model = srvp.StochasticLatentResidualVideoPredictor(opt.nx, opt.nc, opt.nf, opt.nhx, opt.ny, opt.nz, opt.skipco,\n opt.nt_inf, opt.nh_inf, opt.nlayers_inf, opt.nh_res,\n opt.nlayers_res, opt.archi)\n model.init(res_gain=opt.res_gain)\n # Make the batch norms in the model synchronized in the distributed case\n if opt.n_gpu > 1:\n if opt.apex_amp:\n from apex.parallel import convert_syncbn_model\n model = convert_syncbn_model(model)\n else:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model.to(device)\n\n ##################################################################################################################\n # Optimizer\n ##################################################################################################################\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)\n opt.n_iter = opt.lr_scheduling_burnin + opt.lr_scheduling_n_iter\n lr_sch_n_iter = opt.lr_scheduling_n_iter\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lr_lambda=lambda i: max(0, (lr_sch_n_iter - i) / lr_sch_n_iter))\n\n ##################################################################################################################\n # Automatic Mixed Precision\n ##################################################################################################################\n scaler = None\n if opt.torch_amp:\n scaler = torch_amp.GradScaler()\n if opt.apex_amp:\n model, optimizer = apex_amp.initialize(model, optimizer, opt_level=opt.amp_opt_lvl,\n keep_batchnorm_fp32=opt.keep_batchnorm_fp32,\n verbosity=opt.apex_verbose)\n\n ##################################################################################################################\n # Multi GPU\n ##################################################################################################################\n if opt.n_gpu > 1:\n if opt.apex_amp:\n from apex.parallel import DistributedDataParallel\n forward_fn = DistributedDataParallel(model)\n else:\n forward_fn = torch.nn.parallel.DistributedDataParallel(model)\n else:\n forward_fn = model\n\n ##################################################################################################################\n # Training\n ##################################################################################################################\n cudnn.benchmark = True # Activate benchmarks to select the fastest algorithms\n assert opt.n_iter > 0\n itr = 0\n finished = False\n # Progress bar\n if opt.local_rank == 0:\n pb = tqdm(total=opt.n_iter, ncols=0)\n # Current and best model evaluation metric (lower is better)\n val_metric = None\n best_val_metric = None\n try:\n while not finished:\n if sampler is not None:\n 
sampler.set_epoch(opt.seed + itr)\n # -------- TRAIN --------\n for batch in train_loader:\n # Stop when the given number of optimization steps have been done\n if itr >= opt.n_iter:\n finished = True\n status_code = 0\n break\n\n itr += 1\n model.train()\n # Optimization step on batch\n # Allow PyTorch's mixed-precision computations if required while ensuring retrocompatibilty\n with (torch_amp.autocast() if opt.torch_amp else nullcontext()):\n loss, nll, kl_y_0, kl_z = train(forward_fn, optimizer, scaler, batch, device, opt)\n\n # Learning rate scheduling\n if itr >= opt.lr_scheduling_burnin:\n lr_scheduler.step()\n\n # Evaluation and model saving are performed on the process with local rank zero\n if opt.local_rank == 0:\n # Evaluation\n if itr % opt.val_interval == 0:\n model.eval()\n val_metric = evaluate(forward_fn, val_loader, device, opt)\n if best_val_metric is None or best_val_metric > val_metric:\n best_val_metric = val_metric\n torch.save(model.state_dict(), os.path.join(opt.save_path, 'model_best.pt'))\n\n # Checkpointing\n if opt.chkpt_interval is not None and itr % opt.chkpt_interval == 0:\n torch.save(model.state_dict(), os.path.join(opt.save_path, f'model_{itr}.pt'))\n\n # Progress bar\n if opt.local_rank == 0:\n pb.set_postfix({'loss': loss, 'nll': nll, 'kl_y_0': kl_y_0, 'kl_z': kl_z, 'val_metric': val_metric,\n 'best_val_metric': best_val_metric}, refresh=False)\n pb.update()\n\n except KeyboardInterrupt:\n status_code = 130\n\n if opt.local_rank == 0:\n pb.close()\n # Save model\n print('Saving...')\n if opt.local_rank == 0:\n torch.save(model.state_dict(), os.path.join(opt.save_path, 'model.pt'))\n print('Done')\n return status_code", "def __init__(self):\n '''\n # Defines paths for training and validation datasets.\n train_path = \"/home/gauravkuppa24/Documents/datasets/MRNet-v1.0/train/coronal\"\n valid_path = \"/home/gauravkuppa24/Documents/datasets/MRNet-v1.0/valid/axial\"\n '''\n # Create Dataset and DataLoader for training and validation dataset\n self.dataset_train = CustomDatasetNPY(\"train\")[0:200]\n self.train_loader = torch.utils.data.DataLoader(\n self.dataset_train, batch_size=30, shuffle=False # , num_workers=4\n )\n self.dataset_valid = CustomDatasetNPY(\"valid\")[0:25]\n self.valid_loader = torch.utils.data.DataLoader(\n self.dataset_valid, batch_size=30, shuffle=False # , num_workers=4\n )\n self.dataset_sizes = {'train':len(self.dataset_train), 'valid':len(self.dataset_valid)}\n self.dataloaders = {\n 'train': self.train_loader,\n 'valid': self.valid_loader\n }\n # Create Neural Network with hyperparameters.\n self.net = ConvNet(2)\n self.optimizer = torch.optim.Adam(\n self.net.parameters(), lr=0.01\n ) # how do you know which optim to use when?\n self.criterion = (\n torch.nn.CrossEntropyLoss()\n ) # how do you know which criterion to use? 
why do we choose cross entropy loss\n self.exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(\n self.optimizer, step_size=7, gamma=0.1\n )\n self.device = torch.device(\"cpu\")", "def load_data(data_dir):\n \n #Define training, validation, and testing directories, structured for use with ImageFolder Class\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n \n #Define image transforms for training, validation, and testing\n training_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n validation_transforms = transforms.Compose([transforms.Resize(224),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n testing_transforms = validation_transforms\n\n\n #Load the datasets with ImageFolder\n training_data = datasets.ImageFolder(train_dir, transform = training_transforms)\n validation_data = datasets.ImageFolder(valid_dir, transform = validation_transforms)\n testing_data = datasets.ImageFolder(test_dir, transform = testing_transforms)\n\n #Using the image datasets and the trainforms, define the dataloaders\n training_loader = torch.utils.data.DataLoader(training_data, batch_size = 64, shuffle = True)\n validation_loader = torch.utils.data.DataLoader(validation_data, batch_size = 64, shuffle = False)\n testing_loader = torch.utils.data.DataLoader(testing_data, batch_size = 64, shuffle = False)\n \n return training_loader, validation_loader, testing_loader", "def __init__(self, dataset):\n self._dataset = dataset", "def main(args):\n args = parse_args(args)\n setup_logging(args.loglevel)\n dataset_opts = read_metadata(args.base_dataset_dir)\n size = dataset_opts[\"size\"]\n\n # Set seed number\n if args.seed:\n _logger.info('Seed: %d', args.seed)\n random.seed(args.seed)\n\n # Gather all files in dataset\n true_files = glob.glob(os.path.join(args.base_dataset_dir, 't', '*.jpg'))\n false_files = glob.glob(os.path.join(args.base_dataset_dir, 'f', '*.jpg'))\n\n # Split dataset into train, validation and test sets\n split_dataset(\n (true_files, false_files),\n args.base_dataset_dir,\n test_size=args.test_size,\n validation_size=args.validation_size,\n balancing_multiplier=args.balancing_multiplier)\n\n train(args.trainable_layers, args.output_model, args.batch_size,\n args.epochs, size, args.dataset_dir)\n\n _logger.info('Done')", "def test_single_image_dataset_train(mocker, simple_img):\n\n # This is in X, Y cooridnates\n fake_random_data = torch.Tensor(\n np.array(\n [\n [0, 0], # top left\n [0, 1], # bottom left\n [1, 0], # top right\n ]\n )\n )\n\n mock_random = mocker.patch(\"dataset.torch.rand\")\n mock_random.return_value = fake_random_data\n\n dataset = SingleImageDataset(simple_img, 3, normalize=False)\n assert len(dataset) == 9\n\n dataloader = DataLoader(dataset, batch_size=None, batch_sampler=None)\n\n n_iter = 0\n for x, y in dataloader:\n n_iter += 1\n x = x.numpy()\n y = y.numpy()\n expected_x = np.array(\n [\n [-1, -1], # Top left\n [-1, 1], # Bottom left\n [1, -1], # Top right\n ]\n )\n assert (expected_x == x).all()\n\n expected_y = np.array(\n [\n [1.0, 2.0, 3.0],\n [4.0, 5.0, 6.0],\n [7.0, 8.0, 9.0],\n ]\n )\n assert (expected_y == y).all()\n\n assert n_iter == 9", "def main():\n\n gpu_id = 1\n d_batch = 64\n d_embed = 256\n d_hidden = 256\n d_image_size = 256\n 
device = torch.device(f'cuda:{gpu_id}' if torch.cuda.is_available() else 'cpu')\n dataset, train_loader = get_default_flickr30k_loader(d_batch=d_batch, d_image_size=d_image_size)\n model = Img2Txt(dataset.d_vocab, d_embed, d_hidden, dataset.start_token, dataset.end_token).to(device)\n\n train(model, dataset, train_loader, device)", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_A = os.path.join(opt.dataroot, opt.phase5 + 'A') # create a path '/path/to/data/trainA'\n #self.dir_B20 = os.path.join(opt.dataroot, opt.phase + 'B20') # create a path '/path/to/data/trainB1'\n\n #self.dir_B2 = os.path.join(opt.dataroot, opt.phase + 'B2') # create a path '/path/to/data/trainB2'\n self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'\n #self.B20_paths = sorted(make_dataset(self.dir_B20, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n #self.B2_paths = sorted(make_dataset(self.dir_B2, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.A_size = len(self.A_paths) # get the size of dataset A\n # self.B20_size = len(self.B20_paths) # get the size of dataset B\n #self.B2_size = len(self.B2_paths)\n\n #self.dir_A50 = os.path.join(opt.dataroot, opt.phase + 'A50') # create a path '/path/to/data/trainA'\n self.dir_B50 = os.path.join(opt.dataroot, opt.phase5 + 'B50') # create a path '/path/to/data/trainB1'\n #self.dir_B2 = os.path.join(opt.dataroot, opt.phase + 'B2') # create a path '/path/to/data/trainB2'\n #self.A50_paths = sorted(make_dataset(self.dir_A50, opt.max_dataset_size)) # load images from '/path/to/data/trainA'\n self.B50_paths = sorted(make_dataset(self.dir_B50, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n #self.B2_paths = sorted(make_dataset(self.dir_B2, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n #self.A50_size = len(self.A50_paths) # get the size of dataset A\n self.B50_size = len(self.B50_paths) # get the size of dataset B\n #self.B2_size = len(self.B2_paths)\n\n self.dir_B100 = os.path.join(opt.dataroot, opt.phase5 + 'B100') # create a path '/path/to/data/trainB1'\n self.B100_paths = sorted(\n make_dataset(self.dir_B100, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.B100_size = len(self.B100_paths) # get the size of dataset B\n\n self.dir_B150 = os.path.join(opt.dataroot, opt.phase5 + 'B150') # create a path '/path/to/data/trainB1'\n self.B150_paths = sorted(\n make_dataset(self.dir_B150, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.B150_size = len(self.B150_paths) # get the size of dataset B\n\n\n\n self.dir_m0 = os.path.join(opt.dataroot, 'mask_0') # create a path '/path/to/data/trainB1'\n self.m0_paths = sorted(\n make_dataset(self.dir_m0, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m0_size = len(self.m0_paths) # get the size of dataset B\n\n\n self.dir_m50 = os.path.join(opt.dataroot, 'mask_50') # create a path '/path/to/data/trainB1'\n self.m50_paths = sorted(\n make_dataset(self.dir_m50, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m50_size = len(self.m50_paths) # get the size of dataset B\n\n\n\n self.dir_m100 = os.path.join(opt.dataroot, 'mask_100') # create a path '/path/to/data/trainB1'\n self.m100_paths = sorted(\n make_dataset(self.dir_m100, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m100_size = len(self.m100_paths) # get the size of dataset B\n\n\n self.dir_m150 = 
os.path.join(opt.dataroot, 'mask_150') # create a path '/path/to/data/trainB1'\n self.m150_paths = sorted(\n make_dataset(self.dir_m150, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.m150_size = len(self.m150_paths) # get the size of dataset B\n\n\n \n\n btoA = self.opt.direction == 'BtoA'\n input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image\n output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image\n self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))\n self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))", "def load_data(\n root: str, dataset: Dataset, subset: float, seed: int\n) -> Tuple[TensorDataset, TensorDataset, TensorDataset]:\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n if dataset == Dataset.MNIST_FASHION_05:\n path = os.path.join(root, \"FashionMNIST0.5.npz\")\n elif dataset == Dataset.MNIST_FASHION_06:\n path = os.path.join(root, \"FashionMNIST0.6.npz\")\n elif dataset == Dataset.CIFAR:\n path = os.path.join(root, \"CIFAR.npz\")\n else:\n raise NotImplementedError()\n\n data = np.load(path)\n\n # To get val data, we randomly sample 80% of the n examples to train a model\n # and use the remaining 20% to validate the model.\n train_val_feats = data[\"Xtr\"] # Train features. Size (n, *image_size)\n train_val_labels = data[\"Str\"] # Noisy labels. Size (n,) each label in {0, 1, 2}\n\n test_feats = data[\"Xts\"] # Test features. Size (m, *image_size)\n test_labels = data[\"Yts\"] # Clean labels. Size (m,) each label in {0, 1, 2}\n\n # TODO Move to proper preprocessing step\n train_val_feats = train_val_feats / 255\n test_feats = test_feats / 255\n\n # Randomly split train and val data\n indices = np.random.permutation(train_val_labels.shape[0])\n take = int(subset * train_val_labels.shape[0])\n print(f\"seed: {seed}\")\n print(\n f\"shuffle: [{', '.join([str(i) for i in indices[:min(10, take)]])}, ...]\",\n )\n shuffled_feats = train_val_feats[indices]\n shuffled_labels = train_val_labels[indices]\n\n train_feats = torch.tensor(shuffled_feats[:take], dtype=torch.float32)\n train_labels = torch.tensor(shuffled_labels[:take], dtype=torch.long)\n val_feats = torch.tensor(shuffled_feats[take:], dtype=torch.float32)\n val_labels = torch.tensor(shuffled_labels[take:], dtype=torch.long)\n test_feats = torch.tensor(test_feats, dtype=torch.float32)\n test_labels = torch.tensor(test_labels, dtype=torch.long)\n\n if dataset in [Dataset.MNIST_FASHION_05, Dataset.MNIST_FASHION_06]:\n train_feats = torch.unsqueeze(train_feats, 1)\n val_feats = torch.unsqueeze(val_feats, 1)\n test_feats = torch.unsqueeze(test_feats, 1)\n else:\n # Dataset is CIFAR. 
Comes packaged as (num_samples, dim, dim, channels).\n # We change this to (num_samples, channels, dim, dim).\n train_feats = train_feats.permute(0, 3, 1, 2)\n val_feats = val_feats.permute(0, 3, 1, 2)\n test_feats = test_feats.permute(0, 3, 1, 2)\n return (\n TensorDataset(train_feats, train_labels),\n TensorDataset(val_feats, val_labels),\n TensorDataset(test_feats, test_labels),\n )", "def get_dataset(args):\n\n if args.dataset == 'cifar':\n data_dir = 'data/cifar/'\n apply_transform = transforms.Compose(\n [transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n train_dataset = datasets.CIFAR10(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.CIFAR10(data_dir, train=False, download=True,\n transform=apply_transform)\n\n # sample training data amongst users\n if args.iid:\n user_groups = cifar_iid(train_dataset, args.num_users)\n else:\n if args.unequal:\n # Chose euqal splits for every user\n user_groups = cifar_noniid(train_dataset, args.num_users)\n else:\n user_groups = cifar_noniid_class(train_dataset, args.num_users, args.class_per_user)\n \n elif args.dataset == 'mnist' or 'fmnist':\n if args.dataset == 'mnist':\n data_dir = 'data/mnist/'\n else:\n data_dir = 'data/fmnist/'\n\n apply_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))])\n\n train_dataset = datasets.MNIST(data_dir, train=True, download=True,\n transform=apply_transform)\n\n test_dataset = datasets.MNIST(data_dir, train=False, download=True,\n transform=apply_transform)\n\n # sample training data amongst users\n if args.iid:\n # Sample IID user data from Mnist\n user_groups = mnist_iid(train_dataset, args.num_users)\n else:\n # Sample Non-IID user data from Mnist\n if args.unequal:\n # Chose uneuqal splits for every user\n user_groups = mnist_noniid_unequal(train_dataset, args.num_users)\n else:\n # Chose euqal splits for every user\n user_groups = mnist_noniid_class(train_dataset, args.num_users, args.class_per_user)\n\n return train_dataset, test_dataset, user_groups", "def train():\n pass", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')", "def data_creator(config):\n train_dataset, val_dataset = LinearDataset(2, 5), LinearDataset(2, 5)\n train_loader = DataLoader(train_dataset, batch_size=config[\"batch_size\"])\n val_loader = DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n return train_loader, val_loader", "def __init__(self,\n data:Tuple[dict, LoadData],\n net:torch.nn.Module,\n criterion:torch.nn.Module,\n opt_func:torch.optim.Optimizer=torch.optim.Adam,\n metrics:Tuple[yamlf_metrics.SKMetrics, yamlf_metrics.Metrics, str, Callable, list]=None,\n defs:dict=None,\n mixed_precision:bool=False,\n batch_lrs_func:torch.optim.lr_scheduler._LRScheduler=OneCycleLR,\n epoch_lrs_func:torch.optim.lr_scheduler._LRScheduler=None,\n tblogtype:str=\"batch\"\n ):\n # Set dataloaders\n if isinstance(data, dict):\n self.train_dl = data['train']\n if 'val' in data.keys(): self.val_dl = data['val']\n if 'test' in data.keys(): self.test_dl = data['test']\n if defs == None: raise TypeError('Please pass default 
settings dict. defs=<DefaultSettings>')\n self.__dict__.update(defs)\n elif isinstance(data, LoadData): self.__dict__.update(data.__dict__)\n else: raise NotImplementedError('unsupported data!')\n # set network\n self.net = net.to(self.device)\n # set loss or criterion\n self.criterion = criterion\n if opt_func.__name__ in ['SGD', 'RMSprop']:\n self.optimizer = opt_func(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, momentum=max(self.moms))\n else:\n self.optimizer = opt_func(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, betas=self.moms)\n # set metrics\n self.metrics = metrics\n if isinstance(self.metrics, (str, list)):\n if self.metrics in yamlf_metrics.SKMetrics.__dict__.keys():\n self.metrics = yamlf_metrics.SKMetrics([self.metrics])\n elif self.metrics in yamlf_metrics.Metrics.__dict__.keys():\n self.metrics = yamlf_metrics.Metrics([self.metrics])\n else:\n raise AttributeError(f\"{self.metrics} is unknown metric.\")\n\n # Set LR scheduler\n self.batch_lrs_func = batch_lrs_func\n self.epoch_lrs_func = epoch_lrs_func\n if self.batch_lrs_func != None and self.epoch_lrs_func != None:\n print(\"[WARNING]: Both batch and epoch scheduler are given. This might cause inconsistent training.\")\n # Other settings\n self.low_storage = self.low_storage # If low storage, don't save chkpts automatically\n self.tblogtype = tblogtype # tensorboard log type (batch or epoch)\n if not self.low_storage and self.tblogtype == 'batch':\n self.tbc = 0 # Traning batch counter for tensorboard logs\n self.vbc = 0 # Validation batch counter for tensorboard logs\n # Mixed Precision\n self.mixed_precision = mixed_precision\n if mixed_precision:\n self.net, self.optimizer = amp.initialize(self.net, self.optimizer)", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def init_data(in_arg, model_param, phase=\"train\"): \n # Firstly, set the directories\n # PRE-REQUISITES: \n # train & valid sets (1 per folder) must exist within the in_arg.data_dir (to improve if I have some time later on)\n # train folder must be \"train\", validation folwer must be \"valid\"\n # each file must be correctly classified (=within the correct id folder). 
file name doesn't matter\n model_param['data_dir'] = in_arg.data_dir\n train_dir = model_param['data_dir'] + '/train'\n valid_dir = model_param['data_dir'] + '/valid'\n\n model_param['save_dir'] = in_arg.save_dir\n \n # Prepare the transformations for train & validation sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n try:\n # Load the datasets with ImageFolder\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transforms)\n\n model_param['class_to_idx'] = train_dataset.class_to_idx\n \n # TODO: Using the image datasets and the trainforms, define the dataloaders\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=in_arg.batch_size, shuffle = True)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=in_arg.batch_size, shuffle = True)\n\n # Initialize the cat_to_name catalog\n #with open(in_arg.cat_to_name, 'r') as f:\n #cat_to_name = json.load(f)\n # model_param['cat_to_name'] = json.load(f)\n\n except Exception as e:\n print(\"An exception occured: {}.\".format(e))\n sys.exit(0)\n\n print(\"Data loading completed!\")\n\n # Return all parameters we will need later on\n return train_loader, valid_loader, model_param", "def _creatExamplesTensorData(self, examples):\n\n images = []\n \n images2 = []\n images3 = []\n images4 = []\n images5 = [] \n labels = []\n for (img_idx, label) in examples:\n img = self.dataset[img_idx][0]\n #print(img)\n ##exit(0)\n if self.load:\n img = Image.fromarray(img)\n else:\n img = read_image(img)\n #print(img.size)\n #print(np.array(img).shape)\n #exit(0)\n if self.transform is not None:\n img1 = self.transform(img)\n\n img2 = self.transform_test(img)\n img3 = self.transform_test(img)\n img4 = self.transform_test(img)\n img5 = self.transform_test(img) \n #print((img2-img1).abs().sum(),(img3-img1).abs().sum(),(img2-img3).abs().sum())\n #print(img.shape,'located in test_loader.py at 146')\n #exit(0)\n images.append(img1)\n \n images2.append(img2)\n images3.append(img3)\n images4.append(img4)\n images5.append(img5) \n labels.append(label)\n images = torch.stack(images, dim=0)\n\n images2 = torch.stack(images2, dim=0)\n images3 = torch.stack(images3, dim=0)\n images4 = torch.stack(images4, dim=0)\n images5 = torch.stack(images5, dim=0) \n labels = torch.LongTensor(labels)\n return images, images2,images3,images4,images5,labels", "def main(dataset):\n # Save everything 'MNE_DATA' dir ... 
defaults to ~/mne_data\n mne_data_dir = mne.get_config(key='MNE_DATA', default=False)\n if not mne_data_dir:\n mne.set_config('MNE_DATA', str(DEFAULT_DATA_DIR))\n DEFAULT_DATA_DIR.mkdir(exist_ok=True)\n mne_data_dir = DEFAULT_DATA_DIR\n else:\n mne_data_dir = Path(mne_data_dir)\n\n ds_names = DATASET_OPTIONS.keys() if not dataset else (dataset,)\n\n for ds_name in ds_names:\n print('\\n----------------------')\n ds_path = mne_data_dir / ds_name\n _download(ds_name=ds_name, ds_path=ds_path)", "def main(unused_argv):\n del unused_argv\n if not os.path.exists(FLAGS.data_dir):\n os.makedirs(FLAGS.data_dir)\n\n tfds_cached_dict = {}\n data_dir = FLAGS.tfds_data_dir if FLAGS.tfds_data_dir else None\n name = FLAGS.dataset_name\n tfds_cached_dict[name] = tfds.load(name, batch_size=-1, data_dir=data_dir)\n dataset_dict = tfds_cached_dict[name]\n dataset_dict[tfds.Split.TRAIN] = tfds.as_numpy(\n dataset_dict[tfds.Split.TRAIN])\n dataset_dict[tfds.Split.TEST] = tfds.as_numpy(\n dataset_dict[tfds.Split.TEST])\n # To mock the API of tfds.load to cache the downloaded datasets.\n # Used as an argument to `get_dataset`.\n def load_fn(name, data_dir=None, batch_size=-1):\n # This function will always return the whole dataset.\n assert batch_size == -1\n del data_dir\n del batch_size\n return tfds_cached_dict[name]\n class_ids = sorted([int(x) for x in FLAGS.class_ids])\n num_classes = len(class_ids)\n for i in range(num_classes):\n for j in range(i+1, num_classes):\n print('Generating pos {} neg {}'.format(i, j))\n positive_class = class_ids[i]\n negative_class = class_ids[j]\n random_seeds = range(FLAGS.min_data_seed, FLAGS.max_data_seed)\n for seed in random_seeds:\n dataset = create_projected_binary_dataset(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.num_train_examples, FLAGS.num_valid_examples,\n FLAGS.num_test_examples, FLAGS.projected_dim, seed, load_fn)\n filename = 'binary_{}-pos_{}-neg_{}-dim_{}-seed_{}'.format(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.projected_dim, seed)\n serialized_dataset = dataset.SerializeToString()\n\n with open(os.path.join(FLAGS.data_dir, filename), 'wb') as f:\n f.write(serialized_dataset)", "def prepare_demo_dataset(path, reso, batch_size=1):\r\n transform = transforms.Compose([\r\n transforms.Resize(size=(reso, reso), interpolation=3),\r\n transforms.ToTensor()\r\n ])\r\n\r\n img_datasets = DemoDataset(path, transform)\r\n dataloader = torch.utils.data.DataLoader(img_datasets, batch_size=batch_size, num_workers=8)\r\n\r\n return img_datasets, dataloader", "def getDataset(self, train=True):\n \n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\") \n \n if self.dataset == \"ELLIPSE\":\n a = np.array([[0,1.0],[1.0,2.0]]) \n b = a*0.5 \n myE = el.ellipse(device, 500, 100, a, b) \n if train == True:\n return myE.create_dataset(myE.examples)\n return myE.create_dataset(myE.valid) \n \n if self.dataset == \"SWISS\": \n myS = sw.SwissRoll(device, 500, 0.2) \n if train == True:\n return myS.create_dataset(myS.examples)\n return myS.create_dataset(myS.valid)\n \n \n #open file\n myFile = h5py.File(self.dataString, 'r', self.driver)\n \n if train == True: \n inputString = \"train_inputs\"\n labelsString = \"train_labels\"\n \n else:\n inputString = \"test_inputs\"\n labelsString = \"test_labels\"\n \n #get hdf5 datsets\n features = myFile.get(inputString)\n labels = myFile.get(labelsString)\n \n #convert to tensors\n features = torch.from_numpy(np.array(features))\n labels = torch.from_numpy(np.array(labels))\n \n 
#close file to ensure dataset is in memory\n myFile.close()\n \n #conver to correct datatypes\n features = features.float()\n \n if self.conv_sg == False:\n labels = labels.long() \n \n dataset = torch.utils.data.TensorDataset(features, labels)\n \n return dataset", "def __init__(self):\n self.__dataset = None", "def __init__(self, sample_df, data_path, load_semilabels=True, load_mask=True,\n output_size=512, data_augmentation=True):\n data.Dataset.__init__(self)\n self.sample_df = sample_df\n self.data_path = data_path\n self.load_semilabels = load_semilabels\n self.load_mask = load_mask\n if data_augmentation:\n self.transform = tf.Compose(tf.Grayscale(), \\\n tf.AutoContrast(cutoff=1), \\\n tf.RandomHorizontalFlip(p=0.5), \\\n tf.RandomVerticalFlip(p=0.5), \\\n tf.RandomBrightness(lower=0.8, upper=1.2), \\\n tf.RandomScaling(scale_range=(0.8,1.2)), \\\n tf.RandomRotation(degree_range=(-20,20)), \\\n tf.ResizeMax(output_size), \\\n tf.PadToSquare(), \\\n tf.MinMaxNormalization(), \\\n tf.ToTorchTensor())\n else:\n self.transform = tf.Compose(tf.Grayscale(), \\\n tf.AutoContrast(cutoff=1), \\\n tf.ResizeMax(output_size), \\\n tf.PadToSquare(), \\\n tf.MinMaxNormalization(), \\\n tf.ToTorchTensor())", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters", "def __init__(self,\n dataset: str,\n train: bool,\n subset: bool):\n\n PERCENT = .3\n\n if dataset == 'MNIST':\n data = torchvision.datasets.MNIST('./data', train=train, download=True,\n transform=torchvision.transforms.Compose([\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(\n (0.1307,), (0.3081,))\n ]))\n else:\n raise ValueError\n\n data_size = len(data)\n self.data = data\n\n if subset:\n indx = torch.randperm(data_size)[:int(data_size * PERCENT)]\n self.samples = self.data.data[indx, :, :]\n self.labels = self.data.targets[indx]\n else:\n self.samples = self.data.data\n self.labels = self.data.targets\n\n self.random_seed = 42", "def get_inference_dataset(dataset_path,debug=False):\n\n if not os.path.exists(dataset_path):\n assert False, \"Couldn't find path : '{}'\".format(dataset_path)\n print(\"\\nprocessing data :'{}'\\n\".format(dataset_path))\n\n path = os.getcwd()\n os.chdir(dataset_path)\n\n dataset = []\n for file in tqdm(os.listdir('.')):\n if not file.endswith('features'):\n continue\n name = file.replace(\".features\", \"\") # removing \"features\"\n x = np.loadtxt(name + '.features')\n np.nan_to_num(x, copy=False)\n #get labels file\n if os.path.exists(name + '.test.labels'):\n labels_file = open(name + '.test.labels').readlines()\n elif os.path.exists(name + '.labels'):\n labels_file = open(name + '.labels').readlines()\n else:\n continue\n file_info = (name , 
float(labels_file[-2].split(' ')[-1]),\n np.fromstring(labels_file[1].strip(), sep=' ')[:2],\n float(labels_file[2]))#(file name,window_offset,(onset,offset),vot_type)\n\n dataset.append([torch.from_numpy(x).float(), file_info])\n if debug and len(dataset)>100:\n break\n os.chdir(path)\n\n return DataLoader(dataset,shuffle=False)", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def train(self, data):\n pass", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A') # create a path '/path/to/data/trainA'\n self.dir_B = os.path.join(opt.dataroot, opt.phase + 'B') # create a path '/path/to/data/trainB'\n\n self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size)) # load images from '/path/to/data/trainA'\n self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size)) # load images from '/path/to/data/trainB'\n self.A_size = len(self.A_paths) # get the size of dataset A\n self.B_size = len(self.B_paths) # get the size of dataset B\n btoA = self.opt.direction == 'BtoA'\n input_nc = self.opt.output_nc if btoA else self.opt.input_nc # get the number of channels of input image\n output_nc = self.opt.input_nc if btoA else self.opt.output_nc # get the number of channels of output image\n self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))\n self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))", "def __init__(self, dataset_name, source_path_str=None, th=0, nprocs=1):\n\n # wherever you call from, the dataset root path\n # will be the same, \\\\|// relative to project root,\n # as marked by the -(@ @)- .kilroy flag file\n # --oOO---(_)--- OOo--\n\n base_path = mooltipath(\"datasets\")\n\n # dataset directory path\n self.path = Path(base_path, dataset_name)\n\n # 
database file path\n self.db_path = Path(self.path, \"database.db\")\n\n # manifest file path\n mnf_path = Path(base_path, dataset_name + \".mnf\")\n\n # samples audio files path\n self.samples_path = Path(self.path, \"samples\")\n\n # if no source path was provided, it means\n # that we want to retrieve an existing AudioDataset db\n if not source_path_str:\n # if the database file does not exist, we bailout friendly\n if not self.db_path.is_file():\n iprint(\"Dataset database file does not exist.\")\n iprint(\"Please insure that the dataset name is correct and\")\n iprint(\"that the dataset has been previously created.\")\n iprint(\"### ABORTING! ###\")\n raise FileNotFoundError(\"Dataset database not found\")\n\n # else retrieve dataset configuration from db\n self._get_config_from_db()\n else:\n # Else we have to build the dataset from its manifest.\n # we expect:\n # - the manifest file to exist,\n # - the ds directory to not exist\n # - the source_dir to exist.\n # Else exit friendly\n source_path = Path(source_path_str).absolute()\n\n if not mnf_path.is_file():\n iprint(\"Dataset manifest does not exist.\")\n iprint(\"Please insure that the dataset name is correct.\")\n iprint(\"### ABORTING! ###\")\n raise FileNotFoundError(\"Dataset manifest not found\")\n\n if not source_path.is_dir():\n iprint(f\"Source directory ({source_path}) does not exist.\")\n iprint(\"Please insure that provided path is correct.\")\n iprint(\"### ABORTING! ###\")\n raise FileNotFoundError(\"Dataset source directory not found\")\n\n if self.path.exists():\n iprint(f\"The dataset directory ({self.path}) already exists.\")\n iprint(\n \"If you really intent to CREATE this dataset, please \"\n \"erase this directory first\"\n )\n iprint(\"### ABORTING! ###\")\n raise FileExistsError(\"Dataset directory already exists\")\n\n iprint(f\">>>>> Starting Dataset {dataset_name} build\")\n # Create directory structure\n self.samples_path.mkdir(parents=True)\n\n # Read manifest\n with mnf_path.open(\"r\") as f:\n lines = f.read().split(\"\\n\")\n sample_rate = int(lines[0])\n duration = float(lines[1])\n overlap = float(lines[2])\n md5 = lines[3]\n filenames = lines[4:]\n\n # Perform sanity checks\n assert duration > 0, \"duration must be strictly positive\"\n assert overlap >= 0, \"overlap must be positive or zero\"\n assert (\n sample_rate <= 44100 and sample_rate >= 1024\n ), \"sample_rate must belong to [1024 - 44100]\"\n assert overlap < duration, \"overlap must be < duration\"\n\n # Pre Populate instance properties\n self.sample_rate = sample_rate\n self.duration = duration\n self.overlap = overlap\n self.filenames = filenames\n self.nb_samples = 0\n\n # Create database tables\n self._create_tables()\n\n # Perform actual audio chunks slicing\n self._build(source_path, th, nprocs)\n\n # Compute md5 checksum and check it agains manifest expected value\n assert self._is_valid(\n md5\n ), \"Computed MD5 checksum does not match manifest expected value.\"\n iprint(\"Checksum OK!\")\n\n # Everything went well, we are done (Yay!)\n iprint(f\">>>>> Dataset {dataset_name} successfully created.\")\n\n return", "def init(*args):\n global dataset\n dataset = args[0]", "def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n 
else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def __init__(self, options, path):\n print('Prepare the network and data.')\n self._options = options\n self._path = path\n self._epoch = 0\n\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # Network.\n network = SCNN()\n #weight_init(network)\n network = network.to(self.device)\n # self._net = network.cuda()\n self._net = network\n #self._net = torch.nn.DataParallel(network)\n\n logspaced_LR = np.logspace(-1, -4, self._options['epochs'])\n # Load the model from disk.\n checkpoints_list = os.listdir(self._path['model'])\n if len(checkpoints_list) != 0:\n self._net.load_state_dict(torch.load(\n os.path.join(self._path['model'], '%s%s%s' % ('net_params', str(len(checkpoints_list) - 1), '.pkl'))))\n self._epoch = len(checkpoints_list)\n self._options['base_lr'] = logspaced_LR[len(checkpoints_list)]\n # self._net.load_state_dict(torch.load(self._path['model']))\n print(self._net)\n # Criterion.\n self._criterion = torch.nn.CrossEntropyLoss().cuda()\n # Solver.\n self._solver = torch.optim.SGD(\n self._net.parameters(), lr=self._options['base_lr'],\n momentum=0.9, weight_decay=self._options['weight_decay'])\n # self._solver = torch.optim.Adam(\n # self._net.parameters(), lr=self._options['base_lr'],\n # weight_decay=self._options['weight_decay'])\n lambda1 = lambda epoch: logspaced_LR[epoch]\n self._scheduler = torch.optim.lr_scheduler.LambdaLR(self._solver, 
lr_lambda=lambda1)\n\n self.train_transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(size=256), # Let smaller edge match\n torchvision.transforms.RandomHorizontalFlip(),\n torchvision.transforms.RandomCrop(size=224),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n ])\n self.test_transforms = torchvision.transforms.Compose([\n torchvision.transforms.Resize(size=256),\n torchvision.transforms.CenterCrop(size=224),\n torchvision.transforms.ToTensor(),\n torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225))\n ])\n self.train_data = ImageDataset(csv_file=os.path.join(path['kadis'], 'train.txt'),\n img_dir=os.path.join(path['kadis'], 'dist_imgs'),\n transform=self.train_transforms,\n test=False)\n self._train_loader = torch.utils.data.DataLoader(\n self.train_data, batch_size=self._options['batch_size'],\n shuffle=True, num_workers=1, pin_memory=True)\n self.test_data = ImageDataset(csv_file=os.path.join(path['kadis'], 'test.txt'),\n img_dir=os.path.join(path['kadis'], 'dist_imgs'),\n transform=self.test_transforms,\n test=True)\n self._test_loader = torch.utils.data.DataLoader(\n self.test_data, batch_size=self._options['batch_size'],\n shuffle=False, num_workers=1, pin_memory=True)", "def __init__(self, dataset_folder: str, mode: str, output_folder: str, transforms: torchvision.transforms = None, show_stats: bool = False):\n super(FER2013, self).__init__()\n\n self.transforms = transforms\n self.show_stats = show_stats\n self.mode = mode\n \n self.database_path = join(dataset_folder, 'icml_face_data.csv')\n self.db_mode = 'Usage'\n\n self.transforms = transforms\n self.db = pd.read_csv(self.database_path)\n self.db = self.db[self.db['emotion'] != 7] # Remove the contempt\n\n # Rename columns so to have always same across datasets\n self.db = self.db.rename(columns={'emotion': 'expression', self.db.columns[1]: 'Usage', self.db.columns[-1]: 'pixels'})\n\n self.idx_to_expression = {\n 0: 'Angry', \n 1: 'Disgust', \n 2: 'Fear', \n 3: 'Happy', \n 4: 'Sad', \n 5: 'Surprise', \n 6: 'Neutral'\n }\n \n if mode == 'Training' and self.show_stats: # Print stats only once\n self.__dataset_statistics()\n\n # Select the specific data split -- This call must follow the one for __dataset_statistics() \n self.set_usage()", "def main(args):\n dataset = MelSpectrogramDataset(args.dataset_file, args.label_file,\n args.context, None, device, None)\n\n # Split train and test datasets\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n train_dataset, test_dataset = torch.utils.data.random_split(\n dataset, [train_size, test_size])\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n num_workers=0,\n pin_memory=False)\n validation_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=args.batch_size,\n shuffle=args.shuffle,\n num_workers=0,\n pin_memory=False)\n\n model = get_model(args.model_name, dataset.vector_length).to(device)\n\n optimizer = getattr(torch.optim, args.optimizer)(model.parameters(),\n lr=args.lr,\n weight_decay=args.wd)\n\n loss_func = F.cross_entropy\n\n training_loss = []\n # Train\n for epoch in tqdm(range(args.num_epochs)):\n\n loss_epoch = []\n\n for input_vector, label in train_loader:\n label = label.to(dtype=torch.long,\n device=device,\n non_blocking=False)\n\n input_vector = input_vector.to(device, non_blocking=False)\n input_vector = 
input_vector.float()\n\n pred = model(input_vector).transpose(1, 2)\n\n optimizer.zero_grad()\n\n loss = loss_func(pred, label)\n\n loss.backward()\n\n optimizer.step()\n\n loss_epoch.append(loss.item())\n\n print(f\"Loss at epoch {epoch} is {sum(loss_epoch)/len(loss_epoch)}\")\n training_loss.append(sum(loss_epoch) / len(loss_epoch))\n validation_losses = validate(args, model, loss_func, validation_loader)\n\n # Graph training loss\n y_loss = np.array(training_loss)\n x_epochs = np.arange(1, len(y_loss) + 1)\n sns.set()\n loss_plot = sns.lineplot(x=x_epochs, y=y_loss)\n loss_plot.set(xlabel='Epoch', ylabel='Cross Entropy Loss')\n plt.title('Training Loss')\n plt.show()", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def __init__(self, dataset_name, device):\r\n\r\n dataFrame = pandas.read_csv(dataset_name)\r\n\r\n Y = dataFrame.values[:,1:]\r\n Yt = Y.transpose()\r\n\r\n #create the input time series for the model, with one unit of delay, is no model parameter, no grad needed\r\n #yInput = Variable(torch.Tensor(Yt[:, :-1]).type(dtype), requires_grad = False).to(device)\r\n self.yInput = torch.tensor(Yt[:, :-1], dtype=torch.float, device=device, requires_grad=False)\r\n \r\n # create the target or ground truth data\r\n #yTarget = Variable(torch.Tensor(Yt[:, 1:]).type(dtype), requires_grad = False).to(device)\r\n self.yTarget = torch.tensor(Yt[:, 1:], dtype=torch.float, device=device, requires_grad=False)\r\n \r\n # Normalizes values\r\n self.yInput = self.yInput / torch.max(self.yInput)\r\n self.yTarget = self.yTarget / torch.max(self.yTarget)", "def main(args):\n\n save_name = '../logs/train_' + args.name + '.html'\n fh = FileHandler(save_name, mode=\"w\")\n logger.addHandler(fh) \n \n if args.verbose:\n logger.setLevel(logging.DEBUG)\n\n net = VarNet2()\n net = net.cuda()\n\n if args.loss == 'l1':\n criterion = torch.nn.L1Loss()\n criterion = criterion.cuda()\n optimizer = torch.optim.Adam(net.parameters(),lr=args.lr)\n adaptive = None\n elif args.loss == 'adaptive':\n adaptive = robust_loss_pytorch.adaptive.AdaptiveLossFunction(\n num_dims = 1, float_dtype=np.float32, device='cuda:0')\n params = list(net.parameters()) + list(adaptive.parameters())\n optimizer = torch.optim.Adam(params, lr=args.lr)\n criterion = None\n \n trainset = kneeData(root= pathlib.Path('../data/div_knee3d/Train'))\n \n validset = kneeData(root= pathlib.Path('../data/div_knee3d/Val'))\n\n testset = kneeData(root= pathlib.Path('../data/div_knee3d/Test'))\n\n trainloader = DataLoader(trainset, batch_size=1, shuffle=True, num_workers=4)\n\n validloader = DataLoader(validset, batch_size=1, shuffle=False, num_workers=4)\n \n logger.info('Training .... 
')\n\n for epoch in range(args.num_epochs):\n avg_loss = train_epoch(\n epoch, trainloader, net, optimizer, criterion, args, adaptive)\n logger.info('Epoch: {}, Train Loss: {}'.format(epoch, avg_loss))\n \n val_loss = validate_epoch(\n validloader, net, criterion, args, adaptive)\n logger.info('Epoch: {}, Val Loss: {}'.format(epoch, val_loss))\n\n\n if epoch % 5 == 0:\n for idx in [80,100,120,140,160]:\n out_cat = test_result(idx,testset,net)\n logger.debug(VisualRecord(\n \"epoch: {}, slice:{}\".format(epoch, idx), out_cat, fmt=\"png\"))\n \n # Save network to weight\n weight_name = '../exp/' + args.name + '.pt'\n torch.save(net.state_dict(), weight_name)", "def main():\n\n # If there checkpoint is already, assign checkpoint=checkpoint_file\n checkpoint=None\n\n # Set epochs, load the data and the trainable model\n start_epoch=0\n end_epoch=7000\n learning_rate=1e-3\n batch_size=6\n\n model = DarkNet()\n data=DataLoader(416,\"data/train\")\n dataloader=torch.utils.data.DataLoader(dataset=data,batch_size=batch_size,num_workers=0,shuffle=True)\n model=model.to(\"cuda\")\n optimizer=torch.optim.Adam(model.parameters(),lr=learning_rate)\n\n # If there's a checkpoint, load its values\n if checkpoint!=None:\n model.load_state_dict(torch.load(checkpoint)['state_dict'])\n optimizer.load_state_dict(torch.load(checkpoint)['optimizer'])\n start_epoch=torch.load(checkpoint)['epoch']\n\n for param in model.parameters():\n param.requires_grad = True\n count=0\n x_y=[]\n w_h=[]\n conf_loss=[]\n final_loss=[]\n\n # Train the model\n print(\"Starting Training..\")\n\n for epoch in range(start_epoch,end_epoch):\n print(\"------------------------------------------------------------------------------------------------------------\")\n for batch_id,(imgs,target) in enumerate(dataloader):\n imgs=imgs.cuda()\n target=target.cuda()\n optimizer.zero_grad()\n loss=model(imgs,target)\n loss.backward()\n optimizer.step()\n if batch_id%10==0:\n print(\"Epoch %d/%d || Batch %d || Overall Loss %.2f || X-Loss %.2f || Y-Loss %.2f || W-Loss %.2f || H-Loss %.2f\" %(epoch, \n end_epoch, batch_id, loss.item(), model.losses[0], model.losses[1], model.losses[2], model.losses[3]))\n x_y.append(model.losses[0]+model.losses[1])\n w_h.append(model.losses[2]+model.losses[3])\n conf_loss.append(model.losses[4])\n final_loss.append(loss.item())\n\n # Plot the graph to check if the loss is decreasing through the epochs\n \n # X-Y Loss\n plt.plot(x_y,label='X and Y')\n plt.savefig('x-y-loss.png')\n plt.close()\n\n # W-H Loss\n plt.plot(w_h,label='W and H')\n plt.savefig('w-h-loss.png')\n plt.close()\n\n # Confidence Loss\n plt.plot(conf_loss,label='Conf')\n plt.savefig('conf-loss.png')\n plt.close()\n\n # Overall Loss\n plt.plot(final_loss,label='Loss')\n plt.savefig('final-loss.png')\n plt.show()\n plt.close()\n\n # Save the model as checkpoint\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict()},\n 'checkpoints/checkpoint.epoch.{}.pth.tar'.format(epoch))", "def main():\n\n batch_size = 30\n feature_size = 33\n hidden_dim = 128\n n_layers = 2\n out_dim = 6\n\n # EXP = f\"LSTM_genre6_all_38\"\n #EXP = \"dim256_layer3\"\n EXP = f\"ExperimentalRNN_genre6_cqt_33_batch30\"\n\n print(\"Preprocessing all data from scratch....\")\n dev_dataset = MusicDataset(f\"./data/adev6_cqt_33_128_feature.pkl\",\n f\"./data/adev6_cqt_33_128_target.pkl\")\n train_dataset = MusicDataset(f\"./data/atrain6_cqt_33_128_feature.pkl\",\n f\"./data/atrain6_cqt_33_128_target.pkl\")\n test_dataset = 
MusicDataset(f\"./data/atest6_cqt_33_128_feature.pkl\",\n f\"./data/atest6_cqt_33_128_target.pkl\")\n\n dev_generator = DataLoader(dataset=dev_dataset, batch_size=batch_size, shuffle=True)\n train_generator = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n test_generator = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)\n\n print(\"build model\")\n # use GPU or CPU\n if USE_CUDA:\n device = torch.device(\"cuda\")\n print(\"GPU is available\")\n else:\n device = torch.device(\"cpu\")\n print(\"GPU not available, CPU used\")\n\n print(EXP)\n\n\n #model = models.LSTMNet(input_dim=feature_size, hidden_dim=hidden_dim,\n # batch_size=batch_size, output_dim=out_dim, num_layers=n_layers)\n\n model = models.ExperimentalRNN(input_dim=feature_size, hidden_dim=hidden_dim,\n output_dim=out_dim, num_layers=n_layers)\n\n # learning rate\n lr = 0.001\n # loss function\n loss_fn = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n\n if os.path.exists(f'./models/{EXP}_model.pth'):\n trained_model = torch.load(f\"./models/{EXP}_model.pth\")\n\n else:\n model.to(device)\n trained_model = train_model(model, loss_fn, optimizer, train_generator, dev_generator, EXP)\n torch.save(trained_model, f\"./models/{EXP}_model.pth\")\n\n test_model(trained_model, loss_fn, test_generator)", "def __init__(\n self,\n datasets: Sequence[IDataset[T_co]],\n *,\n indices: Optional[List[int]] = None,\n data_attributes: Optional[List[DataAttribute]] = None,\n transform_groups: Optional[TransformGroups] = None,\n frozen_transform_groups: Optional[TransformGroups] = None,\n collate_fn: Optional[Callable[[List], Any]] = None,\n ):\n if isinstance(datasets, TorchDataset) or isinstance(datasets, AvalancheDataset):\n warnings.warn(\n \"AvalancheDataset constructor has been changed. \"\n \"Please check the documentation for the correct usage. You can\"\n \" use `avalanche.benchmarks.utils.make_classification_dataset \"\n \"if you need the old behavior.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n if issubclass(type(datasets), TorchDataset) or issubclass(\n type(datasets), AvalancheDataset\n ):\n datasets = [datasets] # type: ignore\n\n # NOTES on implementation:\n # - raw datasets operations are implemented by _FlatData\n # - data attributes are implemented by DataAttribute\n # - transformations are implemented by TransformGroups\n # AvalancheDataset just takes care to manage all of these attributes\n # together and decides how the information propagates through\n # operations (e.g. how to pass attributes after concat/subset\n # operations).\n flat_datas = []\n for d in datasets:\n if len(d) > 0:\n if isinstance(d, AvalancheDataset):\n flat_datas.append(d._flat_data)\n elif not isinstance(d, _FlatDataWithTransform):\n flat_datas.append(_FlatDataWithTransform([d]))\n else:\n flat_datas.append(d)\n if (\n transform_groups is None\n and frozen_transform_groups is None\n and indices is not None\n and len(flat_datas) == 1\n ):\n # TODO: remove. shouldn't be needed but helps with flattening\n assert len(flat_datas) == 1\n self._flat_data = flat_datas[0].subset(indices)\n elif (\n transform_groups is None\n and frozen_transform_groups is None\n and indices is None\n and len(flat_datas) >= 1\n ):\n # TODO: remove. 
shouldn't be needed but helps with flattening\n if len(flat_datas) == 0:\n self._flat_data = _FlatDataWithTransform([])\n self._flat_data = flat_datas[0]\n if not isinstance(self._flat_data, _FlatDataWithTransform):\n self._flat_data = _FlatDataWithTransform([self._flat_data])\n\n for d in flat_datas[1:]:\n if not isinstance(d, _FlatDataWithTransform):\n d = _FlatDataWithTransform([d])\n self._flat_data = self._flat_data.concat(d)\n else:\n self._flat_data: _FlatDataWithTransform[T_co] = _FlatDataWithTransform(\n flat_datas,\n indices=indices,\n transform_groups=transform_groups,\n frozen_transform_groups=frozen_transform_groups,\n )\n self.collate_fn = collate_fn\n\n ####################################\n # Init collate_fn\n ####################################\n if len(datasets) > 0:\n self.collate_fn = self._init_collate_fn(datasets[0], collate_fn)\n else:\n self.collate_fn = default_collate\n \"\"\"\n The collate function to use when creating mini-batches from this\n dataset.\n \"\"\"\n\n ####################################\n # Init data attributes\n ####################################\n # concat attributes from child datasets\n new_data_attributes: Dict[str, DataAttribute] = dict()\n if data_attributes is not None:\n new_data_attributes = {da.name: da for da in data_attributes}\n ld = sum(len(d) for d in datasets)\n for da in data_attributes:\n if len(da) != ld:\n raise ValueError(\n \"Data attribute {} has length {} but the dataset \"\n \"has length {}\".format(da.name, len(da), ld)\n )\n\n self._data_attributes: Dict[str, DataAttribute] = OrderedDict()\n first_dataset = datasets[0] if len(datasets) > 0 else None\n if isinstance(first_dataset, AvalancheDataset):\n for attr in first_dataset._data_attributes.values():\n if attr.name in new_data_attributes:\n # Keep overridden attributes in their previous position\n self._data_attributes[attr.name] = new_data_attributes.pop(\n attr.name\n )\n continue\n\n acat = attr\n found_all = True\n for d2 in datasets[1:]:\n if hasattr(d2, attr.name):\n acat = acat.concat(getattr(d2, attr.name))\n elif len(d2) > 0: # if empty we allow missing attributes\n found_all = False\n break\n if found_all:\n self._data_attributes[attr.name] = acat\n\n # Insert new data attributes after inherited ones\n for da in new_data_attributes.values():\n self._data_attributes[da.name] = da\n\n if indices is not None: # subset operation for attributes\n for da in self._data_attributes.values():\n # TODO: this was the old behavior. How do we know what to do if\n # we permute the entire dataset?\n # DEPRECATED! 
always subset attributes\n # we keep this behavior only for `classification_subset`\n # if len(da) != sum([len(d) for d in datasets]):\n # self._data_attributes[da.name] = da\n # else:\n # self._data_attributes[da.name] = da.subset(self._indices)\n #\n # dasub = da.subset(indices)\n # self._data_attributes[da.name] = dasub\n dasub = da.subset(indices)\n self._data_attributes[da.name] = dasub\n\n # set attributes dynamically\n for el in self._data_attributes.values():\n assert len(el) == len(self), f\"BUG: Wrong size for attribute {el.name}\"\n\n is_property = False\n if hasattr(self, el.name):\n is_property = True\n # Do not raise an error if a property.\n # Any check related to the property will be done\n # in the property setter method.\n if not isinstance(getattr(type(self), el.name, None), property):\n raise ValueError(\n f\"Trying to add DataAttribute `{el.name}` to \"\n f\"AvalancheDataset but the attribute name is \"\n f\"already used.\"\n )\n if not is_property:\n setattr(self, el.name, el)", "def process_dataset(self):\n\n logger.info('\\n')\n logger.info('=' * 40)\n logger.info('=\\t DeepRank Data Set')\n logger.info('=')\n logger.info('=\\t Training data')\n for f in self.train_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.valid_database:\n logger.info('=\\t Validation data')\n for f in self.valid_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n if self.test_database:\n logger.info('=\\t Test data')\n for f in self.test_database:\n logger.info(f'=\\t -> {f}')\n logger.info('=')\n logger.info('=' * 40 + '\\n')\n sys.stdout.flush()\n\n # check if the files are ok\n self.check_hdf5_files(self.train_database)\n\n if self.valid_database:\n self.valid_database = self.check_hdf5_files(\n self.valid_database)\n\n if self.test_database:\n self.test_database = self.check_hdf5_files(\n self.test_database)\n\n # create the indexing system\n # alows to associate each mol to an index\n # and get fname and mol name from the index\n self.create_index_molecules()\n\n # get the actual feature name\n if self.mapfly:\n self.get_raw_feature_name()\n else:\n self.get_mapped_feature_name()\n\n # get the pairing\n self.get_pairing_feature()\n\n # get grid shape\n self.get_grid_shape()\n\n # get the input shape\n self.get_input_shape()\n\n # get renormalization factor\n if self.normalize_features or self.normalize_targets or self.clip_features:\n if self.mapfly:\n self.compute_norm()\n else:\n self.get_norm()\n\n logger.info('\\n')\n logger.info(\" Data Set Info:\")\n logger.info(\n f' Augmentation : {self.use_rotation} rotations')\n logger.info(\n f' Training set : {self.ntrain} conformations')\n logger.info(\n f' Validation set : {self.nvalid} conformations')\n logger.info(\n f' Test set : {self.ntest} conformations')\n logger.info(f' Number of channels : {self.input_shape[0]}')\n logger.info(f' Grid Size : {self.data_shape[1]}, '\n f'{self.data_shape[2]}, {self.data_shape[3]}')\n sys.stdout.flush()", "def iris():\n return IrisDataset()", "def main(): # pylint: disable=R0914, R0915\n args = parse_args()\n\n # load dataset\n test_dataset = load_dataset(dataset_name=args.dataset, dataset_part='test')\n\n # load model\n model_name = args.model\n model = load_model(model_name)\n model.load_state_dict(torch.load(args.checkpoint_path)['model'])\n model.eval()\n\n # create sets of samples of images and their corresponding saliency maps\n all_samples = []\n all_saliency_maps = []\n sample_to_image = lambda x: np.transpose(x, (1, 2, 0))\n\n for _ in range(6):\n samples, 
true_labels = next(iter(DataLoader(test_dataset,\n batch_size=6,\n shuffle=True)))\n all_samples.append(torch.cat([sample_to_image(s).unsqueeze(0)\n for s in samples]))\n saliency_maps = compute_gradient_saliency_maps(samples.to(device),\n true_labels.to(device),\n model)\n all_saliency_maps.append(saliency_maps.cpu().detach())\n\n all_samples = torch.cat(all_samples)\n all_saliency_maps = torch.cat(all_saliency_maps)\n\n saliency_maps_and_images_pairs = plt.figure()\n plt.suptitle('Images and their saliency maps')\n for idx, (image, saliency_map) in enumerate(zip(all_samples,\n all_saliency_maps)):\n plt.subplot(6, 6 * 2, 2 * idx + 1)\n # plot image\n image -= image.min()\n image /= image.max()\n plt.imshow(image)\n plt.xticks([])\n plt.yticks([])\n # plot saliency map\n plt.subplot(6, 6 * 2, 2 * idx + 2)\n saliency_map -= saliency_map.min()\n saliency_map /= saliency_map.max()\n plt.imshow(saliency_map)\n plt.xticks([])\n plt.yticks([])\n\n saliency_maps_and_images_pairs.set_size_inches((8, 8))\n saliency_maps_and_images_pairs.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_'\n f'saliency_maps_and_images_pairs.png'))\n\n # loop through the images in the test set and compute saliency map for\n # each image. Compute the average map of all real face image and\n # all fake face image images.\n dataloader = DataLoader(test_dataset, batch_size=32, shuffle=True)\n real_images_saliency_maps = []\n fake_images_saliency_maps = []\n\n for samples, true_labels in dataloader:\n fake_samples = samples[true_labels == 1].to(device)\n fake_labels = true_labels[true_labels == 1].to(device)\n real_samples = samples[true_labels == 0].to(device)\n real_labels = true_labels[true_labels == 0].to(device)\n saliency_maps = compute_gradient_saliency_maps(fake_samples,\n fake_labels,\n model)\n fake_images_saliency_maps.append(saliency_maps.cpu().detach())\n saliency_maps = compute_gradient_saliency_maps(real_samples,\n real_labels,\n model)\n real_images_saliency_maps.append(saliency_maps.cpu().detach())\n\n all_real_saliency_maps = torch.cat(real_images_saliency_maps)\n all_fake_saliency_maps = torch.cat(fake_images_saliency_maps)\n\n for idx in range(all_real_saliency_maps.shape[0]):\n all_real_saliency_maps[idx] -= all_real_saliency_maps[idx].min()\n all_real_saliency_maps[idx] /= all_real_saliency_maps[idx].max()\n\n for idx in range(all_fake_saliency_maps.shape[0]):\n all_fake_saliency_maps[idx] -= all_fake_saliency_maps[idx].min()\n all_fake_saliency_maps[idx] /= all_fake_saliency_maps[idx].max()\n\n mean_saliency_maps = plt.figure()\n plt.subplot(1, 2, 1)\n mean_map = all_fake_saliency_maps.mean(axis=0)\n mean_map -= mean_map.min()\n mean_map /= mean_map.max()\n plt.imshow(mean_map)\n plt.title('mean of fake images saliency maps')\n plt.subplot(1, 2, 2)\n mean_map = all_real_saliency_maps.mean(axis=0)\n mean_map -= mean_map.min()\n mean_map /= mean_map.max()\n plt.imshow(mean_map)\n plt.title('mean of real images saliency maps')\n mean_saliency_maps.set_size_inches((8, 6))\n mean_saliency_maps.savefig(\n os.path.join(FIGURES_DIR,\n f'{args.dataset}_{args.model}_mean_saliency_maps.png'))", "def train(self)->None:", "def test_model_sample(net, data_loader):\n net.eval()\n array = []\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n output = net(X)\n output = ToPILImage()(output)\n array.append(output)\n return array", "def setup(self):\n (self.X, self.Y) = load_iris(problem=\"label_ranking\")", "def __init__(self, opt):\n BaseDataset.__init__(self, opt)\n self.dir_AB = 
os.path.join(opt.dataroot, opt.phase) # get the image directory\n self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths\n assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image\n self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc\n self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc\n\n\n\n\n basic_dir = '/home/akira/Project/simplestCNN_sparse2dense/data/32-128data'\n bin32file = os.path.join(basic_dir, \"Lidar32.bin\")\n bin128file = os.path.join(basic_dir, \"Lidar128.bin\")\n bin32pa = os.path.join(basic_dir, \"Lidar32.pa\")\n bin128pa = os.path.join(basic_dir, \"Lidar128.pa\")\n rimg32loader = ImageTensorBinLoader(bin32file, bin32pa)\n rimg128loader = ImageTensorBinLoader(bin128file, bin128pa)\n self.rimgdataset = SyncDataset((0, 500))\n self.rimgdataset.registerDataLoader(rimg32loader, \"rimg32\")\n self.rimgdataset.registerDataLoader(rimg128loader, \"rimg128\")", "def get_val_dataloader(dataset, datadir, datasize, val_bs):\n val_dl = None\n if dataset == 'tinyimagenet':\n if not os.path.exists('./data/tiny-imagenet-200'):\n download_and_unzip('http://cs231n.stanford.edu/tiny-imagenet-200.zip','./data/')\n random_ids = np.random.randint(100000, size=datasize)\n val_indices = random_ids\n\n imagenet_mean = [0.485, 0.456, 0.406]\n imagenet_std = [0.229, 0.224, 0.225]\n\n val_dl = torch.utils.data.DataLoader(\n torchvision.datasets.ImageFolder(datadir,\n transform=transforms.Compose([\n transforms.Resize(32), \n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize(mean=imagenet_mean, std=imagenet_std)])),\n #Phuong 09/26 drop_last=False -> True\n batch_size=val_bs, drop_last=True, sampler=SubsetRandomSampler(val_indices))\n \n elif dataset == 'fmnist':\n dl_obj = FashionMNIST_truncated\n transform_val = transforms.Compose([\n transforms.ToTensor(),])\n \n random_ids = np.random.randint(10000, size=datasize)\n val_indices = random_ids\n\n val_ds = dl_obj(datadir, dataidxs=val_indices, train=True, transform=transform_val, download=True)\n val_dl = torch.utils.data.DataLoader(dataset=val_ds, batch_size=val_bs, shuffle=True, drop_last=False)\n \n elif dataset == \"cifar10\":\n dl_obj = CIFAR10_truncated\n transform_val = transforms.Compose([\n transforms.ToTensor(),\n transforms.Lambda(lambda x: F.pad(\n Variable(x.unsqueeze(0), requires_grad=False),\n (4, 4, 4, 4), mode='reflect').data.squeeze()),\n transforms.ToPILImage(),\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # Phuong 09/26 change (mean, std) -> same as pretrained imagenet\n transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n\n ])\n random_ids = np.random.randint(10000, size=datasize)\n val_indices = random_ids\n\n val_ds = dl_obj(datadir, dataidxs=val_indices, train=True, transform=transform_val, download=True)\n val_dl = torch.utils.data.DataLoader(dataset=val_ds, batch_size=val_bs, shuffle=True, drop_last=False)\n\n\n return val_dl", "def train(train_loader : torch.utils.data.DataLoader, model : nn.Module, criterion : nn.Module, optimizer : torch.optim.Optimizer) -> logger.Result:", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n batch_size = 1\n while True:\n error = False\n for x, y in dataset.iterate_once(batch_size):\n y_pred = 
self.get_prediction(x)\n y = nn.as_scalar(y)\n if y != y_pred:\n error = True\n nn.Parameter.update(self.get_weights(),x,y)\n if error == False:\n break", "def setUp(self):\n self.dataset = self.dataset_cls()", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )" ]
[ "0.71006465", "0.67667514", "0.67667514", "0.6723365", "0.6689075", "0.66602546", "0.66412747", "0.6627261", "0.65898776", "0.658745", "0.6563074", "0.6560465", "0.65538913", "0.6544277", "0.6510383", "0.6499532", "0.6479281", "0.6478729", "0.64679545", "0.6463354", "0.6457403", "0.6445921", "0.64337486", "0.6432474", "0.64279574", "0.6426093", "0.64148545", "0.6398067", "0.6390227", "0.6361221", "0.6352849", "0.634289", "0.6329834", "0.63208705", "0.63182455", "0.63121617", "0.63078403", "0.63060653", "0.6292663", "0.6284723", "0.6251597", "0.62475586", "0.62471867", "0.623887", "0.6223418", "0.622122", "0.6211223", "0.6204711", "0.6182241", "0.6170674", "0.6163547", "0.6161726", "0.6160794", "0.61564374", "0.6156032", "0.61556363", "0.6150316", "0.6148964", "0.61482626", "0.6122289", "0.6117143", "0.61164063", "0.61152774", "0.60929585", "0.6088351", "0.60881567", "0.6085672", "0.60795575", "0.6075105", "0.6074201", "0.60702604", "0.60686314", "0.60682535", "0.6066753", "0.60579324", "0.60576504", "0.6055356", "0.60536563", "0.6052187", "0.6052187", "0.6052187", "0.6052187", "0.6052187", "0.6050156", "0.60428095", "0.6036763", "0.60357505", "0.6030807", "0.6029843", "0.6027642", "0.6020013", "0.6019917", "0.601209", "0.6006165", "0.600329", "0.6002582", "0.59995306", "0.5990725", "0.59904987", "0.59895283", "0.5989513" ]
0.0
-1
Generates a new MLP using the nn.Sequential class. Returns the generated nn.Sequential MLP.
def generate(self): components = [] components.append(nn.Linear(self.n_features,self.hidden_sizes[0])) self._activation(components,self.activation) self._dropout(components,self.dropout) for i in range(1,len(self.hidden_sizes)): components.append(nn.Linear(self.hidden_sizes[i-1],self.hidden_sizes[i])) self._activation(components,self.activation) self._dropout(components,self.dropout) components.append(nn.Linear(self.hidden_sizes[-1],self.n_classes)) mlp = nn.Sequential(*components) num_params = sum(p.numel() for p in mlp.parameters() if p.requires_grad) print("Created MLP with "+str(num_params)+" learnable params") return mlp
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_mlp_model():\n return snt.Sequential([\n snt.nets.MLP([LATENT_SIZE] * NUM_LAYERS, activate_final=True),\n snt.LayerNorm()\n ])", "def mlp_model(self):\n\n model = Sequential()\n model.add(Dense(self.dense1, input_shape=(784,)))\n model.add(Activation(self.activation))\n model.add(Dropout(self.drop1))\n\n model.add(Dense(self.dense2))\n model.add(Activation(self.activation))\n model.add(Dropout(self.drop2))\n\n model.add(Dense(10))\n model.add(Activation('softmax'))\n\n return model", "def mlp(self):\n # Model.\n model = Sequential()\n model.add(Flatten(input_shape=self.input_shape))\n model.add(Dense(1024))\n model.add(Dropout(0.6))\n model.add(Dense(512))\n model.add(Dropout(0.6))\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model", "def MLP_model(self):\n print(\"Building model..\")\n self.model = Sequential()\n\n # first hidden layer (0)\n self.model.add(Dense(self.h_nodes0, input_dim=self.input_size, use_bias=True))\n self.model.add(Activation(self.activation0))\n self.model.add(Dropout(self.dropout0))\n\n # second hidden layer (1)\n if self.h_nodes1 != None:\n self.model.add(Dense(self.h_nodes1, use_bias=True))\n self.model.add(Activation(self.activation1))\n self.model.add(Dropout(self.dropout1))\n\n # third hidden layer (2)\n if self.h_nodes2 != None:\n self.model.add(Dense(self.h_nodes2, use_bias=True))\n self.model.add(Activation(self.activation2))\n self.model.add(Dropout(self.dropout2))\n\n #output layer\n self.model.add(Dense(self.output_size))\n self.model.add(Activation(self.activation_out))\n\n #compile model\n self.model.compile(loss=self.loss, optimizer=self.optimizer, metrics=[R_squared])\n\n return self.model", "def mlp_model():\n\tmodel = Sequential()\n\tmodel.add(Dense(256, activation='relu', input_shape=(X_train_scaled.shape[1], )))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(256, activation='relu'))\n\tmodel.add(Dropout(0.4))\n\tmodel.add(Dense(FLAGS.nb_classes, activation='softmax'))\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n\tmodel.summary()\n\treturn model", "def mlp_model2():\n model = Sequential()\n model.add(Dense(256, activation='relu', input_shape=(X_train_scaled.shape[1], )))\n model.add(Dropout(0.2))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(32, activation='relu'))\n model.add(Dropout(0.2))\n model.add(Dense(FLAGS.nb_classes, activation='softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n model.summary()\n return model", "def modelMLP():\n \n layerSizes = [(3), (10), (10,10,10), (20,50,20)]\n random_state = 20 # Do not change this random_state\n max_iter = 2000 # fixed max_iter\n\n objs_MLP = []\n\n # Create a list of objects for the classifier for each of the above \"layerSizes\"\n for size in layerSizes:\n mlp = MLPClassifier(hidden_layer_sizes=size, max_iter=max_iter, random_state=random_state)\n objs_MLP.append(mlp)\n\n return objs_MLP", "def mlp_model(layers, units, dropout_rate, input_shape, num_classes):\n # https://developers.google.com/machine-learning/guides/text-classification/step-4\n\n op_units, op_activation = _get_last_layer_units_and_activation(num_classes)\n model = models.Sequential()\n model.add(Dropout(rate=dropout_rate, input_shape=input_shape))\n\n for _ in range(layers-1):\n model.add(Dense(units=units, activation='relu'))\n model.add(Dropout(rate=dropout_rate))\n\n 
model.add(Dense(units=op_units, activation=op_activation))\n return model", "def create_model(num_vars, num_categs, hidden_dims, actfn=None):\n num_outputs = max(1, num_categs)\n num_inputs = num_vars\n actfn = get_activation_function(actfn)\n\n mask = InputMask(None)\n if num_categs > 0:\n pre_layers = EmbedLayer(num_vars=num_vars,\n num_categs=num_categs,\n hidden_dim=hidden_dims[0],\n input_mask=mask,\n sparse_embeds=(num_vars >= 50))\n num_inputs = pre_layers.hidden_dim\n pre_layers = [pre_layers, actfn()]\n else:\n pre_layers = mask\n\n mlps = MultivarMLP(input_dims=num_inputs,\n hidden_dims=hidden_dims,\n output_dims=num_outputs,\n extra_dims=[num_vars],\n actfn=actfn,\n pre_layers=pre_layers)\n return mlps", "def build_mlp(input_data, output_data, n_neurons=[512, 256, 128]):\n input_layer = keras.layers.Input([input_data.shape[-1]], name='input-layer')\n for i, n_unit in enumerate(n_neurons):\n if i == 0:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(input_layer)\n else:\n x = keras.layers.Dense(units=n_unit, activation='relu', name='hidden-layer'+str(i+1))(x)\n \n output_layer = keras.layers.Dense(units=output_data.shape[-1],activation='softmax' , name='output-layer')(x)\n model = keras.models.Model(inputs=input_layer, outputs=output_layer)\n return model", "def mlp(\n\t# input_shape: Tuple[int, ...],\n\t# output_shape: Tuple[int, ...],\n\t# layer_size: int = 128,\n\t# dropout_amount: float = 0.2,\n\t# num_layers: int = 3, \n\tnet_config: Dict\n)->Model:\n\tactivation_fn = net_config[\"hyperparams\"][\"activation_fn\"]\n\tinput_s = net_config[\"shapes\"][\"input_shape\"]\n\toutput_s = net_config[\"shapes\"][\"output_shape\"]\n\n\tinputs = keras.Input(shape=(input_s,))\n\tdense = layers.Dense(64, activation=\"relu\")\n\tx = dense(inputs)\n\tlayer1 = layers.Dense(64, activation=activation_fn)(x)\n\tlayer2 = layers.Dense(64, activation=activation_fn)(layer1)\n\toutputs = layers.Dense(output_s)(layer2)\n\tmodel = keras.Model(inputs=inputs, outputs=outputs, name=\"house_pred\")\n\t\n\treturn model", "def MLP(input_dim, num_classes, hidden_layer_sizes = (100,), activation='relu', solver='sgd', alpha = 1e-4, momentum = 0.2, learning_rate_init=1e-4):\n # create model\n model = Sequential()\n \n # first layer\n model.add(Dense(hidden_layer_sizes[0], \n input_dim=input_dim, \n activation=activation, \n bias_regularizer=regularizers.l2(alpha),\n activity_regularizer=regularizers.l2(alpha)))\n model.add(Dropout(0.2))\n \n # unroll ffn\n # 1 since we already created the first layer\n for i in range(1, len(hidden_layer_sizes)):\n model.add(Dense(hidden_layer_sizes[i], \n activation=activation, \n bias_regularizer=regularizers.l2(alpha),\n activity_regularizer=regularizers.l2(alpha)))\n \n # last layer\n model.add(Dense(num_classes, activation='softmax'))\n \n # Optimiser and compile model \n if solver=='sgd':\n optimizer = SGD(lr=learning_rate_init)\n elif solver=='adam':\n optimizer = Adam(lr=learning_rate_init)\n else :\n optimizer = solver\n model.compile(loss='sparse_categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n return model", "def train(self):\n\n\t\tinput_size = len(self.inputs[0])\n\t\toutput_size = len(set(self.labels))\n\t\thidden_size_1 = 15\n\t\thidden_size_2 = 15\n\n\t\t# One hot encode the labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(self.labels)\n\t\tenc_labels = encoder.transform(self.labels)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t# Create the MLP\n\t\tmodel = 
Sequential()\n\t\tmodel.add(Dense(hidden_size_1, activation='relu', input_dim=input_size))\n\t\tmodel.add(Dense(hidden_size_2, activation='relu'))\n\t\tmodel.add(Dense(output_size, activation='softmax'))\n\n\t\t# Compile model with optimizer and loss function\n\t\tmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n\t\t# Train the model\n\t\tmodel.fit(self.inputs, enc_labels, steps_per_epoch=1000, epochs=20, verbose=2)\n\n\t\tself.model = model", "def get_mlp_model(input_dim, hidden_layer_one=50, hidden_layer_two=25,\n dropout=0.2, learn_rate=0.01):\n \n # initialize a sequential model and add layer to flatten the\n # input data\n model = Sequential()\n #model.add(Flatten())\n \n model.add(Dense(hidden_layer_one, activation=\"relu\",\n input_dim=input_dim))\n model.add(Dropout(dropout))\n model.add(Dense(hidden_layer_two, activation=\"relu\"))\n model.add(Dropout(dropout))\n model.add(Dense(1, activation='linear'))\n # compile the model\n model.compile(\n optimizer=Adam(learning_rate=learn_rate),\n loss=\"mean_squared_error\",\n metrics=[\"mse\", \"mae\"])\n # return compiled model\n return model", "def make_mlp(dim_list, activation, batch_norm=False, dropout=0.0):\n layers = []\n\n for dim_in, dim_out in zip(dim_list[:-1], dim_list[1:]):\n layers.append(nn.Linear(dim_in, dim_out))\n if batch_norm:\n layers.append(nn.BatchNorm1d(dim_out))\n if activation == 'relu':\n layers.append(nn.ReLU())\n elif activation == 'tanh':\n layers.append(nn.Tanh())\n elif activation == 'leakyrelu':\n layers.append(nn.LeakyReLU())\n elif activation == 'sigmoid':\n layers.append(nn.Sigmoid())\n if dropout > 0:\n layers.append(nn.Dropout(p=dropout))\n\n return nn.Sequential(*layers)", "def _build_model(self):\n\n with tf.variable_scope(self.name):\n # adds placeholders, data_normalization and data_noise if desired. 
Also adds a placeholder for dropout probability\n self.layer_in_x, self.layer_in_y = self._build_input_layers()\n\n # create core multi-layer perceptron\n mlp_output_dim = 2 * self.ndim_y * self.n_centers + self.n_centers\n core_network = MLP(\n name=\"core_network\",\n input_layer=self.layer_in_x,\n output_dim=mlp_output_dim,\n hidden_sizes=self.hidden_sizes,\n hidden_nonlinearity=self.hidden_nonlinearity,\n output_nonlinearity=None,\n weight_normalization=self.weight_normalization,\n dropout_ph=self.dropout_ph if self.dropout else None\n )\n\n core_output_layer = core_network.output_layer\n\n # slice output of MLP into three equally sized parts for loc, scale and mixture weights\n slice_layer_locs = L.SliceLayer(core_output_layer, indices=slice(0, self.ndim_y * self.n_centers), axis=-1)\n slice_layer_scales = L.SliceLayer(core_output_layer, indices=slice(self.ndim_y * self.n_centers, 2 * self.ndim_y * self.n_centers), axis=-1)\n slice_layer_weights = L.SliceLayer(core_output_layer, indices=slice(2 * self.ndim_y * self.n_centers, mlp_output_dim), axis=-1)\n\n # locations mixture components\n self.reshape_layer_locs = L.ReshapeLayer(slice_layer_locs, (-1, self.n_centers, self.ndim_y))\n self.locs = L.get_output(self.reshape_layer_locs)\n\n # scales of the mixture components\n reshape_layer_scales = L.ReshapeLayer(slice_layer_scales, (-1, self.n_centers, self.ndim_y))\n self.softplus_layer_scales = L.NonlinearityLayer(reshape_layer_scales, nonlinearity=tf.nn.softplus)\n self.scales = L.get_output(self.softplus_layer_scales)\n\n # weights of the mixture components\n self.logits = L.get_output(slice_layer_weights)\n self.softmax_layer_weights = L.NonlinearityLayer(slice_layer_weights, nonlinearity=tf.nn.softmax)\n self.weights = L.get_output(self.softmax_layer_weights)\n\n # # put mixture components together\n self.y_input = L.get_output(self.layer_in_y)\n self.cat = cat = Categorical(logits=self.logits)\n self.components = components = [MultivariateNormalDiag(loc=loc, scale_diag=scale) for loc, scale\n in zip(tf.unstack(self.locs, axis=1), tf.unstack( self.scales, axis=1))]\n self.mixture = mixture = Mixture(cat=cat, components=components, value=tf.zeros_like(self.y_input))\n\n # regularization\n self._add_softmax_entropy_regularization()\n self._add_l1_l2_regularization(core_network)\n\n # tensor to store samples\n self.samples = mixture.sample() #TODO either use it or remove it\n\n # tensor to compute probabilities\n if self.data_normalization:\n self.pdf_ = mixture.prob(self.y_input) / tf.reduce_prod(self.std_y_sym)\n self.log_pdf_ = mixture.log_prob(self.y_input) - tf.reduce_sum(tf.log(self.std_y_sym))\n else:\n self.pdf_ = mixture.prob(self.y_input)\n self.log_pdf_ = mixture.log_prob(self.y_input)\n\n # symbolic tensors for getting the unnormalized mixture components\n if self.data_normalization:\n self.scales_unnormalized = self.scales * self.std_y_sym\n self.locs_unnormalized = self.locs * self.std_y_sym + self.mean_y_sym\n else:\n self.scales_unnormalized = self.scales\n self.locs_unnormalized = self.locs\n\n # initialize LayersPowered --> provides functions for serializing tf models\n LayersPowered.__init__(self, [self.softmax_layer_weights, self.softplus_layer_scales, self.reshape_layer_locs,\n self.layer_in_y])", "def build_mlp(\n input_size: int,\n output_size: int,\n n_layers: int,\n size: int,\n activation='tanh',\n output_activation='identity',\n):\n if isinstance(activation, str):\n activation = _str_to_activation[activation]\n if isinstance(output_activation, str):\n 
output_activation = _str_to_activation[output_activation]\n layers = []\n in_size = input_size\n for _ in range(n_layers):\n layers.append(nn.Linear(in_size, size))\n layers.append(activation)\n in_size = size\n layers.append(nn.Linear(in_size, output_size))\n layers.append(output_activation)\n return nn.Sequential(*layers)", "def model_creator(config):\n return nn.Linear(1, 1)", "def mlp(\n sizes: List[int],\n inner_activation: nn.Module,\n last_activation: nn.Module = None,\n):\n layers = []\n for j in range(len(sizes) - 1):\n act = (\n inner_activation\n if j < len(sizes) - 2 or not last_activation\n else last_activation\n )\n layers += [nn.Linear(sizes[j], sizes[j + 1]), act]\n\n return nn.Sequential(*layers)", "def mlp_policy_net(x, output_sizes):\n net = hk.Sequential([hk.nets.MLP(output_sizes), jnp.tanh])\n return net(x)", "def create_neural_network():\n model = Sequential()\n model.add(LSTM(32, input_shape=(4, 45))) # 4 time-steps and 45 features\n model.add(Dense(64))\n model.add(Activation('tanh'))\n model.add(Dense(units=45)) # 45 is the number of class\n model.add(Activation('softmax')) # Output the density of probability\n\n model.compile(optimizer=adam(lr=0.001, decay=1e-6),\n loss=\"categorical_crossentropy\",\n metrics=['accuracy'])\n\n model.summary()\n print(\"Creation of the Neural Network is finished.\")\n return model", "def mlp_model(train, layers=(100,), window_size=5):\n # generate a window\n window = mlp_window_selector(train, window_size)\n # interpolate new data\n train_x = mlp_input_mapper(train[0], window)\n train_y = mlp_input_mapper(train[1], window)\n # generate model\n model = MLPRegressor(hidden_layer_sizes=tuple(layers))\n # fit model with new rounded data\n model.fit(train_x, train_y)\n # return model and window\n return (model, window)", "def generator(noise_dim=NOISE_DIM):\n model = nn.Sequential(\n nn.Linear(noise_dim, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 1024),\n nn.ReLU(inplace=True),\n nn.Linear(1024, 784),#784\n nn.Tanh(),\n )\n return model", "def create_nn(self):\n\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(32, input_dim=self.state_size, activation='relu'))\n\t\tmodel.add(Dense(32, activation='relu'))\n\t\tmodel.add(Dense(64, activation='relu'))\n\t\tmodel.add(Dense(self.action_size, activation='linear'))\n\t\tmodel.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))\n\t\treturn model", "def build_network(num_actions: int) -> hk.Transformed:\n\n def q(obs):\n network = hk.Sequential(\n [hk.Flatten(),\n nets.MLP([FLAGS.hidden_units, num_actions])])\n return network(obs)\n\n return hk.without_apply_rng(hk.transform(q, apply_rng=True))", "def neural_network(xtrain, ytrain, xtest, ytest,labels_mapping, scaled = False):\n if not scaled :\n scaler = StandardScaler()\n xtrain = scaler.fit_transform(xtrain)\n xtest = scaler.transform(xtest)\n\n nn = MLPClassifier() #hidden_layer_sizes=30, alpha=0.0001, early_stopping=True\n nn = __train_and_test(nn, xtrain, ytrain, xtest, ytest,labels_mapping)\n return nn", "def build(obs_space: Box, action_space: Box, spec: Spec) -> MLPModel:\n model = MLPModel(obs_space, action_space, spec.network)\n model.initialize_parameters(spec.initializer)\n if spec.residual:\n model = ResidualStochasticModel(model)\n return model", "def __init__(self, hidden_layer_sizes, activation='relu', reg=0.001, k_fold=5, random_state=0):\n print(\"Initialize model Multi-layer Perceptron\")\n self.hidden_layer_sizes = hidden_layer_sizes\n self.activation = activation\n self.reg = reg\n self.k_fold = k_fold\n 
self.random_state = random_state\n self.model = sklearn.neural_network.MLPClassifier(self.hidden_layer_sizes,\n activation=self.activation,\n alpha=self.reg, max_iter=1000, \n random_state=self.random_state)", "def build_model(self) -> nn.Module:\n pass", "def build_mlp(\n n_in: int,\n n_out: int,\n n_hidden: Optional[Union[int, Sequence[int]]] = None,\n n_layers: int = 2,\n activation: Callable = F.silu,\n last_bias: bool = True,\n last_zero_init: bool = False,\n) -> nn.Module:\n # get list of number of nodes in input, hidden & output layers\n if n_hidden is None:\n c_neurons = n_in\n n_neurons = []\n for i in range(n_layers):\n n_neurons.append(c_neurons)\n c_neurons = max(n_out, c_neurons // 2)\n n_neurons.append(n_out)\n else:\n # get list of number of nodes hidden layers\n if type(n_hidden) is int:\n n_hidden = [n_hidden] * (n_layers - 1)\n else:\n n_hidden = list(n_hidden)\n n_neurons = [n_in] + n_hidden + [n_out]\n\n # assign a Dense layer (with activation function) to each hidden layer\n layers = [\n snn.Dense(n_neurons[i], n_neurons[i + 1], activation=activation)\n for i in range(n_layers - 1)\n ]\n # assign a Dense layer (without activation function) to the output layer\n\n if last_zero_init:\n layers.append(\n snn.Dense(\n n_neurons[-2],\n n_neurons[-1],\n activation=None,\n weight_init=torch.nn.init.zeros_,\n bias=last_bias,\n )\n )\n else:\n layers.append(\n snn.Dense(n_neurons[-2], n_neurons[-1], activation=None, bias=last_bias)\n )\n # put all layers together to make the network\n out_net = nn.Sequential(*layers)\n return out_net", "def compile_model_mlp(genome, nb_classes, input_shape):\n # Get our network parameters.\n nb_layers = genome.geneparam['nb_layers' ]\n nb_neurons = genome.nb_neurons()\n activation = genome.geneparam['activation']\n optimizer = genome.geneparam['optimizer' ]\n\n logging.info(\"Architecture:%s,%s,%s,%d\" % (str(nb_neurons), activation, optimizer, nb_layers))\n\n model = Sequential()\n\n # Add each layer.\n for i in range(nb_layers):\n\n # Need input shape for first layer.\n if i == 0:\n model.add(Dense(nb_neurons[i], activation=activation, input_shape=input_shape))\n else:\n model.add(Dense(nb_neurons[i], activation=activation))\n\n model.add(Dropout(0.2)) # hard-coded dropout for each layer\n\n # Output layer.\n model.add(Dense(nb_classes, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', \n optimizer=optimizer,\n metrics=['accuracy'])\n\n return model", "def create_model(self): # noqa: D103\n # reference for creation of the model https://yilundu.github.io/2016/12/24/Deep-Q-Learning-on-Space-Invaders.html\n model=Sequential()\n model.add(Flatten( input_shape=(84,84,4)))\n model.add(Dense(self.num_actions)) \n\n return model", "def build_model(self):\n self.G = Generator(self.g_conv_dim)\n self.D = Discriminator(self.d_conv_dim, self.c_dim)\n self.generator = Generator(self.g_conv_dim).train(False)\n\n self.G = nn.DataParallel(self.G)\n self.D = nn.DataParallel(self.D)\n\n # For Adam (Unofficial)\n # self.g_optimizer = torch.optim.Adam(self.G.parameters(), self.g_lr, [self.beta1, self.beta2])\n # self.d_optimizer = torch.optim.Adam(self.D.parameters(), self.d_lr, [self.beta1, self.beta2])\n\n # For RMSprop(Official)\n self.g_optimizer = torch.optim.RMSprop(self.G.parameters(), lr=0.0001)\n self.d_optimizer = torch.optim.RMSprop(self.D.parameters(), lr=0.0001)\n\n self.accumulate(self.generator, self.G.module, 0)\n # self.print_network(self.G, 'G')\n # self.print_network(self.D, 'D')\n \n self.G.to(self.device)\n 
self.D.to(self.device)\n self.generator.to(self.device)\n\n # weight init\n self.G.apply(self.weights_init)\n self.D.apply(self.weights_init)\n self.generator.apply(self.weights_init)", "def create_model_net(n_input,n_hidden,n_output):\n net = Sequential(\n L.Linear(n_input, n_hidden), F.relu,\n L.Linear(n_hidden, n_hidden), F.relu,\n L.Linear(n_hidden, n_output), F.softmax)\n return net", "def build_graph(self):\n return nn.Sequential(\n nn.Linear(self.input_dim, self.hidden_dim),\n self.hidden_activation,\n nn.Linear(self.hidden_dim, self.n_classes_))", "def generate_homography_nn_adam(self):\n # Create the NN\n self.set_optimizer_adam()\n self.set_callback(utils.lr_callback)\n self.build_model()\n self.compile()", "def _create_nn(self):\n with tf.name_scope('policy_network'):\n with tf.variable_scope(\"policy_network\"):\n model = tf.keras.Sequential(name='policy_network_model')\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[0], activation=tf.nn.relu,\n input_shape=(1, self.neurons_in_each_layer[0])))\n for num_neurons in self.neurons_in_each_layer[1:-1]:\n model.add(tf.keras.layers.Dense(num_neurons, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(self.neurons_in_each_layer[-1], name='policy_output_layer'))\n\n return model", "def create_probabilistic_nn(feature_names, target_names, hidden_units, name = 'PNN'):\n return create_deterministic_nn(feature_names, target_names, hidden_units,\n name = name, out = 'P')", "def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()", "def new(self):\n self.define_layers()\n self.model = nn.Sequential(*self.layers)\n self.model.cuda()\n self.model = orthogonal_init(self.model)\n\n # Re-count N\n self.count_params()", "def MLP(input_h, num_layers, out_dim, name=\"MLP\", activation=tf.nn.relu, reuse=None):\n with tf.variable_scope(name, reuse=reuse):\n h = input_h\n for i in range(num_layers):\n h = tf.layers.dense(\n h, out_dim, \n use_bias=True, activation=activation,\n name=\"mlp{}\".format(i),\n reuse = reuse\n )\n h = tf.layers.dense(\n h, out_dim, use_bias=True, name=\"mlp_out\", reuse=reuse\n )\n return h", "def start_mlp(settings):\n\n\t# settings = config_model(parser, validator)\n\tsettings = validate_config(settings)\n\n\tnetwork = networks.FeedForwardNetwork(\n\t\tincoming=settings.input_shape,\n\t\tobjective_functions=settings.objective,\n\t\tupdate_function=settings.update,\n\t\tlearning_rate_policy=settings.learning_rate,\n\t\tparameter_max_local_l2_norm=settings.parameter_max_local_l2_norm,\n\t\tgradient_max_global_l2_norm=settings.gradient_max_global_l2_norm,\n\t\t# validation_interval=settings.validation_interval,\n\t)\n\tmlp = 
networks.MultiLayerPerceptronFromSpecifications(\n\t\tnetwork._input_layer,\n\t\tdense_dimensions=settings.dense_dimensions,\n\t\tdense_nonlinearities=settings.dense_nonlinearities,\n\t\tlayer_activation_types=settings.layer_activation_types,\n\t\tlayer_activation_parameters=settings.layer_activation_parameters,\n\t\tlayer_activation_styles=settings.layer_activation_styles\n\t)\n\tnetwork.set_network(mlp)\n\tnetwork.set_regularizers(settings.regularizer)\n\n\tstart_training(network, settings)", "def make_model(self):\n model = Sequential()\n model.add(Embedding(self.vocab, self.embd_size,\n input_length=self.sentence_size))\n model.add(LSTM(self.lstm_size, return_sequences=False))\n if self.den1_size > 0:\n model.add(Dense(self.den1_size, activation='relu'))\n if self.drop_rate > 0:\n model.add(Dropout(self.drop_rate))\n if self.den2_size > 0:\n model.add(Dense(self.den2_size, activation='relu'))\n model.add(Dense(16, activation='relu'))\n model.add(Activation(self.activation))\n model.compile(optimizer=self.optimizer,\n loss=self.loss_func,\n metrics=['accuracy'])\n return model", "def init_model(self):\n model = Sequential()\n model.add(Dense(units=24, input_dim=self.input_shape[0],\n activation='relu'))\n model.add(Dense(units=24, activation='relu'))\n # We want rewards instead of probability, so use linear here\n model.add(Dense(units=self.output_num, activation='linear'))\n model.compile(loss='mse', optimizer=Adam(lr=self.eta))\n return model", "def build_mlp(input_, config):\n current_input = input_\n print(current_input)\n for i in range(len(config['fcc_layers']) - 1):\n current_input = tf.keras.layers.Dense(\n units=config['fcc_layers'][i], activation='tanh',\n name='fcc_layer_{}'.format(i + 1))(current_input)\n current_input = tf.keras.layers.Dropout(\n rate=config['dropout'], name='dropout_{}'.format(i + 1))(current_input)\n cascade_embedding_layer = tf.keras.layers.Dense(\n units=config['fcc_layers'][-1], activation='tanh',\n name='cascade_embedding_layer')(current_input)\n cascade_embedding_layer_do = tf.keras.layers.Dropout(\n rate=config['dropout'],\n name='cascade_embedding_dropout')(cascade_embedding_layer)\n prediction_layer = tf.keras.layers.Dense(\n units=1, activation='sigmoid',\n name='prediction_layer')(cascade_embedding_layer_do)\n return cascade_embedding_layer, prediction_layer", "def _setup_model(self) -> torch.nn.Sequential:\r\n\r\n # setting up model\r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n if self.get_hyperparam().get_value(ids_[13]):\r\n init_ = lambda mod: self._default_weight_bias_init(mod,\r\n self.get_hyperparam().get_value(ids_[14]),\r\n self.get_hyperparam().get_value(ids_[15]),\r\n self.get_hyperparam().get_value(ids_[16]))\r\n\r\n modules = []\r\n for hd in range(int(self.get_hyperparam().get_value(ids_[3]))+1):\r\n if hd == 0:\r\n act_input_size = self.get_hyperparam().get_value(ids_[0])\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n elif hd == self.get_hyperparam().get_value(ids_[3]):\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[1])\r\n act_fct = self.get_hyperparam().get_value(ids_[6])()\r\n else:\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n \r\n if self.get_hyperparam().get_value(ids_[13]):\r\n 
modules.append(init_(torch.nn.Linear(int(act_input_size), int(output_size))))\r\n else:\r\n modules.append(torch.nn.Linear(int(act_input_size), int(output_size)))\r\n modules.append(act_fct)\r\n\r\n model = torch.nn.Sequential(*modules)\r\n \r\n # add process to the model\r\n try:\r\n model = self._add_init(model)\r\n except:\r\n pass \r\n \r\n self._loss_fct = self.get_hyperparam().get_value(ids_[8])()\r\n self._optimizer = self.get_hyperparam().get_value(ids_[7])(model.parameters(), lr=self.get_hyperparam().get_value(ids_[12]))\r\n self._sampling_seed = self.get_hyperparam().get_value(ids_[11])\r\n \r\n return model", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteratin print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self, num_lemmas, num_pos, num_dep, num_directions=5, n_epochs=10, num_relations=2,\n alpha=0.01, lemma_embeddings=None, dropout=0.0, use_xy_embeddings=False, num_hidden_layers=0):\n self.n_epochs = n_epochs\n self.num_lemmas = num_lemmas\n self.num_pos = num_pos\n self.num_dep = num_dep\n self.num_directions = num_directions\n self.num_relations = num_relations\n self.alpha = alpha\n self.dropout = dropout\n self.use_xy_embeddings = use_xy_embeddings\n self.num_hidden_layers = num_hidden_layers\n self.update = True\n\n self.lemma_vectors = None\n if lemma_embeddings is not None:\n self.lemma_vectors = lemma_embeddings\n self.lemma_embeddings_dim = lemma_embeddings.shape[1]\n else:\n self.lemma_embeddings_dim = LEMMA_DIM\n\n # Create the network\n print 'Creating the network...'\n self.builder, self.model, self.model_parameters = create_computation_graph(self.num_lemmas, self.num_pos,\n self.num_dep, self.num_directions,\n self.num_relations,\n self.lemma_vectors,\n use_xy_embeddings,\n self.num_hidden_layers,\n self.lemma_embeddings_dim)\n print 'Done!'", "def make_basic_ngpu(nb_classes=10, input_shape=(None, 28, 28, 1), **kwargs):\n model = make_basic_cnn()\n layers = model.layers\n\n model = MLPnGPU(nb_classes, layers, input_shape)\n 
return model", "def generator (self) -> tf.keras.Sequential:\n return self._generator", "def simple_mlp(num_classes, depth, hidden_dim, **kwargs):\n return SimpleMLP(d_in=3*32*32, d_out=num_classes, d_h=hidden_dim, n_h=depth, seed=0)", "def create(window_size=3,\r\n input_dimension=9,\r\n output_vocabsize=8,\r\n n_quadratic_filters=2,\r\n token_representation_size=5,\r\n concatenated_representation_size=7,\r\n lr=0.01,\r\n seed=123,\r\n noise_level=0.2,\r\n qfilter_relscale=0.1,\r\n compile_mode=None):\r\n activation_function = T.tanh\r\n\r\n architecture = ConvolutionalMLP( \\\r\n window_size=window_size,\r\n n_quadratic_filters=n_quadratic_filters,\r\n activation_function=activation_function,\r\n reconstruction_cost_function=quadratic,\r\n tie_weights=False\r\n )\r\n\r\n backup = config.warn.sum_div_dimshuffle_bug\r\n config.warn.sum_div_dimshuffle_bug = False\r\n try:\r\n model = architecture.make(input_size=input_dimension,\r\n input_representation_size=token_representation_size, hidden_representation_size=concatenated_representation_size, output_size=output_vocabsize, lr=lr, seed=seed, noise_level=noise_level, qfilter_relscale=qfilter_relscale, mode=compile_mode)\r\n finally:\r\n config.warn.sum_div_dimshuffle_bug = backup\r\n return model", "def create_model(self, input_state, layer1=450, layer2=350):\n # create the DQN\n self.model = Sequential()\n self.model.add(Dense(units=layer1, input_dim=input_state.nn_input.size))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=layer2))\n self.model.add(Activation('relu'))\n\n self.model.add(Dense(units=(input_state.size_graph+1)))\n self.model.add(Activation('linear'))\n\n self.model.compile(optimizer='rmsprop', loss='mse')\n\n self.model.predict(input_state.nn_input.reshape(1, input_state.nn_input.size), batch_size=1)", "def sequential_model():\n model = build_models()\n seq_model = Sequential(model[0]['layers'], name=model[0]['name'])\n return seq_model", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train 
loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '... building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def generate( k, n, a0 = 10, scale = 10, 
prior = \"uniform\" ): \n tm = TopicModel.generate( k, n, scale, prior )\n return LDATopicModel( a0 * tm.weights, tm.topics )", "def mlp(sizes, sac=False):\n layers = []\n limit = len(sizes) if sac is False else len(sizes) - 1\n for j in range(limit - 1):\n act = nn.ReLU if j < limit - 2 else nn.Identity\n layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]\n return nn.Sequential(*layers)", "def CreateMdp(self):\n self.states = {\"\":0}\n self.actions = {}\n self.rewards = []\n stateNodes = filter(lambda x: x.type not in [\"ActionNode\",\"MotorNode\"],self.nodes.values())\n actionNodes = filter(lambda x: x.type is \"ActionNode\",self.nodes.values())\n for i in (range(1,len(stateNodes)+1)):\n self.states[stateNodes[i-1].name] = i\n for i in (range(0,len(actionNodes))):\n self.actions[i] = actionNodes[i].name\n if(len(self.states) < 2 or len(self.actions) < 1):\n print(\"Error: States and actions must be non-empty\")\n # Transition probabilities: equal probabilities\n self.P = []\n for i in (range(0,len(self.actions))):\n b = []\n for j in (range(0,len(self.states))):\n a = []\n for k in (range(0,len(self.states))):\n a.append(1.0/len(self.states))\n b.append(a)\n self.P.append(b)\n\n # Rewards\n self.R = []\n for i in (range(0,len(self.actions))):\n b = []\n for j in (range(0,len(self.states))):\n a = []\n for k in (range(0,len(self.states))):\n a.append(actionNodes[i].reward)\n b.append(a)\n self.R.append(b)\n\n print(\"Number of states: \" + str(len(self.states)))\n print(\"Number of actions: \" + str(len(self.actions)))\n self.vi = mdptoolbox.mdp.QLearning(np.array(self.P), np.array(self.R), 0.5)\n self.vi.run()\n print(self.vi.Q)", "def trainNN():\n\n yTrain = [] # holds y vals of curves/lines\n trainLabels = [] # holds center labels\n\n tryCenters = np.linspace(1, 9, 45)\n\n for i in range(len(tryCenters)):\n x = np.linspace(tryCenters[i]-.2, tryCenters[i]+.2, 18)\n for j in range(1000):\n centers = round(random.uniform(tryCenters[i]-.05,\n tryCenters[i]+.05), 1)\n y = gauss_func(x, .05, centers, 1)\n yTrain.append(y)\n trainLabels.append(1)\n\n y = gauss_func(x, .05,\n round(random.uniform(tryCenters[i]-.3,\n tryCenters[i]-.17), 1), 1)\n yTrain.append(y)\n trainLabels.append(0)\n\n y = gauss_func(x, .05,\n round(random.uniform(tryCenters[i]+.17,\n tryCenters[i]+.3), 1), 1)\n yTrain.append(y)\n trainLabels.append(0)\n\n y = 0*x\n yTrain.append(y)\n trainLabels.append(0)\n clf = MLPClassifier(solver='lbfgs')\n clf.fit(yTrain, trainLabels)\n return clf", "def create_mtl_pt_model(model_args):\n global_model = torch.load(model_args['global_model_fn'])\n model = MTL_MIMIC_Model(model_args[\"input_dim\"],\n model_args[\"n_layers\"],\n model_args[\"units\"],\n model_args[\"num_dense_shared_layers\"],\n model_args[\"dense_shared_layer_size\"],\n model_args[\"n_multi_layers\"],\n model_args[\"multi_units\"],\n model_args[\"output_dim\"],\n model_args[\"tasks\"],\n lstm = global_model.lstm,\n shared = global_model.rest[:-2],\n )\n return model", "def create_model():\n\n # Create a sequential model (a simple NN is created) adding a softmax activation at the end with 10 units:\n model = Sequential()\n model.add(Dense(units=128, activation=\"relu\", input_shape=(784,)))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=128, activation=\"relu\"))\n model.add(Dense(units=10, activation=\"softmax\"))\n\n # Compile the model using the loss function \"categorical_crossentropy\" and Stocastic Gradient Descent optimizer:\n model.compile(optimizer=SGD(0.001), 
loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # Return the created model\n return model", "def make_model():\n model = Sequential()\n model.add(Dense(1000, input_shape=(INPUT_SIZE,), activation='relu'))\n model.add(Dense(1000, activation='relu'))\n model.add(Dense(4, activation='sigmoid'))\n model.compile(loss='mse', metrics=['accuracy'])\n return model", "def _make_network(self):\n inp = Input(shape = (self.input_dim,))\n x = Dense(256, activation='relu')(inp)\n x = GaussianNoise(1.0)(x)\n #x = Flatten()(x) # I assume this is if the input is a convolutional neural net?\n x = Dense(128, activation='relu')(x)\n x = GaussianNoise(1.0)(x)\n out = Dense(self.output_dim, activation='tanh', kernel_initializer=RandomUniform())(x)\n out = Lambda(lambda i: i * self.act_range)(out)\n return Model(inp, out)", "def mlpfwd(self,inputs):\n\n self.hidden = np.dot(inputs,self.weights1);\n self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))\n self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)\n\n outputs = np.dot(self.hidden,self.weights2);\n\n # Different types of output neurons\n if self.outtype == 'linear':\n \treturn outputs\n elif self.outtype == 'logistic':\n return 1.0/(1.0+np.exp(-self.beta*outputs))\n elif self.outtype == 'softmax':\n normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))\n return np.transpose(np.transpose(np.exp(outputs))/normalisers)\n else:\n print \"error\"", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n for i in range(len(layers)):\n model.add(K.layers.Dense(layers[i],\n activation=activations[i],\n input_shape=(nx,),\n kernel_regularizer=K.regularizers.l2(lambtha)))\n if i + 1 < len(layers):\n model.add(K.layers.Dropout(1 - keep_prob))\n return model", "def setup_model(self) -> (nn.Module, int):", "def __init__(\n\t\t\tself, xi_dim, u_dim, noise_dim, n_hidden=[50, 50,],\n\t\t\tact_fct=tf.nn.tanh, noise_scale=1.):\n\n\t\tPolicy.__init__(self, xi_dim, u_dim)\n\n\t\tself._nn = MLP(\n\t\t\tn_input=xi_dim + noise_dim,\n\t\t\tn_output=u_dim,\n\t\t\tn_hidden=n_hidden,\n\t\t\tbatch_size_svi=1,\n\t\t\tact_fct=act_fct\n\t\t)\n\n\t\tself._noise_dim = noise_dim\n\t\tself._noise_scale = noise_scale", "def __init__(self, num_layers, input_dim, hidden_dim, output_dim, linear_or_not=True):\n super(MLP, self).__init__()\n self.linear_or_not = linear_or_not # default is linear model\n self.num_layers = num_layers\n self.output_dim = output_dim\n\n if num_layers < 1:\n raise ValueError(\"number of layers should be positive!\")\n elif num_layers == 1:\n # Linear model\n self.linear = nn.Linear(input_dim, output_dim)\n else:\n # Multi-layer model\n self.linear_or_not = False\n self.linears = torch.nn.ModuleList()\n self.batch_norms = torch.nn.ModuleList()\n\n self.linears.append(nn.Linear(input_dim, hidden_dim))\n for layer in range(num_layers - 2):\n self.linears.append(nn.Linear(hidden_dim, hidden_dim))\n self.linears.append(nn.Linear(hidden_dim, output_dim))\n\n for layer in range(num_layers - 1):\n self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))", "def create_deterministic_nn(feature_names, target_names, hidden_units, name = 'DNN', out = 'D'):\n inputs = create_model_inputs(feature_names)\n features = inputs #layers.concatenate(list(inputs.values()))\n\n # Create hidden layers using the Dense layer.\n for units in hidden_units:\n features = layers.Dense(\n units=units,\n activation=\"sigmoid\",\n )(features)\n\n if out == 'D':\n outputs = 
create_model_outputs_det(target_names,features)\n if out == 'P':\n outputs = create_model_outputs_prob(target_names,features)\n\n model = keras.Model(inputs=inputs, outputs=outputs, name = name)\n return model", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will translate\r\n # into a HiddenLayer with a tanh activation function connected to the\r\n # LogisticRegression layer; the activation function can be replaced by\r\n # sigmoid or any other nonlinear function\r\n self.hiddenLayer = HiddenLayer(rng=rng, input=input,\r\n n_in=n_in, n_out=n_hidden,\r\n activation=T.tanh)\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input=self.hiddenLayer.output,\r\n n_in=n_hidden,\r\n n_out=n_out)\r\n\r\n # L1 norm ; one regularization option is to enforce L1 norm to\r\n # be small\r\n self.L1 = abs(self.hiddenLayer.W).sum() \\\r\n + abs(self.logRegressionLayer.W).sum()\r\n\r\n # square of L2 norm ; one regularization option is to enforce\r\n # square of L2 norm to be small\r\n self.L2_sqr = (self.hiddenLayer.W ** 2).sum() \\\r\n + (self.logRegressionLayer.W ** 2).sum()\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n # same holds for the function computing the number of errors\r\n self.errors = self.logRegressionLayer.errors\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def mlp(x, hidden_sizes=(32,), activation=tf.tanh, output_activation=None):\n for h in hidden_sizes[:-1]:\n x = tf.layers.dense(x, units=h, activation=activation)\n return tf.layers.dense(x, units=hidden_sizes[-1], activation=output_activation)", "def build_weight_model(self):\r\n model = nn.Sequential(\r\n nn.Linear(self.in_channels, self.out_channels),\r\n )\r\n init_sequential_weights(model)\r\n return model", "def build_weight_model(self):\r\n model = nn.Sequential(\r\n nn.Linear(self.in_channels, self.out_channels),\r\n )\r\n init_sequential_weights(model)\r\n return model", "def __init__(self, rng, input, n_in, n_hidden, n_out):\n\n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function\n self.hiddenLayer = HiddenLayer(\n rng=rng,\n input=input,\n n_in=n_in,\n n_out=n_hidden,\n activation=T.tanh\n )\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayer.output,\n n_in=n_hidden,\n n_out=n_out\n )\n # end-snippet-2 start-snippet-3\n # L1 norm ; one regularization option is to enforce L1 norm to\n # be small\n self.L1 = (\n abs(self.hiddenLayer.W).sum()\n + abs(self.logRegressionLayer.W).sum()\n )\n\n # square of L2 norm ; one regularization option is to enforce\n # square of L2 norm to be small\n self.L2_sqr = (\n (self.hiddenLayer.W ** 2).sum()\n + (self.logRegressionLayer.W ** 2).sum()\n )\n\n # negative log likelihood of the MLP is given by the negative\n # log likelihood of the output of the model, computed in the\n 
# logistic regression layer\n self.negative_log_likelihood = (\n self.logRegressionLayer.negative_log_likelihood\n )\n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are the parameters of the two layer it is\n # made out of\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params\n # end-snippet-3\n\n # keep track of model input\n self.input = input", "def build_model(nx, layers, activations, lambtha, keep_prob):\n λ = lambtha\n\n # create model\n a_model = K.Sequential()\n n_layers = len(layers)\n regularizer = K.regularizers.l2(λ)\n\n for i in range(n_layers):\n # Adds a densely-connected layer with layer[i] units to the model:\n a_model.add(K.layers.Dense(\n units=layers[i],\n input_dim=nx,\n kernel_regularizer=regularizer,\n activation=activations[i],\n )\n )\n # To avoid creation of:\n # Layer (type) Output Shape Param #\n # dropout_2 (Dropout) (None, 10) 0\n if i < n_layers - 1:\n a_model.add(K.layers.Dropout(1 - keep_prob))\n return a_model", "def build_model(self):\n import tensorflow as tf\n \n y = tf.nn.relu(tf.matmul(self.variables[\"input_observation\"], self.variables[\"W1\"]) + \n self.variables[\"b1\"], name=\"y1\")\n \n for i in range(self.n_hidden-1):\n y = tf.nn.relu(tf.matmul(y, self.variables[\"W\"+str(i+2)]) + \n self.variables[\"b\"+str(i+2)], name=\"y\"+str(i+2))\n \n self.variables[\"y\"] = [tf.matmul(y, self.variables[\"Wo_0\"]) + self.variables[\"bo_0\"]]\n for i in range(1, len(self.output_size)):\n self.variables[\"y\"] += [tf.matmul(y, self.variables[\"Wo_%s\"%i]) + self.variables[\"bo_%s\"%i]]", "def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,\n output_dim, final_dropout, learn_eps, graph_pooling_type,\n neighbor_pooling_type):\n super(GIN, self).__init__()\n self.num_layers = num_layers\n self.learn_eps = learn_eps\n\n # List of MLPs\n self.ginlayers = torch.nn.ModuleList()\n self.batch_norms = torch.nn.ModuleList()\n\n for layer in range(self.num_layers):\n if layer == 0:\n mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)\n else:\n mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)\n\n self.ginlayers.append(\n GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))\n self.batch_norms.append(nn.BatchNorm1d(hidden_dim))\n\n # Linear function for graph poolings of output of each layer\n # which maps the output of different layers into a prediction score\n self.linears_prediction = torch.nn.ModuleList()\n\n for layer in range(num_layers):\n if layer == 0:\n self.linears_prediction.append(\n nn.Linear(input_dim, output_dim))\n else:\n self.linears_prediction.append(\n nn.Linear(hidden_dim, output_dim))\n\n self.drop = nn.Dropout(final_dropout)\n\n if graph_pooling_type == 'sum':\n self.pool = SumPooling()\n elif graph_pooling_type == 'mean':\n self.pool = AvgPooling()\n elif graph_pooling_type == 'max':\n self.pool = MaxPooling()\n else:\n raise NotImplementedError", "def get_model( ):\n\n return Lasso(alpha = 1e-3, fit_intercept = True, precompute = True, max_iter = 1e4)", "def _create_model(self, input_state, num_actions):\n with tf.name_scope('shared_layers'):\n layer = Dense(NN_WIDTH, activation = 'relu')(input_state);\n layer = Dense(NN_WIDTH, activation = 'relu')(layer);\n layer = Dense(NN_WIDTH, activation = 'relu')(layer);\n layer = Dense(NN_WIDTH, activation = 'relu')(layer);\n with tf.name_scope('policy_network'):\n policy = Dense(num_actions, activation = 'softmax')(layer);\n with 
tf.name_scope('value_network'):\n value = Dense(1)(layer);\n return (policy, value);", "def _model(self):\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(units=64, input_dim=self.state_size, activation=\"relu\"))\n\t\tmodel.add(Dense(units=32, activation=\"relu\"))\n\t\tmodel.add(Dense(units=16, activation=\"relu\"))\n\t\tmodel.add(Dense(units=8, activation=\"relu\"))\n\t\tmodel.add(Dense(self.action_size, activation=\"linear\"))\n\t\tmodel.compile(loss=\"mse\", optimizer=Adam(lr=self.learning_rate))\n\n\t\treturn model", "def buildP(self):\r\n\r\n print 'Building P ...'\r\n\r\n #---Building p(y|x)---#\r\n pygx_params_mlp = MLP(activations=self.hyper['pygx_activs'],\r\n dims=self.hyper['pygx_dims'],\r\n weights_init=self.hyper['pygx_W_init'],\r\n biases_init=Constant(0))\r\n\r\n pygx_params = pygx_params_mlp.apply(self.x.reshape((self.x.shape[0]*self.x.shape[1],self.x.shape[2])))\r\n pygx_params = pygx_params.reshape((self.x.shape[0],self.x.shape[1],2*self.hyper['y_dim']))\r\n pygx_params_mlp.initialize()\r\n\r\n # self.pygx_mu.shape == (minibatch size, L_x , num of dimension of y)\r\n self.pygx_mu = pygx_params[:,:,:self.hyper['y_dim']]\r\n\r\n # self.pygx_var.shape == (minibatch size, L_x, num of dimension of y)\r\n self.pygx_var = T.exp( pygx_params[:,:,self.hyper['y_dim']:] )\r\n\r\n\r\n #---Building graph for the density of p(y|x)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.y.dimshuffle(0,'x',1) - self.pygx_mu)**2/(2*self.pygx_var), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['y_dim']/2.)*T.exp(T.sum(T.log(self.pygx_var), axis=2))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n pygx = norm_cst*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_pygx = T.log(pygx + little_num)\r\n\r\n #---Building NN for p(x|z=j,w) for all j---#\r\n pxgzw_mus = [None]*self.hyper['num_clust']\r\n pxgzw_vars = [None]*self.hyper['num_clust']\r\n pxgzw = [None]*self.hyper['num_clust']\r\n\r\n for j in range(self.hyper['num_clust']):\r\n\r\n pxgzw_params_mlp = MLP(activations=self.hyper['pxgzw_activs'][j],\r\n dims=self.hyper['pxgzw_dims'][j],\r\n weights_init=self.hyper['pxgzw_W_init'],\r\n biases_init=Constant(0))\r\n\r\n pxgzw_params = pxgzw_params_mlp.apply(self.w.reshape((self.w.shape[0]*self.w.shape[1],self.w.shape[2])))\r\n pxgzw_params = pxgzw_params.reshape((self.w.shape[0],self.w.shape[1], 2*self.hyper['x_dim']))\r\n pxgzw_params_mlp.initialize()\r\n\r\n # pxgzw_mus[j].shape == (minibatch size, L_w , num of dimension of x)\r\n pxgzw_mus[j] = pxgzw_params[:,:,:self.hyper['x_dim']]\r\n\r\n # pxgzw_vars[j].shape == (minibatch size, L_w, num of dimension of x)\r\n pxgzw_vars[j] = T.exp( pxgzw_params[:,:,self.hyper['x_dim']:] )\r\n\r\n #---Building graph for the density of p(x|z=j,w)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x.dimshuffle(0,'x',1,2) - pxgzw_mus[j].dimshuffle(0,1,'x',2))**2/(2*pxgzw_vars[j].dimshuffle(0,1,'x',2)), axis=3)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(pxgzw_vars[j]), axis=2))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of w samples (L_w), # of x samples (L_x))\r\n pxgzw[j] = norm_cst.dimshuffle(0,1,'x')*T.exp(inside_exp)\r\n\r\n\r\n # shape is (minibatch size, L_w , # of clusters , num of dimension of x)\r\n self.pxgzw_mus = T.concatenate([mu.dimshuffle(0,1,'x',2) for mu in pxgzw_mus], axis=2)\r\n # shape is (minibatch size, L_w , # of clusters , num of dimension of x)\r\n self.pxgzw_vars = T.concatenate([var.dimshuffle(0,1,'x',2) for var in pxgzw_vars], 
axis=2)\r\n\r\n # self.pxgzw.shape == (minibatch size, L_w, L_x, num_clust)\r\n self.pxgzw = T.concatenate([density.dimshuffle(0,1,2,'x') for density in pxgzw], axis=3)\r\n self.log_pxgzw = T.log(self.pxgzw + little_num)\r\n\r\n #---Building the p(z=j|x,w) posterior for all j---#\r\n # self.log_pzgxw.shape == (minibatch size, L_w, L_x, num_clust)\r\n self.log_pzgxw = T.log(self.pxgzw + little_num) -T.log(T.sum(self.pxgzw + little_num, axis=3).dimshuffle(0,1,2,'x'))", "def get_trans_net():\n return nn.Sequential(nn.Linear(2, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 2))", "def __init__(self,layers,activations):\n model = utils.buildMLP(layers, activations)\n super().__init__(torch.nn.Sequential(model), nnType='dnn')", "def create_model(window, input_shape, num_actions,\n model_name='q_network'):\n if model_name == 0:\n model = linear_model(window, input_shape, num_actions)\n elif model_name == 1:\n model = deep_model(window, input_shape, num_actions)\n elif model_name == 2:\n model = dueling_deep(window, input_shape, num_actions)\n else:\n print(\"No suitable models found.\")\n exit()\n print(model.summary())\n return model", "def __init__(self, sizes, final=None, batchnorm=False, dropout=0.0):\n super(MLP, self).__init__()\n\n self.layers = nn.ModuleList()\n # If there is only one input dimension, everything is fine\n if sizes[0] == 1:\n self.layers.append(nn.Linear(sizes[0], sizes[1]))\n\n # For multiple input dimensions, each one has a separate following\n # hidden layer.\n # This is necessary for the partial training later on.\n else:\n self.layers.append(nn.ModuleList([nn.Linear(1, sizes[1])\n for _ in range(sizes[0])]))\n\n # Add the remaining layers with selu activations\n for i in range(len(sizes) - 1)[1:]:\n if i != (len(sizes) - 1):\n if batchnorm:\n self.layers.append(nn.BatchNorm1d(sizes[i]))\n self.layers.append(nn.SELU())\n if dropout is not None:\n if sizes[i] < 32:\n print(\"Warning: Dropout {} on only {} parameters...\"\n .format(dropout, sizes[i]))\n self.layers.append(nn.Dropout(p=dropout))\n self.layers.append(nn.Linear(sizes[i], sizes[i + 1]))\n\n if final is not None:\n self.layers.append(final)", "def train_init():\n np.random.seed(seed)\n tf.random.set_random_seed(seed)\n random.seed(seed)\n\n name = str(seed)\n desc = MNMDescriptor(5, inp_dict, outp_dict, name=name)\n desc = recursive_creator(desc, 0, 0, seed)\n hypers = {}\n for hyper in hyps:\n hypers[hyper] = np.random.choice(hyps[hyper])\n\n model = MNM(desc, hypers[\"btch_sz\"], data_inputs[\"Train\"], data_outputs[\"Train\"], loss_func_weights={\"o0\": hypers[\"wo0\"], \"o1\": hypers[\"wo1\"], \"o2\": hypers[\"wo2\"]}, name=name, lr=hypers[\"lr\"], opt=hypers[\"opt\"], random_seed=seed)\n if intelligent_training == 2:\n loss_weights = model.sequential_training(hypers[\"btch_sz\"], iter_lim // 50, conv_param, proportion, iter_lim, display_step=-1)\n else:\n loss_weights = model.autoset_training(hypers[\"btch_sz\"], iter_lim//50, conv_param, proportion, iter_lim, display_step=-1, incr=incr, decr=decr, scaling=scale)\n\n\n # ####### Save model characteristics.\n\n model.descriptor.save(path=\"\")\n model.save_weights(path=\"\")\n\n results = evaluate_model(model)\n\n np.save(\"hypers\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", hypers)\n\n np.save(\"orig_results\" + str(seed) + \"_\" + str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", results)\n\n np.save(\"loss_weights\" + str(seed) + \"_\" + 
str(intelligent_training) + \"_\" + str(n_networks) + \"_\" + \".npy\", loss_weights)", "def createModel(data, eta, epoch, file_name, plot_name, no_of_input=2):\n logging.info(f\"\\n\\n>>>>>>>>>>Starting training>>>>>>>>>>>>>>>>{file_name}\")\n logging.info(f\"eta = {str(eta)} epochs ={str(epoch)}\\n\")\n\n df = pd.DataFrame(data)\n\n X,y = prepare_data(df)\n logging.info(f\"X={X}\")\n logging.info(f\"Y={y}\") \n\n model = Perceptron(eta, epoch, no_of_input)\n\n model.fit(X, y)\n\n model.predict(X)\n\n model.total_loss()\n\n save_model(model, file_name)\n\n \n logging.info(f\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n\n if no_of_input == 2:\n save_plot(df, plot_name, model)", "def nn_model():\n seed = 321\n np.random.seed(seed)\n rmsprop = RMSprop(lr=0.0001)\n # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n # kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=seed)\n # for train, test in kfold.split(X, y):\n model_nn = Sequential()\n model_nn.add(Dense(100, input_shape=(117,), activation='relu'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(125, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(30, activation='relu', kernel_initializer='normal'))\n model_nn.add(Dropout(0.5))\n model_nn.add(Dense(1, activation='sigmoid'))#softmax\n model_nn.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n #model_nn.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=rmsprop)\n # Compile model\n model_nn.compile(optimizer=rmsprop, loss='binary_crossentropy', metrics=['accuracy'])\n return model_nn", "def __init__(self, rng, input, n_in, n_hidden, n_out):\r\n\r\n # Since we are dealing with a one hidden layer MLP, this will\r\n # translate into a TanhLayer connected to the LogisticRegression\r\n # layer; this can be replaced by a SigmoidalLayer, or a layer\r\n # implementing any other nonlinearity\r\n self.hiddenLayer = HiddenLayer(rng = rng, input = input,\r\n n_in = n_in, n_out = n_hidden,\r\n activation = T.tanh, name_prefix='hid_')\r\n\r\n # The logistic regression layer gets as input the hidden units\r\n # of the hidden layer\r\n self.logRegressionLayer = LogisticRegression(\r\n input = self.hiddenLayer.output,\r\n n_in = n_hidden,\r\n n_out = n_out, name_prefix='log_')\r\n\r\n # negative log likelihood of the MLP is given by the negative\r\n # log likelihood of the output of the model, computed in the\r\n # logistic regression layer\r\n self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood\r\n\r\n # the parameters of the model are the parameters of the two layer it is\r\n # made out of\r\n self.params = self.hiddenLayer.params + self.logRegressionLayer.params", "def __init__(self, rng, input, n_in, n_hidden, n_out, n_hiddenLayers, binary, stochastic):\n self.binary=binary\n self.stochastic=stochastic\n \n # Since we are dealing with a one hidden layer MLP, this will translate\n # into a HiddenLayer with a tanh activation function connected to the\n # LogisticRegression layer; the activation function can be replaced by\n # sigmoid or any other nonlinear function.\n self.hiddenLayers = []\n self.normLayers=[]\n for i in xrange(n_hiddenLayers):\n h_input = input if i == 0 else self.hiddenLayers[i-1].output\n h_in = n_in if i == 0 else n_hidden\n\n # if binary==True, we append a binary hiddenlayer\n if binary==True:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=True,\n 
stochastic=stochastic\n ))\n self.normLayers.append(\n BatchNormLayer(\n input=self.hiddenLayers[i].output,\n n_in=n_hidden,\n n_out=n_hidden\n ))\n else:\n self.hiddenLayers.append(\n HiddenLayer(\n rng=rng,\n input=h_input,\n n_in=h_in,\n n_out=n_hidden,\n activation=T.tanh,\n binary=False,\n stochastic=False\n ))\n\n # The logistic regression layer gets as input the hidden units\n # of the hidden layer\n self.logRegressionLayer = LogisticRegression(\n input=self.hiddenLayers[-1].output,\n n_in=n_hidden,\n n_out=n_out,\n binary=binary,\n stochastic=stochastic\n )\n \n # same holds for the function computing the number of errors\n self.errors = self.logRegressionLayer.errors\n\n # the parameters of the model are the parameters of the two layer it is\n # made out of\n self.params = sum([x.params for x in self.hiddenLayers], []) + self.logRegressionLayer.params\n self.wrt = sum([x.wrt for x in self.hiddenLayers], []) + self.logRegressionLayer.wrt\n self.Ws = sum([x.Ws for x in self.hiddenLayers], []) + self.logRegressionLayer.Ws\n # keep track of model input\n self.input = input", "def create_neural_network(NumberOfFeatures, NumberOfClasses, optimizer_type, lr, moment, lr_decay):\n model = create_base_network(NumberOfFeatures, NumberOfClasses)\n if optimizer_type == 'sgd':\n opt = optimizers.SGD(lr=lr, momentum=moment, decay=lr_decay)\n else:\n opt = optimizer_type\n\n model.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n print(model.summary())\n return model", "def _create_model(self):\n config = {\n \"input_features\": self.input_features,\n \"output_features\": self.output_features,\n \"combiner\": {\"type\": \"concat\", \"output_size\": 14},\n TRAINER: {\"epochs\": 2, BATCH_SIZE: 128},\n }\n return LudwigModel(config, logging_level=logging.WARN)", "def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def __init__(self, n_tokens, embed_size=50, n_features=36,\n hidden_size=200, n_classes=3, dropout_prob=0.5):\n super(ParserModel, self).__init__()\n self.embeddings = nn.Embedding(n_tokens, embed_size)\n self.model = nn.Sequential(\n nn.Linear(embed_size * n_features, hidden_size),\n nn.ReLU(),\n nn.Dropout(p=dropout_prob),\n nn.Linear(hidden_size, n_classes),\n )", "def build(self):\n sequence_input = Input(shape=(self.max_sequence_length, ), dtype='int32')\n embedded_sequences = self.embedding_layer(sequence_input)\n x = Conv1D(128, 5, activation='relu')(embedded_sequences)\n x = MaxPooling1D(5)(x)\n x = Conv1D(128, 5, activation='relu')(x)\n x = MaxPooling1D(5)(x)\n x = Flatten()(x)\n x = Dense(128, activation='relu')(x)\n\n y = Bidirectional(LSTM(50, dropout=0.2, recurrent_dropout=0.2))(embedded_sequences)\n z = concatenate([x, y])\n preds = Dense(6, activation='softmax')(z)\n self.model = Model(sequence_input, preds)", "def build_model(nx, layers, activations, lambtha, keep_prob):\n model = K.Sequential()\n reg = K.regularizers.l2\n model.add(K.layers.Dense(layers[0], input_shape=(nx,),\n activation=activations[0],\n kernel_regularizer=reg(lambtha)))\n\n for layer, act in zip(layers[1:], activations[1:]):\n model.add(K.layers.Dropout(1 - keep_prob))\n model.add(K.layers.Dense(layer, activation=act,\n kernel_regularizer=reg(lambtha)))\n\n return model", "def mlp(x, output_size, hidden_sizes=[64], activation=tf.tanh, output_activation=None):\n for size in hidden_sizes:\n x = tf.layers.dense(x, size, activation=activation)\n return tf.layers.dense(x, output_size, activation=output_activation)", "def 
build(self, lang, linearInMLP=False):\n inputLayers, concLayers = [], []\n inputToken = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputToken)\n tokenEmb = Embedding(len(self.vocabulary.tokenIndices), configuration['mlp']['tokenEmb'],\n trainable=configuration['mlp']['trainable'],\n weights=self.getWeightMatrix(self.vocabulary.tokenIndices, lang))(inputToken)\n tokenFlatten = Flatten()(tokenEmb)\n concLayers.append(tokenFlatten)\n inputPos = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputPos)\n posEmb = Embedding(len(self.vocabulary.posIndices), configuration['mlp']['posEmb'],\n trainable=configuration['mlp']['trainable'])(inputPos)\n posFlatten = Flatten()(posEmb)\n concLayers.append(posFlatten)\n if linearInMLP:\n linearPredInput = Input(shape=(8,))\n inputLayers.append(linearPredInput)\n concLayers.append(linearPredInput)\n\n conc = keras.layers.concatenate(concLayers) if len(concLayers) > 1 else concLayers[0]\n dense1Layer = Dense(configuration['mlp']['dense1UnitNumber'],\n activation=configuration['nn']['dense1Activation'])(conc)\n lastLayer = Dropout(configuration['mlp']['dense1Dropout'])(dense1Layer)\n if configuration['mlp2']['dense2']:\n dense2Layer = Dense(configuration['mlp2']['dense2UnitNumber'],\n activation=configuration['mlp2']['dense2Activation'])(lastLayer)\n lastLayer = Dropout(configuration['mlp2']['dense2Dropout'])(dense2Layer)\n softmaxLayer = Dense(8 if enableCategorization else 4, activation='softmax')(lastLayer)\n return inputLayers, softmaxLayer", "def create_linear_model(x, N, outputs=1):\n with C.layers.default_options(initial_state = 0.1):\n return linear_layer(x, outputs)" ]
[ "0.7659937", "0.75626165", "0.74614346", "0.7456196", "0.7027464", "0.70230585", "0.6835712", "0.67514044", "0.67060864", "0.6647528", "0.6643573", "0.659611", "0.6572788", "0.6491478", "0.6484547", "0.6470002", "0.64629227", "0.64441985", "0.64162695", "0.6321424", "0.6284879", "0.6251099", "0.6249069", "0.62486655", "0.6218163", "0.6205383", "0.6184046", "0.61528987", "0.6148236", "0.6146496", "0.6129883", "0.6116537", "0.61118555", "0.610778", "0.61050177", "0.6076721", "0.6061412", "0.60584277", "0.60559356", "0.60433894", "0.60390764", "0.60349923", "0.60278267", "0.60228145", "0.601841", "0.6015383", "0.6009451", "0.6006973", "0.5993523", "0.59776014", "0.5947595", "0.59458226", "0.5938382", "0.5930833", "0.59266776", "0.5917289", "0.5910018", "0.59012854", "0.5899879", "0.58942145", "0.58921194", "0.5881209", "0.58702356", "0.5869543", "0.58651465", "0.5861345", "0.58599305", "0.58526814", "0.5841872", "0.5841642", "0.5836013", "0.58312416", "0.58293056", "0.58293056", "0.58189774", "0.58177024", "0.5799084", "0.5798437", "0.57957464", "0.57956517", "0.5789852", "0.57849216", "0.5778673", "0.57778585", "0.57740635", "0.57714343", "0.57644373", "0.57468194", "0.57363355", "0.5733039", "0.5721553", "0.5710143", "0.56995857", "0.56985503", "0.56885", "0.56658584", "0.56638783", "0.5662055", "0.5657197", "0.56544787" ]
0.8253452
0
Creates a new activation function and adds it to the list of components.
def _activation(self,components,activation):
    if activation == "ReLU":
        components.append(nn.ReLU())
    elif activation == "Sigmoid":
        components.append(nn.Sigmoid())
    else:
        raise Exception("Invalid activation fn: "+activation)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def construct_activation_function(self):\n # Add the activation function\n if not self.activation_function is None:\n # Check if it is a string\n if isinstance(self.activation_function, str):\n activation_function = get_activation_function_by_name(\n self.activation_function\n )()\n else:\n assert isinstance(self.activation_function, ActivationFunction)\n activation_function = self.activation_function\n # Plot the function above the rest of the layer\n self.activation_function = activation_function\n self.add(self.activation_function)", "def get_activation_function(actfn):\n if actfn is None or actfn == 'leakyrelu':\n def create_actfn(): return nn.LeakyReLU(0.1, inplace=True)\n elif actfn == 'gelu':\n def create_actfn(): return nn.GELU()\n elif actfn == 'relu':\n def create_actfn(): return nn.ReLU()\n elif actfn == 'swish' or actfn == 'silu':\n def create_actfn(): return nn.SiLU()\n else:\n raise Exception('Unknown activation function ' + str(actfn))\n return create_actfn", "def encoder_activation_func(num_layer):\n ec_funct = []\n for i in range(num_layer):\n ec_funct.append('relu')\n ec_funct.append('softmax')\n\n return ec_funct", "def linear_activation_calculation(A, W, b, activation_function):\n\n # Your code here\n return activation_function(linear_forward_calculation(A, W, b))\n # raise NotImplementedError", "def initialiseActivationFunctions(self):\n\n\t\t###uniform for output units\n\t\tif self._outputActivationFunctions == None or self._outputActivationDerivatives == None:\t\n\t\n\t\t\tself._outputActivationFunctions = []\n\t\t\tself._outputActivationDerivatives = []\n\n\t\t\tactFunc = lambda x : x\n\t\t\tdActFunc = lambda x : 1.0\n\t\n\t\t\tfor i in range(self.nOutputs):\n\t\t\t\t\n\t\t\t\tself._outputActivationFunctions.append(actFunc)\n\t\t\t\tself._outputActivationDerivatives.append(dActFunc)\n\n\t\t\tself._outputActivationFunctions = np.array(self._outputActivationFunctions)\n\t\t\tself._outputActivationDerivatives = np.array(self._outputActivationDerivatives)\n\t\t\t\n\n\t\tif self._hiddenActivationFunctions == None or self._hiddenActivationDerivatives == None:\n\n\t\t\tself._hiddenActivationFunctions = []\n\t\t\tself._hiddenActivationDerivatives = []\n\n\t\t\tfor i in range(self.nHiddenLayers):\n\n\t\t\t\tfTemp = []\n\t\t\t\tdTemp = []\n\t\t\t\t\n\t\t\t\t#Make the default sigmoid the one suggested in LeCun et al 1998\n\t\t\t\ttwist = 0.01\n\t\t\t\ta = 1.7159\n\t\t\t\tc = 2.0/3.0\n\n\t\t\t\tactFunc = lambda x : a*np.tanh(c*x) + twist*x\n\t\t\t\tdActFunc = lambda x : twist + a*c*(1.0 - (np.tanh(c*x)**2.0))\n\n#\t\t\t\tactFunc = lambda x : np.tanh(x)\n#\t\t\t\tdActFunc = lambda x : 1.0 - np.tanh(x)**2.0\n\n\t\t\t\t#plus all of the bias\n\t\t\t\tfor j in range(self.nUnitsPerLayer+1):\n\t\t\t\t\t\n\t\t\t\t\tfTemp.append(actFunc)\n\t\t\t\t\tdTemp.append(dActFunc)\n\t\t\t\t\n\t\t\t\tself._hiddenActivationFunctions.append(fTemp)\n\t\t\t\tself._hiddenActivationDerivatives.append(dTemp)\n\t\t\t\n\t\t\tself._hiddenActivationFunctions = np.array(self._hiddenActivationFunctions)\n\t\t\tself._hiddenActivationDerivatives = np.array(self._hiddenActivationDerivatives)", "def activation_factory(name):\n if name == 'relu':\n return nn.ReLU(inplace=True)\n if name == 'leaky_relu':\n return nn.LeakyReLU(0.2, inplace=True)\n if name == 'elu':\n return nn.ELU(inplace=True)\n if name == 'sigmoid':\n return nn.Sigmoid()\n if name == 'tanh':\n return nn.Tanh()\n if name is None or name == \"identity\":\n return nn.Identity()\n\n raise ValueError(f'Activation function `{name}` not yet 
implemented')", "def get_activation_function(func_name):\n return {\n 'linear': lambda x: x,\n 'relu': lambda x: x * (x > 0),\n 'elu': lambda x: x * (x >= 0) + (T.exp(x) - 1) * (x < 0),\n 'softmax': T.nnet.softmax,\n 'tanh': T.tanh,\n 'log_softmax': log_softmax,\n 'sigmoid': T.nnet.sigmoid\n }[func_name]", "def __init__(self, layers=[2, 2, 1], activation_function=\"bentidentity\"):\n self.layers = layers\n self.activation_function = th.activation_functions[activation_function]\n self.activation_derivative = th.activation_derivatives[\n activation_function]\n self.weights = self._generate_weights()", "def register_activations(model: onnx_pb.ModelProto, activation_names: List):\n for act_name in activation_names:\n _ = add_hook_to_get_activation(model, act_name)", "def forward_activationfunction(self, x):\n if self.forward_activation == 'tanh':\n return torch.tanh(x)\n elif self.forward_activation == 'relu':\n return F.relu(x)\n elif self.forward_activation == 'linear':\n return x\n elif self.forward_activation == 'leakyrelu':\n return F.leaky_relu(x, 0.2)\n elif self.forward_activation == 'sigmoid':\n return torch.sigmoid(x)\n else:\n raise ValueError('The provided forward activation {} is not '\n 'supported'.format(self.forward_activation))", "def activation_function(self, x: np.array) -> np.array:\r\n\t\treturn self._activation_function(x)", "def add_activation(self, op, input_name, name=None, attr={}):\n attr['alpha'] = 1.0\n attr['beta'] = 1.0\n if 'op' == 'Selu':\n attr['alpha'] = 1.6732632423543772848170429916717\n attr['beta'] = 1.0507009873554804934193349852946\n\n return self._build_op(op, [input_name], name=name, attr=attr)", "def add_function(self, function):\n self.functions.append(function)", "def add_function(self, function):\n self.functions.append(function)", "def activation_func(activation, inplace=False):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=inplace)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.2, inplace=inplace)],\n ['selu', nn.SELU(inplace=inplace)],\n ['none', nn.Identity()]\n ])[activation]", "def activate(self, input_layer, funcname=None):\n if isinstance(funcname, tuple):\n funcname = funcname[0]\n params = funcname[1:]\n if funcname is None:\n funcname = self.activation_func\n if funcname == 'LINEAR':\n return input_layer\n activation_map = {\n 'RELU': tf.nn.relu,\n 'RELU6': tf.nn.relu6,\n 'ELU': tf.nn.elu,\n 'SIGMOID': tf.nn.sigmoid,\n 'TANH': tf.nn.tanh,\n 'LRELU': lambda x, name: tf.maximum(params[0]*x, x, name=name)\n }\n return activation_map[funcname](input_layer, name=funcname.lower())", "def addFunction(self, func):\n self.__functions.append(func)", "def activation_func(activation:str):\n return nn.ModuleDict([\n ['relu', nn.ReLU(inplace=True)],\n ['leaky_relu', nn.LeakyReLU(negative_slope=0.01, inplace=True)],\n ['selu', nn.SELU(inplace=True)],\n ['none', nn.Identity()]\n ])[activation]", "def activation(activation_fun=None):\n activation_fun = (activation_fun or cfg.MODEL.ACTIVATION_FUN).lower()\n if activation_fun == \"relu\":\n return nn.ReLU(inplace=cfg.MODEL.ACTIVATION_INPLACE)\n elif activation_fun == \"silu\" or activation_fun == \"swish\":\n try:\n return torch.nn.SiLU()\n except AttributeError:\n return SiLU()\n elif activation_fun == \"gelu\":\n return torch.nn.GELU()\n else:\n raise AssertionError(\"Unknown MODEL.ACTIVATION_FUN: \" + activation_fun)", "def linear_activation_forward(A_prev, W, b, activation):\n pass", "def generate_activation(act_par):\n\n if type(act_par) == list:\n if len(act_par) == 2:\n atype, par = 
act_par\n if atype == 'elu':\n return ELU(alpha=par)\n elif atype == 'leaky':\n return LeakyReLU(alpha=par)\n elif atype == 'prelu':\n return PReLU()\n else:\n raise NameError(\"No such Activation layer\")\n elif len(act_par) == 1:\n if act_par[0] == 'snake':\n return Activation(snake)\n elif act_par[0] == 'snakeh2':\n return Activation(snakeh2)\n elif act_par[0] == 'snake2':\n return Activation(snake2)\n elif act_par[0] == 'xsin':\n return Activation(xsin)\n elif act_par[0] == 'swish':\n return Activation(swish)\n else:\n return Activation(act_par[0])\n else:\n raise NameError(\"No such Activation layer\")\n elif type(act_par) == str:\n return Activation(act_par)\n else:\n raise NameError(\"Wrong parameters for activation layer\")", "def uf_activate(self, output_reg):\n if len(self.inputs) is 2:\n self.two_activation(output_reg)\n elif len(self.inputs) is 3:\n self.three_activation(output_reg)\n else:\n self.large_activation(output_reg)", "def convert_activation(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n act_type = attrs[\"act_type\"]\n\n # Creating a dictionary here, but if this titlecase pattern\n # mxnet_name.title()\n act_types = {\n \"tanh\": \"Tanh\",\n \"relu\": \"Relu\",\n \"sigmoid\": \"Sigmoid\",\n \"softrelu\": \"Softplus\",\n \"softsign\": \"Softsign\"\n }\n\n act_name = act_types.get(act_type)\n if act_name:\n node = onnx.helper.make_node(\n act_name,\n input_nodes,\n [name],\n name=name\n )\n else:\n raise AttributeError(\n \"Activation %s not implemented or recognized in the converter\" % act_type\n )\n\n return [node]", "def forward_activate(self, a_prev, w, b, func_type):\n\n\t\tz = np.dot(w, a_prev) + b\n\t\tif 'sigmod' == func_type.lower(): \n\t\t\ta = 1 / (1 + np.exp(-z))\n\t\telif 'relu' == func_type.lower():\n\t\t\ta = np.where(z >= 0, z, 0)\n\t\telif 'leaky relu' == func_type.lower():\n\t\t\ta = np.where(z >= 0, z, 0.01 * z)\n\t\telif 'tanh' == func_type.lower():\n\t\t\ta = (np.exp(z) - np.exp(-z)) / (np.exp(z) + np.exp(-z))\n\n\t\tcache = (a_prev, w, b, z)\n\t\treturn a, cache", "def activate(self, inputvaluelist: List[float]):\n if len(inputvaluelist) != len(self.inputWeight):\n raise Exception(f\"The length input is {len(inputvaluelist)} and is not equal\"\n f\" to length of weights({len(self.inputWeight)})\")\n self.inputvaluelist = inputvaluelist\n inputlist = list(zip(inputvaluelist, self.inputWeight))\n\n input_sum = 0\n for inp in inputlist:\n input_sum += inp[0] * inp[1]\n input_sum += self.bias\n\n self.output = sigmoid(input_sum)\n\n return self.output", "def activation_function(self, X):\n return self.net_input(X)", "def activation_function(self, X):\n return self.net_input(X)", "def new_layer(self, nodes, inputs, alpha=0.1):\n weights = [[random.uniform(-0.1, 0.1) for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.append(Layer(weights, alphas))", "def activation_function(X):\n\tz = np.sum(w*x+b)\n\treturn z", "def pre_activation(features, weights, bias):\n # this is a dot product between features and weights, added to bias after.\n return np.dot(features, weights) + bias", "def build_activation(activation: str) -> nn.Module:\n if hasattr(nn, activation):\n return getattr(nn, activation)()\n elif activation == \"Swish\":\n return Swish()\n else:\n raise Exception(\"{} invalid activation function.\".format(activation))", "def add(self, func):\n self._getfunctionlist().append(func)\n return self", "def _activate(self, x):\n self._activation_map = 
self._activation_distance(x, self._weights)", "def forward(inputs,weights,function=sigmoid,step=-1):\n if step == 0:\n return inputs\n elif step == -1:\n step = len(weights) #go to output layer \n output = np.append(1, inputs)\n for i in range(step):\n output = np.append(1, function(np.dot(weights[i], output))) #calculating activation\n return output[1:]", "def bind(self, function, execOnUpdate=True, arguments=[]):\n if isinstance(function, types.FunctionType):\n self.functions.append(function)\n self.functionArguments.append(arguments)\n self.functionUpdate.append(execOnUpdate)\n else:\n raise Exception(\"el elemento a agregar debe ser una funcion\")", "def add(self, func):\n\n self._getfunctionlist().append(func)\n return self", "def add_comp(self, basis=None):\n if self._is_identity:\n raise TypeError(\"the components of the identity map cannot be \" +\n \"changed\")\n else:\n return FreeModuleTensor.add_comp(self, basis=basis)", "def forward(W,X):\n return activation_func(np.dot(add_bias(X),W))", "def get_activation(fn: Union[Callable, str]):\n if isinstance(fn, str):\n return getattr(torch.nn.functional, fn)\n return fn", "def add_new_function(self, pfunc, pfunc_bound= True, index = None):\n raise NotImplementedError()", "def get_activation_fns(act_fn_names, act_fn_params):\n act_fns = []\n \n for n, p in zip(act_fn_names, act_fn_params):\n if p is None or p == \"None\":\n act_fns.append(act_fn_dict[n])\n else:\n act_fns.append(act_fn_dict[n](p))\n \n return act_fns", "def get_activation_function(activation: str) -> nn.Module:\n if activation == \"ReLU\":\n return nn.ReLU()\n elif activation == \"LeakyReLU\":\n return nn.LeakyReLU(0.1)\n elif activation == \"PReLU\":\n return nn.PReLU()\n elif activation == \"tanh\":\n return nn.Tanh()\n elif activation == \"SELU\":\n return nn.SELU()\n elif activation == \"ELU\":\n return nn.ELU()\n elif activation == 'Swish':\n return Swish()\n elif activation == 'Mish':\n return Mish()\n else:\n raise ValueError(f'Activation \"{activation}\" not supported.')", "def fit_custom_fx(self, custom_function, input_width, output_width, task_name):\n new_classifier = ClassifierNode(\n end_in_address=input_width,\n out_address=self.classifiers_out_address_start + self.classifiers_current_count + np.arange(output_width),\n classifier_name=task_name,\n given_predictor=custom_function\n )\n self.classifiers_current_count += output_width\n self.classifiers_list.append(new_classifier)", "def addFunction(name, callables, icon=None):\n\n\tif not _funcDict.has_key(name):\n\t\t_functionOrder.append(name)\n\t_funcDict[name] = callables\n\tif icon:\n\t\timageDict[name] = icon\n\tif _mouseCategory:\n\t\t_mouseCategory.addFunc(name, callables)", "def add_layer(self, in_dim, out_dim, activation: Module or None, i=None):\n i = i or len(self.modules)\n self.modules.insert(i, Linear(in_dim=in_dim, out_dim=out_dim, activation=activation))", "def get_activation_fn(name: Optional[str]):\n if name in (None, 'linear'):\n return lambda x: x\n elif name in ('sigmoid', 'tanh'):\n return getattr(torch, name)\n else:\n return getattr(F, name)", "def __init__(self, layer_size, activation_function=linear,\n derivative_function=dlinear,\n forward_function=propagate_forward,\n backward_function=propagate_backward_irpropm,\n init_weights_function=fully_connected_weights, bias=True):\n self.forward_propagation = forward_function\n self.back_propagation = backward_function\n self.activation_function = activation_function\n self.derivative_function = derivative_function\n self.bias = bias\n\n 
# the activations of these nodes\n bias_add = 0\n if self.bias:\n bias_add = 1\n self.visible = numpy.ones(layer_size + bias_add)\n self.init_weights_function = init_weights_function", "def makeFastFeedForwardFunction(self):\n\n\t\toutWeightMatrix = []\n\t\tfor unit in self.outputLayer:\n\n\t\t\trow = []\n\t\t\tfor b in unit.branchesIn:\n\t\t\t\tprint b.weight\n\t\t\t\trow.append(b.weight)\n\t\t\t\n\t\t\toutWeightMatrix.append(row)\n\t\toutWeightMatrix = np.array(outWeightMatrix).squeeze()\n\n\t\thiddenMatrices = []\n\t\tfor layer in self.hiddenLayers:\n\t\t\tmatrix = []\n\t\t\t#ignore the bias unit, since it has no branches in\n\t\t\tfor unit in layer[1:]:\n\t\t\t\trow = []\n\t\t\t\tfor b in unit.branchesIn:\n\t\t\t\t\trow.append(b.weight)\n\n\t\t\t\tmatrix.append(row)\n\t\t\tmatrix = np.array(matrix)\n\n\t\t\thiddenMatrices.append(matrix)\n\n\t\thidActFunc = (self.hiddenLayers[0])[1].activationFunction\n\t\toutActFunc = self.outputLayer[0].activationFunction\n\n\t\tdef ffFunc(inp):\n\t\n\t\t\tforward = np.insert(inp.T,0,1.0,axis=0)\n\t\t\tfor matrix in hiddenMatrices:\n\t\t\t\tnext = np.dot(matrix,forward)\n\t\t\t\tnext = hidActFunc(next)\n\t\t\t\tforward = np.insert(next,0,1.0,axis=0)\n\n\t\t\tout = np.dot(outWeightMatrix,forward)\n\n\t\t\treturn outActFunc(out)\n\n\t\treturn ffFunc", "def new_initial_layer(self, nodes, inputs, alpha=0.1):\n weights = [[1 for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.insert(0, Layer(weights, alphas))", "def _make_train_function(self):\n if self.train_function is None:\n print('compiling train function...')\n start = time.time()\n inputs = self._estimator.inputs + [self.T_Y]\n\n training_updates = self.optimizer.get_updates(\n self._estimator.trainable_weights,\n {}, self.fqi_loss)\n\n # returns loss and metrics. 
Updates weights at each call.\n self.train_function = theano.function(inputs, [self.fqi_loss],\n updates=training_updates,\n name=\"trainer\")\n print('compiled in {}s'.format(time.time() - start))", "def _get_activation_fn(activation):\n if activation == \"relu\":\n return ReLU()\n if activation == \"gelu\":\n return GELU()\n # if activation == \"glu\":\n # return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")", "def linear_activation_forward(A_prev, W, b, activation):\n if activation == \"sigmoid\":\n Z,linear_cache = linear_forward(A_prev,W,b)\n A,activation_cache = sigmoid(Z)\n elif activation == \"relu\":\n Z,linear_cache = linear_forward(A_prev,W,b)\n A,activation_cache = relu(Z)\n\n cache = (linear_cache,activation_cache)\n return A,cache", "def feedforward(self, a):\n for activation, b, w in zip(self._activation_funcs, self.biases, self.weights):\n z = np.dot(w, a) + b\n a = activation.fn(z)\n return a", "def __init__(self, incoming, a=tf.identity, name='ActivationLayer'):\n super(ActivationLayer, self).__init__()\n \n with tf.variable_scope(name) as self.layer_scope:\n self.incoming, self.incoming_shape = get_input(incoming)\n \n self.out = None\n self.a = a\n self.name = name", "def linear_activation_forward(A_prev, W, b, activation):\n Z, linear_cache = linear_forward(A_prev, W, b)\n if activation == 'sigmoid':\n activation_fun = sigmoid\n else:\n activation_fun = relu\n A, cache = activation_fun(Z)\n activation_cache = Z\n cache = {'linear_cache':linear_cache, 'activation_cache':activation_cache}\n return A, cache", "def _make_train(self):\n with context.context(training=True):\n prediction = self(*self.inputs)\n thecost = self.cost(self.target, prediction)\n return theano.function(self.inputs + [self.target], \n thecost, \n updates=self.updater.get_updates(self.params(), thecost))", "def output_layer_activation(x):\n return x", "def _get_activation_fn(activation):\n if activation == \"relu\":\n return nn.ReLU()\n # if activation == \"gelu\":\n # return F.gelu\n # if activation == \"glu\":\n # return F.glu\n raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")", "def __act_f(self, p):\n # linear activation if no function is given\n if self.activation is None:\n return p\n # tanh\n if self.activation == \"tanh\":\n return np.tanh(p)\n # sigmoid\n if self.activation == \"sigmoid\":\n return 1 / (1 + np.exp(-p))\n # relu\n if self.activation == \"relu\":\n return p * (p > 0)", "def feedback_activationfunction(self, x):\n if self.feedback_activation == 'tanh':\n return torch.tanh(x)\n elif self.feedback_activation == 'relu':\n return F.relu(x)\n elif self.feedback_activation == 'linear':\n return x\n elif self.feedback_activation == 'leakyrelu':\n return F.leaky_relu(x, 5)\n elif self.feedback_activation == 'sigmoid':\n if torch.sum(x < 1e-12) > 0 or torch.sum(x > 1-1e-12) > 0:\n warnings.warn('Input to inverse sigmoid is out of'\n 'bound: x={}'.format(x))\n inverse_sigmoid = torch.log(x/(1-x))\n if utils.contains_nan(inverse_sigmoid):\n raise ValueError('inverse sigmoid function outputted a NaN')\n return torch.log(x/(1-x))\n else:\n raise ValueError('The provided feedback activation {} is not '\n 'supported'.format(self.feedback_activation))", "def get_act_fn(act_fn):\n if act_fn == 'relu':\n activation_fn = tf.nn.relu\n elif act_fn == 'sigmoid':\n activation_fn = tf.nn.sigmoid\n elif act_fn == 'elu':\n activation_fn = tf.nn.elu\n elif act_fn is None:\n activation_fn = None\n else:\n raise ValueError('Wrong activation 
function name!')\n return activation_fn", "def register(self, function: str, creator: _Loss):\n self._criterion[function] = creator", "def add_layer(self, freeze = True, add = True):\n if add:\n self.num_layers += 1\n if self.conv_dim == 1:\n new_cnn = layers.Conv1D(self.n_filters,\n (self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0], self.n_filters),\n padding=\"same\",\n name='cnn_1d_{}'.format(self.num_layers-1),\n kernel_initializer = initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n elif self.conv_dim == 2:\n new_cnn = layers.Conv2D(self.n_filters,\n (self.n_kernels, self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n padding=\"same\",\n name='cnn_2d_{}'.format(self.num_layers-1),\n kernel_initializer=initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n self.list_cnn.append(new_cnn)\n\n if freeze:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = False\n else:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = True", "def add_layer(inputs, in_size, out_size, activation_function=None):\n Weights = tf.Variable(tf.random_normal([in_size, out_size]))\n baises = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n Wx_plus_b = tf.matmul(inputs, Weights) + baises\n if activation_function is None:\n out_puts = Wx_plus_b\n else:\n out_puts = activation_function(Wx_plus_b)\n return out_puts", "def register_activation_hooks(self, model):\n self.outputs = []\n self.hooks = []\n b = None\n self.list_mods = self.prune_modules\n if ('norm' in self.prune_selection):\n self.list_mods = self.norm_modules\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'unprunable') and l.unprunable):\n continue\n hook_handle = l.register_forward_hook(append_hook)\n self.hooks.append(hook_handle)", "def new_activation(self, X, y):\n yz = y.dot(self.net_input(X))\n return np.log(self._sigmoid(yz))", "def start_func_default(self, activation):\n activation.prepare()\n activation.done()\n return activation", "def activate(self, x):\n self._activate(x)\n return self._activation_map", "def forward(self, x):\n\n h = x.mm(self.weights.t())\n if self.bias is not None:\n h += self.bias.unsqueeze(0).expand_as(h)\n self.linearactivations = h\n\n self.activations = self.forward_activationfunction(h)\n return self.activations", "def activate(self, inputs: Tuple[float, ...]) -> Tuple[float, ...]:\n self.z = [Math.dot(self.w[i], inputs) + self.b[i]\n for i in range(len(self.w))]\n self.a = [self.g(real) for real in self.z]\n return tuple(self.a)", "def _forward(z: np.array, W: np.array, b: np.array,\n activation: str) -> np.array:\n a = np.dot(z, W) + b\n if activation == 'sigmoid':\n return sigmoid(a)\n elif activation == 'identity':\n return identity(a)", "def add_layer(self, num_nodes, transfer_function=\"Linear\"):\r\n self.weights.append(np.random.randn(self.input_dimension, num_nodes))\r\n self.biases.append(np.random.randn(num_nodes))\r\n self.transferfunction.append(transfer_function)\r\n self.input_dimension = num_nodes", "def _get_activation_fn(activation):\n if activation == \"relu\":\n return F.relu\n if activation == \"gelu\":\n return F.gelu\n if activation == \"glu\":\n return F.glu\n raise 
RuntimeError(F\"activation should be relu/gelu, not {activation}.\")", "def get_activation(act):\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n return nn.LeakyReLU(0.1)\n elif act == 'identity':\n return IdentityActivation()\n elif act == 'elu':\n return ELU()\n elif act in ['relu', 'sigmoid', 'tanh', 'softrelu', 'softsign']:\n return nn.Activation(act)\n else:\n raise NotImplementedError\n else:\n return act", "def activation_func(x):\r\n a = -1\r\n return 1/(1+np.exp(-a*x))", "def _create_function_base(self):\n global link_names, current_link_suffix\n default_link_name = 'jit_func'\n current_link_suffix += 1\n self.link_name = default_link_name + str(current_link_suffix)\n link_names.add(self.link_name)\n\n fn_type = ll.FunctionType(self.llvm_ret_type, self.llvm_arg_types)\n self.fn = ll.Function(self.module, fn_type, name=self.link_name)", "def Pool2DOptionsAddFusedActivationFunction(builder, fusedActivationFunction):\n return AddFusedActivationFunction(builder, fusedActivationFunction)", "def feed_forward(self, inputs):\n #assert len(inputs)==self._shape, 'Inputs must be of size %d; was %d instead' % (self._shape, len(inputs)) \n # Update output vector for later use, and return it.\n self._outputs = self._activation_function.activate(np.sum(np.dot(inputs, self.get_weights()) + self.get_bias()))\n return self._outputs", "def forward(self, state: list) -> list:\n x = F.relu(self.fc1_layer(state))\n x = F.relu(self.fc2_layer(x))\n x = F.tanh(self.fc3_layer(x))\n\n return x", "def forward_pass(self, x):\n activations = [x]\n zs = []\n for b, w in zip(self.biases, self.weights):\n zs.append(np.dot(w, activations[-1]) + b)\n activations.append(sigmoid(zs[-1]))\n return zs, activations", "def compute_activation(self):\r\n\r\n x=0\r\n edges=self.in_edges\r\n for edge in edges:\r\n x+= edge.source.activation*edge.weight\r\n self.activation=1/(1+exp(-x))", "def add_function(self, func_name, *args, **kwargs):\n if len(args) > 0:\n attr = args[0]\n else:\n attr = func_name.func_name\n self._user_funcs[attr] = func_name", "def _forward(self, a):\n a = np.array(a)\n self.weighted_layer, self.activations = [], [a]\n for w, b in zip(self.weights, self.biases):\n z = w.dot(a) + b\n a = sigmoid(z)\n self.weighted_layer.append(z)\n self.activations.append(a)\n\n return a", "def new_random_layer(self, nodes, inputs, alpha=0.1):\n weights = [[random.uniform(-1, 1) for _ in range(inputs)] for i in range(nodes)]\n alphas = [alpha for _ in range(nodes)]\n self._layers.append(Layer(weights, alphas))", "def create_feature_layers(self):\n feature_columns = [tf.feature_column.numeric_column(name,\n normalizer_fn=lambda x: (x - self.train_features[\n name].mean()) /\n self.train_features[name].std())\n for name in self.feature_names]\n\n self.feature_layers = layers.DenseFeatures(feature_columns)\n return 'feature layers had been created'", "def hybrid_forward(self, F, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n identity = self.downsample(x)\n out = F.Activation(out + identity, act_type='relu')\n\n if self.nonlocal_block is not None:\n out = self.nonlocal_block(out)\n return out", "def __init__(self, linear_args, activation_functions, softmax=False):\n super(NN, self).__init__()\n assert len(activation_functions) == len(linear_args) - 1\n k = len(linear_args)\n assert 
all([linear_args[j][1] == linear_args[j+1][0] for j in range(k-1)])\n self.linears = torch.nn.ModuleList([torch.nn.Linear(*arg) for arg in linear_args])\n self.activation_functions = activation_functions\n self.softmax = softmax", "def register(self, funcs):\n for name, func in funcs.items():\n self.functions[name] = func", "def __init__(self):\r\n self.activation = Activation(u'signup')\r\n self.activated = False", "def forward(self, a):\n if self.activation_type == \"sigmoid\":\n return self.sigmoid(a)\n\n elif self.activation_type == \"tanh\":\n return self.tanh(a)\n\n elif self.activation_type == \"ReLU\":\n return self.ReLU(a)", "def activate(x):\n raise NotImplementedError()", "def forward(self, state, action): \n ##x = F.relu(self.fc1(state)) \n x = F.relu(self.bn1(self.fc1(state))) \n x = torch.cat([x, action], dim=1)\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x", "def get_activation_fn(activation: str = None):\n if activation is None or activation.lower() == \"none\":\n activation_fn = lambda x: x # noqa: E731\n else:\n activation_fn = torch.nn.__dict__[activation]()\n return activation_fn", "def linear_activation_forward(A_prev, W, b, activation): \n if activation == \"softmax\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n Z, linear_cache = linear_forward(A_prev, W, b)\n A,activation_cache = stable_softmax(Z)\n \n elif activation == \"relu\":\n # Inputs: \"A_prev, W, b\". Outputs: \"A, activation_cache\".\n Z, linear_cache = linear_forward(A_prev, W, b)\n A, activation_cache = relu(Z)\n \n assert (A.shape == (W.shape[0], A_prev.shape[1]))\n cache = (linear_cache, activation_cache)\n\n return A, cache", "def __forward(self, A, W, b, activation_fn, output_layer=False):\n Z = np.dot(W, A) + b\n A_new = activation_fn(Z)\n D = np.ones_like(A_new) # Mask\n\n # Implement the Inverted Dropout Regularization\n if self.regularization == \"dropout\" and not output_layer:\n D = np.random.rand(A_new.shape[0], A_new.shape[1]) < self.keep_prob\n A_new = np.multiply(A_new, D) / self.keep_prob\n\n assert (Z.shape == (W.shape[0], A.shape[1]))\n assert (A_new.shape == (W.shape[0], A.shape[1]))\n\n cache = (A, W, b, Z, D)\n\n return A_new, cache", "def register_function_compilation(self, func, compilation_cbk, listclass):\n self.compilations_function[func] = {\n 'callback': compilation_cbk,\n 'listclass': listclass\n }", "def gelu_activation(inp):\n out = 1 + tf.tanh(np.sqrt(np.pi) * (inp + 0.044715 * tf.pow(inp, 3)))\n out *= 0.5 * inp\n return out", "def gelu_activation(inp):\n out = 1 + tf.tanh(np.sqrt(np.pi) * (inp + 0.044715 * tf.pow(inp, 3)))\n out *= 0.5 * inp\n return out", "def add_layer(inputs, in_size, out_size, n_layer, activation_function=None, ):\r\n layer_name = \"layer%s\" % n_layer\r\n with tf.name_scope(layer_name):\r\n with tf.name_scope(\"Weights\"):\r\n Weights = tf.Variable(tf.random_normal([in_size, out_size]), name=\"W\")\r\n tf.summary.histogram(layer_name + \"/Weight\", Weights)\r\n with tf.name_scope(\"Biases\"):\r\n biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name=\"b\")\r\n tf.summary.histogram(layer_name + \"/Biases\", biases)\r\n with tf.name_scope(\"Wx_plus_b\"):\r\n Wx_plus_b = tf.matmul(inputs, Weights) + biases\r\n if activation_function is None:\r\n outputs = Wx_plus_b\r\n else:\r\n outputs = activation_function(Wx_plus_b, )\r\n tf.summary.histogram(layer_name + \"/output\", outputs)\r\n return outputs", "def apply_activation(self, tens):\n if(self.activation == \"ReLU\"): # pylint: disable=no-else-return\n return 
tf.nn.relu(tens)\n elif(self.activation == \"Leaky_ReLU\"):\n return tf.nn.leaky_relu(tens)\n elif(self.activation == \"Tanh\"):\n return tf.nn.tanh(tens)\n elif(self.activation == \"Sigmoid\"):\n return tf.nn.sigmoid(tens)\n elif(self.activation == \"Linear\"):\n return tens\n else:\n raise InvalidActivationError(self.activation)" ]
[ "0.7639841", "0.6542784", "0.61630833", "0.6001237", "0.6001181", "0.59439", "0.591768", "0.5908562", "0.58893776", "0.58730316", "0.5850897", "0.5848719", "0.58140385", "0.58140385", "0.5795392", "0.5792487", "0.5785407", "0.57467926", "0.57267064", "0.5675934", "0.5668909", "0.56354433", "0.56326073", "0.56173855", "0.55790275", "0.55623114", "0.55623114", "0.55437165", "0.54910827", "0.54903257", "0.5489159", "0.5481204", "0.54672503", "0.54553753", "0.5443681", "0.5431458", "0.5423927", "0.5412035", "0.54011416", "0.5399231", "0.53979903", "0.5376476", "0.53590024", "0.534646", "0.53415054", "0.533901", "0.5338766", "0.5336896", "0.53344804", "0.53234035", "0.5319358", "0.5313386", "0.53008807", "0.5287614", "0.5285917", "0.5260732", "0.525848", "0.5250083", "0.524219", "0.52400666", "0.52365667", "0.5234708", "0.52326417", "0.5227271", "0.5219535", "0.5215494", "0.5214042", "0.520276", "0.51989436", "0.51977587", "0.5196139", "0.51898706", "0.51795405", "0.5171101", "0.5169685", "0.514882", "0.5143373", "0.51395243", "0.51340884", "0.51235366", "0.51120245", "0.5104681", "0.50982016", "0.5095853", "0.50949717", "0.50818175", "0.50777245", "0.50537366", "0.50473386", "0.50440186", "0.5043149", "0.50389135", "0.503315", "0.5026576", "0.5022695", "0.50169224", "0.5012086", "0.5012086", "0.49909723", "0.49720815" ]
0.7331862
1
Adds a dropout object to the list of components
def _dropout(self,components,dropout=None):
        if dropout is not None:
            components.append(nn.Dropout(dropout))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, component) -> None:\n pass", "def add_component(self, componentInstance):\n\n #print \"Componet being added to %s entity.\"%(self._sName)\n #print componentInstance\n \n self._dComponents[componentInstance.get_name()] = componentInstance\n\n #These if statements will save a pointer of the same variable as in dComponents if True.\n\n if componentInstance.get_updateable():\n self._lUpdatables.append(componentInstance)\n\n if componentInstance.is_view_drawable():\n self._lViewDrawables.append(componentInstance)\n\n elif componentInstance.is_screen_drawable():\n self._lScreenDrawables.append(componentInstance)", "def add_component(self, lib_component):\n comp_name = lib_component.name\n try:\n comp = self.__component_list[comp_name]\n except KeyError:\n self.__component_list[comp_name] = lib_component", "def addDropzone( self, dropzone ):\n self._dropzones.append(dropzone)", "def add_dut(self):\n pass", "def changeDropout(self,dropout):\n self.dropout = dropout", "def add(self, *components):\n for component in components:\n if component.container is not None:\n component.container.remove(component)\n component.container = self\n self._components.extend(components)", "def add(self, comp):\n\t\tif comp:\n\t\t\tif isinstance(comp, Component):\n\t\t\t\tcomp.container = self\n\t\t\telse:\n\t\t\t\tfor item in comp:\n\t\t\t\t\tself.add(item)", "def buttonAdd_Clicked( self, event ):\n\t\tid = DM.FixedIndex(self._combos[self._treasureIndex].GetSelection())\n\t\tif id is not None and id >= DM.FixedIndex(0):\n\t\t\tqty = self.spinCtrlQuantity.GetValue()\n\t\t\tprob = self.spinCtrlProbability.GetValue()\n\t\t\ttreasure = (id, prob, qty)\n\t\t\tself.Treasure[self._treasureIndex].append(treasure)\n\t\t\tself.refreshTreasureList()", "def _addOutlet(self, outlet, other): \n self._outlets.append(outlet)\n if self._type == 2 and other._type == 1:\n self._reservoirs.append(other)", "def append(self, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.append(panel(obj))\n self.objects = new_objects", "def add(self, name, obj):\n obj = super(Assembly, self).add(name, obj)\n if is_instance(obj, Component):\n self._depgraph.add(obj.name)\n return obj", "def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n self.list.InsertItems([newItem],index)", "def add_gearbox(self):\n next_gearbox_name = ''.join(('gearbox_', str(self.num_gearboxes)))\n self.gearboxes.append(gearbox.Gearbox(self.cables_per_gearbox,\n name=next_gearbox_name, \n level=self.num_gearboxes))\n print \"Added gearbox\", self.num_gearboxes\n self.num_gearboxes += 1\n self.gearbox_added = True", "def put_in(self, item):\n try:\n self.bag_of_holding.append(item)\n print(\"You have added {} to your inventory.\".format(item))\n except:\n print('Error in Inventory method: put_in')", "def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)", "def add(self, item):", "def add(self, widget: Component) -> None:\n self._add(widget)", "def add_inventory(cd_instance, 
lst_Inventory):\r\n \r\n lst_Inventory.append(cd_instance) \r\n return lst_Inventory", "def added(self, comp):\n\t\tpass", "def add_plant(self, desc, obj_list):\n self.plants.append((desc, obj_list))\n if len(self.plants) == 1:\n self.set_default_brush()", "def on_item_dropped(self, url):\n print 'Weld.on_item_dropped:', url\n #make sure all struct are present\n if not(self.project and self.project.level):\n print >> sys.stderr, 'it\\'s too early to drop stuff: '\\\n 'create a project and a level first !'\n return\n\n #retrieve data if it comes from weld\n if url in self.resMan:\n props = self.resMan.file_props(url)\n if props is None:\n print >> sys.stderr, curr_f(), ': url(\\'%s\\') in self.resMan '\\\n 'but can\\'t retrieve props.' % (url)\n return\n props = self.project.level.resMan.add_resource(self.resMan.base_path,\n props)\n url = props['url']\n if props == {} or url not in self.project.level.resMan:\n print >> sys.stderr, curr_f(), 'could not retrieve file and/or '\\\n 'dependencies for props:', pp(props)\n return\n\n #instanciate it\n if url in self.project.level.resMan:\n props = self.project.level.resMan.file_props(url)\n dtp = self.project.level.qsteelwidget.dropTargetPosition(Config.instance().drop_target_vec)\n props['position'] = dtp\n props['rotation'] = self.project.level.qsteelwidget.dropTargetRotation()\n if props['resource_type'] == 'meshes':\n props['meshName'] = props['name']\n self.project.level.instanciate(props)\n s = 'dropped agent \\'%s\\' with id %i' % (props['name'], props['agentId'])\n print s\n Ui.instance().show_status(s)\n else:\n Ui.instance().show_status('can only drop meshes so far')", "def append(self, pane: Any) -> None:\n new_object, new_name = self._to_object_and_name(pane)\n new_objects = list(self)\n new_objects.append(new_object)\n self._names.append(new_name)\n self.objects = new_objects", "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def add_handout(self, asset_name):\r\n self._handouts.append(asset_name)", "def add_joint_to_list(list_widget, combo_box, add_btn, del_btn, forward):\n\n global ftm_list # Forward transition matrices list\n global btm_list # Backward transition matrices list\n global robot_obj\n\n # Getting the current item\n ind = combo_box.currentIndex()\n\n # Finding the associated joint\n i_joint = 0\n for _, _, node in robot_obj.tree:\n type_, nb = node.name.split('_')\n nb = int(nb)\n\n if type_ == 'joint':\n if forward:\n if 'joint_' + str(nb) in ftm_list:\n i_joint += 1\n continue\n else:\n if 'joint_' + str(nb) in btm_list:\n i_joint += 1\n continue\n if ind == nb:\n text = robot_obj.joints[nb].name\n list_widget.addItem(text)\n\n # Disabling the item in the combo box\n combo_box.model().item(i_joint).setEnabled(False)\n\n # If all the joints are added\n if list_widget.count() == combo_box.count():\n add_btn.setEnabled(False)\n del_btn.setEnabled(True)\n\n if forward:\n ftm_list.append(\"joint_\" + str(nb))\n else:\n btm_list.append(\"joint_\" + str(nb))\n\n i_joint += 1", "def add_to_bag(self, item):\n self._bag.append(item)", "def add_output(self):\r\n if self.slots[self.length-1].item is not Item.E:\r\n self.outputs.append(self.slots[self.length-1].item)", "def add_fleet(self, index, *args, **kw):\n\n fleetid = self.fleets.append(ListNode(\"{0!s}\".format(kw.get(\"name\", \"Fleet {0:d}\".format(index))), [\n ListNode(\"Nodes\"),\n ListNode(\"Behaviours\", data=kw.get(\n \"behaviours\", self.defaults[2].get_data()))\n ])\n )\n for i in 
range(kw.get(\"nodes\", 1)):\n self.add_node(fleetid)", "def add_component(self, new: components.Component) -> None:\n for existing in self.components:\n if isinstance(existing, type(new)):\n raise Exception(type(new))\n self.components.append(new)", "def add_destination(self):\n pass", "def drop_object():\r\n\r\n\t\tglobal bodies, geom, counter, objcount\r\n\r\n\t\tbody, geom = create_box(world, space, 1000, 1.0, 0.2, 0.2)\r\n\t\tbody_position = (random.gauss(0, 0.1), 3.0, random.gauss(0, 0.1))\r\n\t\tbody.setPosition(body_position)\r\n\t\ttheta = random.uniform(0,2 * pi)\r\n\t\tct = cos(theta)\r\n\t\tst = sin(theta)\r\n\t\tbody.setRotation([ct, 0.0, -st, 0.0, 1.0, 0.0, st, 0.0, ct])\r\n\t\tnode = scene_manager.addCubeSceneNode(position = vector3df(*body_position))\r\n\t\tnode.setMaterial(material)\r\n\t\tnode.setMaterialFlag(EMF_LIGHTING, False)\r\n\t\tw,xx,yy,zz = body.getQuaternion()\r\n\t\tnode.setRotation(vector3df(degrees(xx), degrees(yy), degrees(zz)))\r\n\t\tbodies.append((node, body))\r\n\t\tgeoms.append(geom)\r\n\t\tcounter = 0\r\n\t\tobjcount += 1", "def add_item(self, item):\n self.items.append(item)", "def add(self, pipe_element):\n self.__iadd__(pipe_element)", "def _add_lamp_outlets(self):\r\n lst = self.model.get_all_lamp_outlets()\r\n\r\n for itm in lst:\r\n self._add_lamp_outlet(itm)", "def dropEvent(self, QDropEvent):\n srcItems = self.selectedItems()\n dstInd = (self.indexAt(QDropEvent.pos()).row() + 1)\n kbMod = QDropEvent.keyboardModifiers()\n #-- Create New Items --#\n for n, srcItem in enumerate(srcItems):\n itemDict = self.treeParent.getItemDict(srcItem)\n newItem = self.treeParent.on_addVar(index=(dstInd + n))\n self.treeParent.setItem(newItem, **itemDict)\n #-- Remove Items --#\n if not kbMod == QtCore.Qt.ControlModifier:\n for srcItem in srcItems:\n self.takeTopLevelItem(self.indexOfTopLevelItem(srcItem))\n self.treeParent.reindexVar()", "def createappendcomp(self, componentname, componentclass, *args, **kwargs):\n component = componentclass(self, self.debugmode, *args, **kwargs)\n self.components.append(componentname, component)\n return component", "def addObject(self):\n\t\tsel = mc.ls( sl = True, typ = 'transform' )\n\t\tif sel:\n\t\t\tself.objects_lw.addItems( sel )", "def addFanOut(self,gate):\n assert type(gate)==Gate\n self.fanOut.append(gate)", "def add(self):\n pass", "def add(self, game_obj):\r\n self.game_objects_for_adding.append(game_obj)", "def update_dropout(info,\n dropout,\n dropout_type,\n prop_name):\n\n if dropout_type == \"schnet_dropout\":\n info[\"model_params\"][\"schnet_dropout\"] = dropout\n\n elif dropout_type == \"chemprop_dropout\":\n info[\"model_params\"][\"cp_dropout\"] = dropout\n\n elif dropout_type == \"readout_dropout\":\n # if it's in the readout layers, find the dropout\n # layers in the readout dictionary and update them\n readout = info[\"model_params\"][\"readoutdict\"]\n layer_dics = readout[prop_name]\n for layer_dic in layer_dics:\n if layer_dic[\"name\"] == \"Dropout\":\n layer_dic[\"param\"][\"p\"] = dropout\n info[\"model_params\"][\"readoutdict\"] = {prop_name: layer_dics}\n\n elif dropout_type == \"attention_dropout\":\n info[\"model_params\"][\"boltzmann_dict\"][\"dropout_rate\"] = dropout\n\n else:\n info[\"model_params\"][dropout_type] = dropout", "def add_connector(self):\n \n no = len(self.connectors)\n state = {}\n state[\"s_pin\"] = no\n state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % no\n \n if len(self.connectors)>0:\n state = self.connectors[-1].get_state()\n state[\"s_pin\"] = no\n 
state[\"p_pin\"] = no\n state[\"s_label\"] = \"C%d\" % (no)\n else:\n if self.mount == self.MOUNT_THT:\n state[\"p_shape\"] = Con.SHAPE_HOLE\n elif self.mount == self.MOUNT_SMD:\n state[\"p_shape\"] = Con.SHAPE_PAD\n \n c = Con(no)\n c.set_state(state) \n \n self.sch_layers[\"pins\"].add(c.s_svg)\n self.pcb_layers[\"copper1\"].add(c.p_svg)\n self.connectors.append(c)", "def add_fruit(self):\n # print('fruit added to container')", "def __add__(self, obj):\n if isinstance(obj, vtk.vtkProp3D):\n self.AddPart(obj)\n\n self.actors.append(obj)\n\n if hasattr(obj, \"scalarbar\") and obj.scalarbar is not None:\n if self.scalarbar is None:\n self.scalarbar = obj.scalarbar\n return self\n\n def unpack_group(scalarbar):\n if isinstance(scalarbar, Group):\n return scalarbar.unpack()\n else:\n return scalarbar\n\n if isinstance(self.scalarbar, Group):\n self.scalarbar += unpack_group(obj.scalarbar)\n else:\n self.scalarbar = Group([unpack_group(self.scalarbar), unpack_group(obj.scalarbar)])\n self.pipeline = vedo.utils.OperationNode(\"add mesh\", parents=[self, obj], c=\"#f08080\")\n return self", "def test_drag_after_addition(self):\r\n group_a_menu = 0\r\n\r\n def add_new_components_and_rearrange(container):\r\n # Add a video component to Group 1\r\n container.add_discussion(group_a_menu)\r\n # Duplicate the first item in Group A\r\n container.duplicate(self.group_a_item_1_action_index)\r\n\r\n first_handle = self.group_a_item_1_handle\r\n # Drag newly added video component to top.\r\n container.drag(first_handle + 3, first_handle)\r\n # Drag duplicated component to top.\r\n container.drag(first_handle + 2, first_handle)\r\n\r\n duplicate_label = self.duplicate_label.format(self.group_a_item_1)\r\n\r\n expected_ordering = [{self.container_title: [self.group_a, self.group_empty, self.group_b]},\r\n {self.group_a: [duplicate_label, self.discussion_label, self.group_a_item_1, self.group_a_item_2]},\r\n {self.group_b: [self.group_b_item_1, self.group_b_item_2]},\r\n {self.group_empty: []}]\r\n\r\n self.do_action_and_verify(add_new_components_and_rearrange, expected_ordering)", "def appendcomp(self, componentname, component):\n self.components.append(componentname, component)\n return component", "def drop(self, item: Item):\n self.items.remove(item)\n item.place(self.parent.x, self.parent.y, self.gamemap)\n\n self.engine.message_log.add_message(f'You yeeted the {item.name}.')", "def add_item(self, item: Item):\n self.__items_list.append(item)", "def dropEvent(self, e: QtGui.QDropEvent):\n src = e.source()\n if src is not self:\n for item in src.selectedItems():\n clone = item.clone()\n clone.setFlags(clone.flags() | Qt.ItemIsEditable)\n self.addTopLevelItem(clone)\n super().dropEvent(e) # Call the original function", "async def loaddnd(self, ctx):\n await self.bot.di.new_items(ctx.guild, (ServerItem(**item) for item in self.bot.dnditems.values()))\n await ctx.send(await _(ctx, \"Successfully added all D&D items!\"))", "def add_item(self, item):\n self._add_widget(item)\n\n self._items.append(item)\n\n if (\n item.connected and self._last_item is not None\n ): # It's not the first item added ::\n self._connected_items.append(ConnectedItem(self._last_item, item))\n\n self._last_item = item", "def add_component(self, component, targets):\n data = [{\n 'targets': targets,\n 'labels': {'service': self.name, 'component': component},\n }]\n with open(self.filename) as f:\n old_data = json.load(f)\n for s in old_data:\n if s['labels']['service'] == self.name and s['labels'].get('component','') == component:\n 
data[0]['targets'].extend(s['targets'])\n else:\n data.append(s)\n logging.debug('set(%s) %r', self.filename, data)\n with open(self.filename, 'w') as f:\n json.dump(data, f)", "def add_coupled_el(self, el_to_add):\n if isinstance(el_to_add, list):\n self.coupled_el.extend(el_to_add)\n else:\n self.coupled_el.append(el_to_add)", "def add_to_basket(self, item):\n self._products.append(item)", "def __append_to_item_list(self):\n Item.get_item_list().append(self)", "def add_item(self, item_to_append):\n self.items.append(item_to_append)", "def add(self, component):\n\n self.append(component)\n self.sum.Add(component.th1f)", "def create_dropout_layer(self):\n return tf.keras.layers.Dropout(rate=self.dropout)", "def add(self, item: Any) -> None:\n pass", "def drop(self):\n Game.instance.area_map.entities.append(self.owner)\n Game.instance.inventory.remove(self.owner)\n self.owner.x = Game.instance.player.x\n self.owner.y = Game.instance.player.y\n message('You dropped a ' + self.owner.name + '.', palette.yellow)", "async def loaddndshop(self, ctx):\n await self.bot.di.add_shop_items(ctx.guild,\n {item: value[\"meta\"][\"Cost\"] for item, value in self.bot.dnditems.items() if\n value[\"meta\"].get(\"Cost\", \"\").isdigit()})\n await ctx.send(await _(ctx, \"Successfully added all D&D items to shop!\"))", "def dropouts(self):\n return self._dropouts", "def add_ui_object(self, UI_object: object):\n self.__UI_objects.append(UI_object)", "def _add_items_to_color(self, drop_id_color):\n\n # Get the drop item names\n selected_items = self.selectedItems()\n\n drop_names = [x.data(1, QtCore.Qt.UserRole) for x in selected_items\n if x.data(0, QtCore.Qt.UserRole) == \"object\"] or None\n\n # If no drop items return\n if drop_names is None:\n return\n\n # Get the tree widget items for the drop names\n drop_list = [self.items_dict[drop_name][drop_id_color.parent()]\n for drop_name in list(set(drop_names))]\n\n # Update the id color tree widget content list\n self._drop_tree_items(drop_list, drop_id_color)\n\n return None", "def add(self):\n self.inp.inputs.add(self)\n self.out.outputs.add(self)", "def add_donut(self):\n self.scenes[self.current_scene].add_object(Donut())\n self.redraw()", "def add_object(self, obj):\n\t\tself.objects.append(obj)", "def add_components(self, comps):\r\n if not isinstance(comps, list):\r\n comps = [comps]\r\n for comp in comps:\r\n self.comps.append(comp)", "def _add_component(self, __assistant):\r\n\r\n # Retrieve the hardware ID.\r\n _model = self.cmbHardware.get_model()\r\n _row = self.cmbHardware.get_active_iter()\r\n _hardware_id = int(_model.get_value(_row, 1))\r\n\r\n self._controller.add_component(self._incident_id, _hardware_id)\r\n self._workbook.load_component_list()\r\n\r\n return False", "def add(self, item):\n # make sure there's enough space to fit all items\n if self.container.capacity(Item.MIN_SIDE_SIZE) < len(self.items) + 1:\n raise LayoutError(\"container too small to fit all items\")\n\n self.items.append(item)\n coords = self.item_coordinates(len(self.items))\n\n self.arrange(coords)\n\n if self.items_intersect():\n raise LayoutError(\"overlapping items\")", "def add(self, item):\n\n if item not in self:\n self._index_map[item] = len(self._list)\n self._list.append(item)", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def drop_boxes(self): \r\n model = loader.load_model('models/box.egg')\r\n model.set_pos(-0.5, -0.5, -0.5)\r\n model.flatten_light()\r\n shape = BulletBoxShape(LVector3(0.5, 0.5, 0.5))\r\n ahead = 
self.vehicleNP.get_pos() + self.vehicle.get_forward_vector()*15\r\n \r\n for i in range(6):\r\n node = BulletRigidBodyNode('Box')\r\n node.set_mass(5.0)\r\n node.add_shape(shape)\r\n node.set_deactivation_enabled(False)\r\n np = render.attach_new_node(node)\r\n np.set_pos(ahead.x, ahead.y, ahead.z + i*2)\r\n self.world.attach(node)\r\n model.copy_to(np)", "def append(self, item):\n self.items.append(item)", "def do_poortego_add(self, arg, opts=None):\n # Code moved to .command.add sub-module for easier reading/debugging \n poortego_add(self.my_interface, arg, opts)", "def add_state_desires(self):\n raise NotImplementedError()", "def insert(self, index: int, obj: Any) -> None:\n from ..pane import panel\n new_objects = list(self)\n new_objects.insert(index, panel(obj))\n self.objects = new_objects", "def AddPane3(self, window, pane_info, drop_pos):\r\n \r\n if not self.AddPane(window, pane_info):\r\n return False\r\n\r\n pane = self.GetPane(window)\r\n indx = self._panes.index(pane)\r\n\r\n ret, pane = self.DoDrop(self._docks, self._panes, pane, drop_pos, wx.Point(0, 0))\r\n self._panes[indx] = pane\r\n\r\n return True", "def set_item(self, input=None, output=None):\n self.remove()\n if input is not None:\n self.inp = input\n if output is not None:\n self.out = output\n self.add()", "def AddUnit(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == 
self.squad.max_size:\n self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1", "def to_add(self):\n pass", "def add_objects_to_space(self):\n self.anti_spacecraft.add_to_space(self.space) # Anti-spacecraft Parts (represent the whole vehicle)\n self.space.add(self.spacecraft.body, self.spacecraft.shape) # Spacecraft body and shape\n self.space.add(self.pm_landing_pad) # Landing pad", "def add(self, obj):\n if isinstance(obj, Drawable):\n self._drawables.add(obj)\n if isinstance(obj, Updateable):\n self._updateables.add(obj)\n if isinstance(obj, Collidable) and not isinstance(obj, Projectile):\n self._collidables.add(obj)\n if isinstance(obj, Collidable) and isinstance(obj, Projectile):\n self._projectiles.add(obj)\n if isinstance(obj, Textbox):\n self._textboxes.add(obj)\n # Always make sure the newest textbox is on top.\n obj.z = zlayer.TEXT + max(t.z for t in self._textboxes) + 1\n self.__len__.cache_clear()", "def add(self, item):\n self.contents.append(item)", "def add_item(self, item_name):\n if not self.has_item(item_name):\n self.item_list.append(item_name)", "def add(self, element):\n pass", "def add_unit(self):\n self.q(css=self._bounded_selector(self.ADD_BUTTON_SELECTOR)).click()", "def removeObject(self,object):\n self.removeList.append(object)", "def add(self, obj):\n raise NotImplementedError", "def take(self, pitem):\n\n #if adding one more item is exceeding the max item carry , say no to add \n if self.max_items <= len(self.items):\n \n print('The player item list has been exceeded the maximum number of \\n items the player can carry')\n\n #if not add the item to the list \n else:\n self.items.append(pitem)", "def add_after ( self ):\n self.add_item( 1 )", "def set_destination_to_add_destination(self, destination):\n self.multiple_items_selection_from_kendo_dropdown(self.destination_multiple_kendo_dropdown_locator, destination)\n self.click_element(self.new_destination_header_locator)", "def add(self, item):\n self._set(item, None)", "def on_drop(self):\n print(\"You have dropped\", self.name)", "def collect(item):\n inventory.append(item)\n print(f'You now have: {inventory}')", "def add_pack_to_pack(self, pack):\n logging.debug(\"Merging packs\")\n for wolf in pack.wolves:\n self.add_wolf_to_pack(wolf)\n logging.debug(\"Pack is now {} wolves\".format(len(self.wolves)))\n self.model.schedule.remove(pack)\n self.model.grid.remove_agent(pack)", "def add(self, element):\n if not isinstance(element, self._instance):\n warn(\"Cannot add, incompatible type.\")\n return\n self._elements.append(element)", "def add_widget(self, widget, new_name=False):\n # we don't list placeholders\n if isinstance(widget, Placeholder):\n return\n\n # The internal widgets (e.g. the label of a GtkButton) are handled\n # by gtk and don't have an associated gadget: we don't want to\n # add these to our list. 
It would be nicer to have a flag to check\n # (as we do for placeholders) instead of checking for the associated\n # gadget, so that we can assert that if a widget is _not_\n # internal, it _must_ have a corresponding gadget...\n # Anyway this suffice for now.\n gadget = Gadget.from_widget(widget)\n if not gadget:\n return\n\n children = gadget.get_children()\n\n gadget.project = self\n self._widget_old_names[widget] = widget.get_name()\n\n widget_name = widget.name\n if new_name and gadget.name in self._widgets:\n widget_name = self.set_new_widget_name(gadget.widget)\n\n widget.connect('notify::name', self._on_widget_notify_name)\n self._widgets[widget_name] = widget\n\n for child in children:\n self.add_widget(child, new_name=new_name)\n\n self.emit('add-gadget', gadget)", "def add_item(self, item):\n\n if item.descriptor in self.__slots:\n slot = self.__slots[item.descriptor]\n slot.quantity += 1\n else:\n self.__slots[item.descriptor] = Slot(item)" ]
[ "0.63641316", "0.56994003", "0.5660805", "0.5521632", "0.5519748", "0.5501247", "0.54634714", "0.5435919", "0.53915066", "0.5373497", "0.53394043", "0.53063345", "0.5302059", "0.52375495", "0.52294815", "0.5180793", "0.51650107", "0.51634353", "0.515684", "0.51533777", "0.5146691", "0.5139247", "0.51310295", "0.51231766", "0.5123073", "0.50958693", "0.5091321", "0.5090395", "0.50851196", "0.50776845", "0.5021721", "0.5019062", "0.49958247", "0.49957153", "0.49889812", "0.4980599", "0.49805528", "0.4976928", "0.49754274", "0.4967941", "0.49538887", "0.49426803", "0.49368754", "0.4921868", "0.49214473", "0.49149412", "0.4913046", "0.4909768", "0.49040344", "0.49021286", "0.48901278", "0.48819938", "0.48789155", "0.48761225", "0.48719707", "0.48693335", "0.4868354", "0.48675328", "0.4865673", "0.48560873", "0.48454586", "0.484285", "0.48355743", "0.48344058", "0.4828303", "0.48139226", "0.4811828", "0.4811188", "0.4806023", "0.48016116", "0.47982153", "0.4776831", "0.47411066", "0.4729286", "0.47214845", "0.4717215", "0.47126257", "0.47108552", "0.4710631", "0.4710536", "0.47096625", "0.47005805", "0.46955082", "0.46895218", "0.46888122", "0.46745148", "0.4667547", "0.4657629", "0.46494636", "0.46455002", "0.4645007", "0.46367586", "0.46360552", "0.4635566", "0.46354982", "0.46342942", "0.4632242", "0.46285242", "0.46277353", "0.46266767" ]
0.673964
0
Sets model, data, and training algo parameters.
def __init__(self,model,dataset,args):
        self.args = args
        self.dataset = dataset
        self.model = model.to(args.device)

        self.optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=self.optimizer,
                                                              mode='min', factor=0.5,
                                                              patience=1)

        vectorizer = dataset.get_vectorizer()
        self.mask_index = vectorizer.target_vocab.mask_index

        self.train_state = make_train_state(args)

        self.epoch_bar = tqdm_notebook(desc='training routine',
                                       total=args.num_epochs,
                                       position=0)

        self.dataset.set_split('train')
        self.train_bar = tqdm_notebook(desc='split=train',
                                       total=dataset.get_num_batches(args.batch_size),
                                       position=1, leave=True)

        self.dataset.set_split('val')
        self.val_bar = tqdm_notebook(desc='split=val',
                                     total=dataset.get_num_batches(args.batch_size),
                                     position=1, leave=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_params(self, params: Dict):\n\n if params['training_instances'] is not None:\n self.training_instances = params['training_instances']\n if params['n'] is not None:\n self.n = params['n']\n if params['lda'] is not None:\n self.lda = params['lda']\n if params['verbose'] is not None:\n self.verbose = params['verbose']\n\n self.num_features = self.training_instances[0].get_feature_count()\n self.w = None\n self.b = None", "def set_load_model_parameters(self):\n\n self.controller.set_new_model_test_input_path(self.test_input.get())\n self.controller.set_new_model_results_input_path(self.results_input.get())\n self.controller.set_new_model_running(False)", "def _set_training_params(self, params):\n self.lyapunov_hybrid_system.lyapunov_relu.load_state_dict(\n params[\"lyap_relu_params\"])\n if not self.R_options.fixed_R:\n self.R_options._variables = params[\"R_params\"].clone()\n if isinstance(self.lyapunov_hybrid_system.system,\n feedback_system.FeedbackSystem):\n self.lyapunov_hybrid_system.system.controller_network.\\\n load_state_dict(params[\"controller_params\"])", "def setup(self):\n self.model.monitor = myMonitor.get_monitor(self.model)\n self.model.monitor.time_budget_exceeded = False\n if self.algorithm is not None:\n self.algorithm.setup(model=self.model, dataset=self.dataset)\n self.setup_extensions()\n\n # Model.modify_updates is used by the training algorithm to\n # enforce constraints after each step of learning. Here we\n # make sure the constraints are enforced from the start.\n self.model.enforce_constraints()", "def _set_params(self, estimator_args, scaler_args, execution_args, metric_args=None, dim_reduction_args=None):\n \n # Set default values which will be used if execution arguments are not passed\n \n # Default parameters:\n self.model.overwrite = True\n self.model.debug = False\n self.model.test_size = 0.33\n self.model.cv = 0\n self.model.time_series_split = 0\n self.model.max_train_size = None\n self.model.random_state = 42\n self.model.compress = 3\n self.model.retain_data = False\n self.model.scale_hashed = True\n self.model.scale_vectors = True\n self.model.scaler = \"StandardScaler\"\n self.model.scaler_kwargs = {}\n self.model.estimator_kwargs = {}\n self.model.missing = \"zeros\"\n self.model.calc_feature_importances = False\n self.model.importances_n_repeats = 30\n self.model.lags= None\n self.model.lag_target = False\n self.model.scale_target = False\n self.model.scale_lag_target= True\n self.model.make_stationary = None\n self.model.stationarity_lags = [1]\n self.model.using_keras = False\n self.model.current_sample_as_input = True\n self.model.prediction_periods = 1\n \n # Default metric parameters:\n if metric_args is None:\n self.model.metric_args = {}\n \n # Set execution parameters\n \n # If the execution key word arguments were included in the request, get the parameters and values\n if len(execution_args) > 0:\n \n # Transform the string of arguments into a dictionary\n execution_args = utils.get_kwargs(execution_args)\n \n # Set the overwite parameter if any existing model with the specified name should be overwritten\n if 'overwrite' in execution_args:\n self.model.overwrite = 'true' == execution_args['overwrite'].lower()\n \n # Set the test_size parameter that will be used to split the samples into training and testing data sets\n # Default value is 0.33, i.e. 
we use 66% of the samples for training and 33% for testing\n if 'test_size' in execution_args:\n self.model.test_size = utils.atof(execution_args['test_size'])\n\n # Enable K-fold cross validation. For more information see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n # Default value is 0 in which case a simple holdout strategy based on the test_size parameter is used.\n # If cv > 0 then the model is validated used K = cv folds and the test_size parameter is ignored.\n if 'cv' in execution_args:\n self.model.cv = utils.atoi(execution_args['cv'])\n \n # Enable timeseries backtesting using TimeSeriesSplit. https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.TimeSeriesSplit.html\n # This will select the a validation strategy appropriate for time series and sequential data.\n # The feature definitions must include an 'identifier' field which can be used to sort the series into the correct order.\n # The integer supplied in this parameter will split the data into the given number of subsets for training and testing.\n if 'time_series_split' in execution_args:\n self.model.time_series_split = utils.atoi(execution_args['time_series_split'])\n\n # This parameter can be used together with time_series_split.\n # It specifies the maximum samples to be used for training in each split, which allows for rolling/ walk forward validation.\n if 'max_train_size' in execution_args:\n self.model.max_train_size = utils.atoi(execution_args['max_train_size'])\n\n # Add lag observations to the feature matrix. Only applicable for Keras models.\n # An identifier field must be included in the feature definitions to correctly sort the data for this capability.\n # For e.g. if lags=2, features from the previous two samples will be concatenated as input features for the current sample.\n # This is useful for framing timeseries and sequence prediction problems into 3D or 4D data required for deep learning.\n if 'lags' in execution_args:\n self.model.lags = utils.atoi(execution_args['lags'])\n\n # Include targets in the lag observations\n # If True an additional feature will be created for each sample using the previous value of y \n if 'lag_target' in execution_args:\n self.model.lag_target = 'true' == execution_args['lag_target'].lower()\n \n # Scale the target before fitting\n # The scaling will be inversed before predictions so they are returned in the original scale \n if 'scale_target' in execution_args:\n self.model.scale_target = 'true' == execution_args['scale_target'].lower()\n\n # Scale lag values of the targets before fitting\n # Even if scale_target is set to false, the lag values of targets being used as features can be scaled by setting this to true \n if 'scale_lag_target' in execution_args:\n self.model.scale_lag_target = 'true' == execution_args['scale_lag_target'].lower()\n\n # Make the target series more stationary. This only applies to sequence prediction problems.\n # Valid values are 'log' in which case we apply a logarithm to the target values,\n # or 'difference' in which case we transform the targets into variance from the previous value.\n # The transformation will be reversed before returning predictions.\n if 'make_stationary' in execution_args:\n self.model.make_stationary = execution_args['make_stationary'].lower()\n\n # Provide lags periods for differencing\n # By default the difference will be done with lag = 1. Alternate lags can be provided by passing a list of lags as a list.\n # e.g. 
'stationarity_lags=1;12|list|int'\n if 'stationarity_lags' in execution_args:\n self.model.stationarity_lags = utils.get_kwargs_by_type({'stationarity_lags': execution_args['stationarity_lags']})['stationarity_lags']\n\n # Specify if the current sample should be used as input to the model\n # This is to allow for models that only use lag observations to make future predictions\n if 'current_sample_as_input' in execution_args:\n self.model.current_sample_as_input = 'true' == execution_args['current_sample_as_input'].lower()\n\n # Specify the number of predictions expected from the model\n # This can be used to get a model to predict the next m periods given inputs for the previous n periods.\n # This is only valid for Keras models which have a final output layer with more than one node\n if 'prediction_periods' in execution_args:\n self.model.prediction_periods = utils.atoi(execution_args['prediction_periods'])\n \n # Seed used by the random number generator when generating the training testing split\n if 'random_state' in execution_args:\n self.model.random_state = utils.atoi(execution_args['random_state'])\n \n # Compression level between 1-9 used by joblib when saving the model\n if 'compress' in execution_args:\n self.model.compress = utils.atoi(execution_args['compress'])\n \n # Flag to determine if the training and test data should be saved in the model\n if 'retain_data' in execution_args:\n self.model.retain_data = 'true' == execution_args['retain_data'].lower()\n\n # Flag to determine if feature importances should be calculated when the fit method is called\n if 'calculate_importances' in execution_args:\n self.model.calc_feature_importances = 'true' == execution_args['calculate_importances'].lower()\n\n # Sets the number of times a feature is randomly shuffled during the feature importance calculation\n if 'importances_n_repeats' in execution_args:\n self.model.importances_n_repeats = utils.atoi(execution_args['importances_n_repeats'])\n \n # Set the debug option for generating execution logs\n # Valid values are: true, false\n if 'debug' in execution_args:\n self.model.debug = 'true' == execution_args['debug'].lower()\n \n # Additional information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n # Increment log counter for the class. 
Each instance of the class generates a new log.\n self.__class__.log_no += 1\n\n # Create a log file for the instance\n # Logs will be stored in ..\\logs\\SKLearn Log <n>.txt\n self.logfile = os.path.join(os.getcwd(), 'logs', 'SKLearn Log {}.txt'.format(self.log_no))\n \n # Create dictionary of parameters to display for debug\n self.exec_params = {\"overwrite\":self.model.overwrite, \"test_size\":self.model.test_size, \"cv\":self.model.cv,\\\n \"time_series_split\": self.model.time_series_split, \"max_train_size\":self.model.max_train_size, \"lags\":self.model.lags,\\\n \"lag_target\":self.model.lag_target, \"scale_target\":self.model.scale_target, \"make_stationary\":self.model.make_stationary,\\\n \"random_state\":self.model.random_state, \"compress\":self.model.compress, \"retain_data\":self.model.retain_data,\\\n \"calculate_importances\": self.model.calc_feature_importances, \"importances_n_repeats\": self.model.importances_n_repeats,\\\n \"debug\":self.model.debug}\n\n self._print_log(1)\n \n # If the scaler key word arguments were included in the request, get the parameters and values\n if len(scaler_args) > 0:\n \n # Transform the string of arguments into a dictionary\n scaler_args = utils.get_kwargs(scaler_args)\n \n # Set scaler arguments that will be used when preprocessing the data\n # Valid values are: StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler and QuantileTransformer\n # More information here: http://scikit-learn.org/stable/modules/preprocessing.html\n if 'scaler' in scaler_args:\n self.model.scaler = scaler_args.pop('scaler')\n \n if 'missing' in scaler_args:\n self.model.missing = scaler_args.pop('missing').lower()\n \n if 'scale_hashed' in scaler_args:\n self.model.scale_hashed = 'true' == scaler_args.pop('scale_hashed').lower()\n \n if 'scale_vectors' in scaler_args:\n self.model.scale_vectors = 'true' == scaler_args.pop('scale_vectors').lower()\n \n # Get the rest of the scaler parameters, converting values to the correct data type\n self.model.scaler_kwargs = utils.get_kwargs_by_type(scaler_args) \n else:\n err = \"Arguments for scaling did not include the scaler name e.g StandardScaler\"\n raise Exception(err)\n \n # If the estimator key word arguments were included in the request, get the parameters and values\n if len(estimator_args) > 0:\n \n # Transform the string of arguments into a dictionary\n estimator_args = utils.get_kwargs(estimator_args)\n \n # Set estimator arguments that will be used when preprocessing the data\n # The parameters available will depend on the selected estimator\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'estimator' in estimator_args:\n self.model.estimator = estimator_args.pop('estimator')\n \n # Set the estimator type for the model\n if self.model.estimator in self.classifiers:\n self.model.estimator_type = \"classifier\"\n elif self.model.estimator in self.regressors:\n self.model.estimator_type = \"regressor\"\n elif self.model.estimator in self.decomposers:\n self.model.estimator_type = \"decomposer\"\n elif self.model.estimator in self.clusterers:\n self.model.estimator_type = \"clusterer\"\n else:\n err = \"Unknown estimator class: {0}\".format(self.model.estimator)\n raise Exception(err)\n\n # Get the rest of the estimator parameters, converting values to the correct data type\n self.model.estimator_kwargs = utils.get_kwargs_by_type(estimator_args) \n else:\n err = \"Arguments for estimator did not include the estimator class e.g. 
RandomForestClassifier\"\n raise Exception(err)\n \n # If key word arguments for model evaluation metrics are included in the request, get the parameters and values\n if metric_args is not None and len(metric_args) > 0:\n # Transform the string of arguments into a dictionary\n metric_args = utils.get_kwargs(metric_args)\n \n # Get the metric parameters, converting values to the correct data type\n self.model.metric_args = utils.get_kwargs_by_type(metric_args) \n \n # If key word arguments for dimensionality reduction are included in the request, get the parameters and values\n if dim_reduction_args is not None and len(dim_reduction_args) > 0:\n # Transform the string of arguments into a dictionary\n dim_reduction_args = utils.get_kwargs(dim_reduction_args)\n \n # Set dim_reduction arguments that will be used after preprocessing the data\n # The parameters available will depend on the selected dimensionality reduction method\n # Acceptable classes are PCA, KernelPCA, IncrementalPCA, TruncatedSVD\n # More information here: http://scikit-learn.org/stable/modules/classes.html#api-reference\n if 'reduction' in dim_reduction_args:\n self.model.reduction = dim_reduction_args.pop('reduction')\n \n # Get the rest of the dim_reduction parameters, converting values to the correct data type\n self.model.dim_reduction_args = utils.get_kwargs_by_type(dim_reduction_args) \n else:\n err = \"Arguments for dimensionality reduction did not include the class e.g. PCA\"\n raise Exception(err)\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(2)", "def __init__(self, parameters: ParametersList, algorithm: ClassVar, algorithm_data: AlgorithmData):\n super(GreedyTrain, self).__init__(parameters, algorithm, algorithm_data)", "def initialize_training(self, training_info):\n self.model.reset_weights()\n self.algo.initialize(self.settings, model=self.model, environment=self.environment, device=self.device)", "def set_params(self, **params):\n if('threshold' in params.keys()):\n self.threshold = params['threshold']\n if('subsample' in params.keys()):\n self.subsample = params['subsample']\n if('estimator' in params.keys()):\n self.estimator = params['estimator']\n if('n_folds' in params.keys()):\n self.n_folds = params['n_folds']\n if('stratify' in params.keys()):\n self.stratify = params['stratify']\n if('random_state' in params.keys()):\n self.random_state = params['random_state']\n if('n_jobs' in params.keys()):\n self.n_jobs = params['n_jobs']", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n 
key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def setup(self, stage=None):\n self.data_train, self.data_val, self.data_test = [None] * 3", "def set_training_parameters(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n self.configure_steps(config, len_train, len_test)\n self.configure_reporting(config)\n self.configure_training_functions(config)", "def __init__(self, parameters: ParametersList, algorithm: ClassVar, algorithm_data: AlgorithmData):\n\n super(SupervisedTrain, self).__init__(parameters, algorithm, algorithm_data)\n self.best_score = np.inf", "def tune_params(self, X_train, Y_train):\n return self.model # No hyper-parameter tuning", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n 
canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def train(self, training_data, cfg, **kwargs):\n pass", "def setParameters(self, params):\n self.module._setParameters(params)\n # update parameters for learner\n self.learner.setModule(self.module)", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def __init__(self, x_train, model):\n self.x_train = x_train\n self.model = model", "def set_train(self):\n self.model.train()", "def _initialize_model_params(self):\n\n if 'model' not in self._raw_data_dict:\n raise Error('The \"model\" key is not found in the configuration file. 
Looks like the parsed file is not '\n 'Object Detection API model configuration file.')\n params = list(self._raw_data_dict['model'].values())[0]\n for rule in mapping_rules:\n self._update_param_using_rule(params, rule)", "def __init__(self, params, data):\n super(HyperOptNoTraining, self).__init__(params, data)\n self.objective = None\n self.trial_losses = None\n self.best_trial = None\n self.trial_list = None", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def setupModel(cls, roadrunner, parameters, logger=Logger()):\r\n pp = parameters.valuesdict()\r\n for parameter in pp.keys():\r\n try:\r\n roadrunner.model[parameter] = pp[parameter]\r\n except Exception as err:\r\n msg = \"_modelFitterCore.setupModel: Could not set value for %s\" \\\r\n % parameter\r\n logger.error(msg, err)", "def _setupModel(self, parameters):\r\n ModelFitterCore.setupModel(self.roadrunnerModel, parameters,\r\n logger=self.logger)", "def __init__(self, conf):\n self.model_conf = conf[\"model\"]\n self.epochs = self.model_conf.getint(\"n_epochs\")\n self.epoch = self.model_conf.getint(\"epoch_start\")\n self.batch_size = self.model_conf.getint(\"batch_size\")\n self.criterion = nn.CrossEntropyLoss()\n self.device = torch.device(self.model_conf.get('device'))\n #self.model = (\n # eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n #)\n self.model = nn.DataParallel(\n eval(self.model_conf.get('name'))(self.model_conf).to(self.device)\n )\n total_params = sum(p.numel() for p in self.model.parameters())\n print(\"Created model {}: {} parameters\"\n .format(self.model_conf.get('name'), total_params))\n if self.model_conf.get(\"optim\") == 'SGD':\n self.optimizer = optim.SGD(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n momentum=self.model_conf.getfloat(\"momentum\"),\n weight_decay=self.model_conf.getfloat(\"weight_decay\"))\n elif self.model_conf.get(\"optim\") == 'Adam':\n self.optimizer = optim.Adam(\n self.model.parameters(),\n lr=self.model_conf.getfloat(\"learning_rate\"),\n betas=json.loads(self.model_conf.get(\"betas\")))\n else:\n raise ValueError('Only SGD is supported')\n\n if self.model_conf.get(\"checkpoint\") is not None:\n self.load_checkpoint(self.model_conf.get(\"checkpoint\"))\n\n self.checkpoints_path = conf.get(\"paths\", \"checkpoints\")\n self.results_path = conf.get(\"paths\", \"results\")\n self.best_accuracy = 0\n self.train_size = None\n self.valid_size = None\n self.iteration_print_freq = conf.getint(\"log\", \"iteration_print_freq\")", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def set_params(self, **params):\n if not hasattr(self, \"_non_sklearn_base\"):\n return super().set_params(**params)\n if not (\n len(params) == 1 and\n (\"nthreads\" in params or \"n_jobs\" in params)\n ):\n self.is_fitted_ = False\n valid_params = self.get_params(deep=False)\n for k,v in params.items():\n if k not in valid_params:\n raise ValueError(\"Invalid parameter: \", k)\n setattr(self, k, v)\n return self", "def set_hyperparams(self, params):", "def _train_model(\n self,\n dataset: DatasetEntity,\n ):\n logger.info(\"init data cfg.\")\n self._data_cfg = ConfigDict(data=ConfigDict())\n\n for cfg_key, subset in zip(\n [\"train\", \"val\", \"unlabeled\"],\n [Subset.TRAINING, Subset.VALIDATION, Subset.UNLABELED],\n ):\n subset = 
get_dataset(dataset, subset)\n if subset and self._data_cfg is not None:\n self._data_cfg.data[cfg_key] = ConfigDict(\n otx_dataset=subset,\n labels=self._labels,\n )\n\n self._is_training = True\n\n self._init_task()\n\n cfg = self.configure(True, None)\n logger.info(\"train!\")\n\n timestamp = time.strftime(\"%Y%m%d_%H%M%S\", time.localtime())\n\n # Environment\n logger.info(f\"cfg.gpu_ids = {cfg.gpu_ids}, distributed = {cfg.distributed}\")\n env_info_dict = collect_env()\n env_info = \"\\n\".join([(f\"{k}: {v}\") for k, v in env_info_dict.items()])\n dash_line = \"-\" * 60 + \"\\n\"\n logger.info(f\"Environment info:\\n{dash_line}{env_info}\\n{dash_line}\")\n\n # Data\n datasets = [build_dataset(cfg.data.train)]\n\n if self._train_type == TrainType.Semisupervised:\n # forward the knowledge of num iters per epoch to model for filter loss\n bs_per_gpu = cfg.data.train_dataloader[\"samples_per_gpu\"]\n actual_bs = bs_per_gpu * torch.distributed.get_world_size() if cfg.distributed else bs_per_gpu\n cfg.model.num_iters_per_epoch = math.ceil(len(datasets[0]) / actual_bs)\n\n # FIXME: Currently segmentor does not support multi batch evaluation.\n # For the Self-SL case, there is no val data. So, need to check the\n\n if \"val\" in cfg.data and \"val_dataloader\" in cfg.data:\n cfg.data.val_dataloader[\"samples_per_gpu\"] = 1\n\n # Target classes\n if \"task_adapt\" in cfg:\n target_classes = cfg.task_adapt.final\n else:\n target_classes = datasets[0].CLASSES\n\n # Metadata\n meta = dict()\n meta[\"env_info\"] = env_info\n meta[\"seed\"] = cfg.seed\n meta[\"exp_name\"] = cfg.work_dir\n if cfg.checkpoint_config is not None:\n cfg.checkpoint_config.meta = dict(\n mmseg_version=__version__ + get_git_hash()[:7],\n CLASSES=target_classes,\n )\n\n # Model\n model = self.build_model(cfg, fp16=cfg.get(\"fp16\", False), is_training=self._is_training)\n model.train()\n model.CLASSES = target_classes\n\n if cfg.distributed:\n convert_sync_batchnorm(model)\n\n validate = bool(cfg.data.get(\"val\", None))\n\n if self._hyperparams.learning_parameters.auto_adapt_batch_size != BatchSizeAdaptType.NONE:\n train_func = partial(train_segmentor, meta=deepcopy(meta), model=deepcopy(model), distributed=False)\n adapt_batch_size(\n train_func,\n cfg,\n datasets,\n isinstance(self, NNCFBaseTask), # nncf needs eval hooks\n not_increase=(self._hyperparams.learning_parameters.auto_adapt_batch_size == BatchSizeAdaptType.SAFE),\n )\n\n train_segmentor(\n model,\n datasets,\n cfg,\n distributed=cfg.distributed,\n validate=validate,\n timestamp=timestamp,\n meta=meta,\n )\n\n # Save outputs\n output_ckpt_path = os.path.join(cfg.work_dir, \"latest.pth\")\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mDice_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n best_ckpt_path = glob.glob(os.path.join(cfg.work_dir, \"best_mIoU_*.pth\"))\n if len(best_ckpt_path) > 0:\n output_ckpt_path = best_ckpt_path[0]\n return dict(\n final_ckpt=output_ckpt_path,\n )", "def __init__(self, model, loss, optim, callbacks, train_data, val_data):\n super().__init__(model=model, loss=loss, optim=optim, callbacks=callbacks, train_data=train_data, val_data=val_data)", "def __init__(self, config, data_loader, layer_hyperparams):\n self.config = config\n self.layer_hyperparams = layer_hyperparams\n\n if config.is_train:\n self.train_loader = data_loader[0]\n self.valid_loader = data_loader[1]\n self.num_train = len(self.train_loader.dataset)\n self.num_valid = self.valid_loader.dataset.trials\n else:\n if 
config.get_embedding:\n self.test_embedding_loader = data_loader\n self.n_embeddings = config.n_embeddings\n else:\n self.test_loader = data_loader\n self.num_test = self.test_loader.dataset.trials\n\n if config.use_batch_norm:\n self.model = SiameseNetWithBN()\n else:\n self.model = SiameseNet()\n \n if config.use_gpu:\n self.model.cuda()\n\n # model params\n self.num_params = sum(\n [p.data.nelement() for p in self.model.parameters()]\n )\n self.num_model = get_num_model(config)\n self.num_layers = len(list(self.model.children()))\n\n print('[*] Number of model parameters: {:,}'.format(self.num_params))\n\n # path params\n self.ckpt_dir = os.path.join(config.ckpt_dir, self.num_model)\n self.logs_dir = os.path.join(config.logs_dir, self.num_model)\n\n # misc params\n self.resume = config.resume\n self.use_gpu = config.use_gpu\n self.dtype = (\n torch.cuda.FloatTensor if self.use_gpu else torch.FloatTensor\n )\n\n # optimization params\n self.best = config.best\n self.best_valid_acc = 0.\n self.epochs = config.epochs\n self.start_epoch = 0\n self.lr_patience = config.lr_patience\n self.train_patience = config.train_patience\n self.counter = 0\n\n # grab layer-wise hyperparams\n self.init_lrs = self.layer_hyperparams['layer_init_lrs']\n self.init_momentums = [config.init_momentum]*self.num_layers\n self.end_momentums = self.layer_hyperparams['layer_end_momentums']\n self.l2_regs = self.layer_hyperparams['layer_l2_regs']\n\n # compute temper rate for momentum\n if self.epochs == 1:\n f = lambda max, min: min\n else:\n f = lambda max, min: (max - min) / (self.epochs-1)\n self.momentum_temper_rates = [\n f(x, y) for x, y in zip(self.end_momentums, self.init_momentums)\n ]\n\n # set global learning rates and momentums\n self.lrs = self.init_lrs\n self.momentums = self.init_momentums\n\n # # initialize optimizer\n # optim_dict = []\n # for i, layer in enumerate(self.model.children()):\n # group = {}\n # group['params'] = layer.parameters()\n # group['lr'] = self.lrs[i]\n # group['momentum'] = self.momentums[i]\n # group['weight_decay'] = self.l2_regs[i]\n # optim_dict.append(group)\n # self.optimizer = optim.SGD(optim_dict)\n # self.optimizer = optim.SGD(\n # self.model.parameters(), lr=1e-3, momentum=0.9, weight_decay=4e-4,\n # )\n self.optimizer = optim.Adam(\n self.model.parameters(), lr=3e-4, weight_decay=6e-5,\n )\n\n # # learning rate scheduler\n # self.scheduler = StepLR(\n # self.optimizer, step_size=self.lr_patience, gamma=0.99,\n # )\n self.debug = dict()", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def train(self, hyps):\n\n # Print Hyperparameters To Screen\n items = list(hyps.items())\n for k, v in sorted(items):\n print(k+\":\", v)\n\n # Make Save Files\n if \"save_folder\" in hyps:\n save_folder = hyps['save_folder']\n else:\n 
save_folder = \"./saved_data/\"\n\n if not os.path.exists(save_folder):\n os.mkdir(save_folder)\n base_name = save_folder + hyps['exp_name']\n net_save_file = base_name+\"_net.p\"\n best_net_file = base_name+\"_best.p\"\n optim_save_file = base_name+\"_optim.p\"\n log_file = base_name+\"_log.txt\"\n if hyps['resume']: log = open(log_file, 'a')\n else: log = open(log_file, 'w')\n for k, v in sorted(items):\n log.write(k+\":\"+str(v)+\"\\n\")\n\n # Miscellaneous Variable Prep\n logger = Logger()\n shared_len = hyps['n_tsteps']*hyps['n_rollouts']\n env = gym.make(hyps['env_type'])\n obs = env.reset()\n prepped = hyps['preprocess'](obs)\n hyps['state_shape'] = [hyps['n_frame_stack']] + [*prepped.shape[1:]]\n if hyps['env_type'] == \"Pong-v0\":\n action_size = 3\n else:\n action_size = env.action_space.n*(hyps['env_type']!=\"Pong-v0\")\n hyps['action_shift'] = (4-action_size)*(hyps['env_type']==\"Pong-v0\") \n print(\"Obs Shape:,\",obs.shape)\n print(\"Prep Shape:,\",prepped.shape)\n print(\"State Shape:,\",hyps['state_shape'])\n print(\"Num Samples Per Update:\", shared_len)\n print(\"Samples Wasted in Update:\", shared_len % hyps['batch_size'])\n del env\n\n # Make Network\n net = hyps['model'](hyps['state_shape'],action_size,h_size=hyps['h_size'],bnorm=hyps['use_bnorm'])\n if hyps['resume']:\n net.load_state_dict(torch.load(net_save_file))\n base_net = copy.deepcopy(net)\n net = cuda_if(net)\n net.share_memory()\n base_net = cuda_if(base_net)\n\n # Prepare Shared Variables\n shared_data = {'states': cuda_if(torch.zeros(shared_len, *hyps['state_shape']).share_memory_()),\n 'rewards': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'deltas': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'dones': cuda_if(torch.zeros(shared_len).share_memory_()),\n 'actions': torch.zeros(shared_len).long().share_memory_()}\n if net.is_recurrent:\n shared_data['h_states'] = cuda_if(torch.zeros(shared_len, hyps['h_size']).share_memory_())\n n_rollouts = hyps['n_rollouts']\n gate_q = mp.Queue(n_rollouts)\n stop_q = mp.Queue(n_rollouts)\n reward_q = mp.Queue(1)\n reward_q.put(-1)\n\n # Make Runners\n runners = []\n for i in range(hyps['n_envs']):\n runner = Runner(shared_data, hyps, gate_q, stop_q, reward_q)\n runners.append(runner)\n\n # Start Data Collection\n print(\"Making New Processes\")\n procs = []\n for i in range(len(runners)):\n proc = mp.Process(target=runners[i].run, args=(net,))\n procs.append(proc)\n proc.start()\n print(i, \"/\", len(runners), end='\\r')\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Make Updater\n updater = Updater(base_net, hyps)\n if hyps['resume']:\n updater.optim.load_state_dict(torch.load(optim_save_file))\n updater.optim.zero_grad()\n updater.net.train(mode=True)\n updater.net.req_grads(True)\n\n # Prepare Decay Precursors\n entr_coef_diff = hyps['entr_coef'] - hyps['entr_coef_low']\n epsilon_diff = hyps['epsilon'] - hyps['epsilon_low']\n lr_diff = hyps['lr'] - hyps['lr_low']\n\n # Training Loop\n past_rews = deque([0]*hyps['n_past_rews'])\n last_avg_rew = 0\n best_rew_diff = 0\n best_avg_rew = -1000\n epoch = 0\n T = 0\n while T < hyps['max_tsteps']:\n basetime = time.time()\n epoch += 1\n\n # Collect data\n for i in range(n_rollouts):\n stop_q.get()\n collection_time = time.time() - col_start_time\n\n T += shared_len\n\n # Reward Stats\n avg_reward = reward_q.get()\n reward_q.put(avg_reward)\n last_avg_rew = avg_reward\n if avg_reward > best_avg_rew:\n best_avg_rew = avg_reward\n updater.save_model(best_net_file, None)\n\n # 
Calculate the Loss and Update nets\n start_time = time.time()\n updater.update_model(shared_data)\n update_time = time.time() - start_time\n net.load_state_dict(updater.net.state_dict()) # update all collector nets\n \n # Resume Data Collection\n col_start_time = time.time()\n for i in range(n_rollouts):\n gate_q.put(i)\n\n # Decay HyperParameters\n if hyps['decay_eps']:\n updater.epsilon = (1-T/(hyps['max_tsteps']))*epsilon_diff + hyps['epsilon_low']\n print(\"New Eps:\", updater.epsilon)\n if hyps['decay_lr']:\n new_lr = (1-T/(hyps['max_tsteps']))*lr_diff + hyps['lr_low']\n updater.new_lr(new_lr)\n print(\"New lr:\", new_lr)\n if hyps['decay_entr']:\n updater.entr_coef = entr_coef_diff*(1-T/(hyps['max_tsteps']))+hyps['entr_coef_low']\n print(\"New Entr:\", updater.entr_coef)\n\n # Periodically save model\n if epoch % 10 == 0:\n updater.save_model(net_save_file, optim_save_file)\n\n # Print Epoch Data\n past_rews.popleft()\n past_rews.append(avg_reward)\n max_rew, min_rew = deque_maxmin(past_rews)\n updater.print_statistics()\n avg_action = shared_data['actions'].float().mean().item()\n print(\"Epoch\", epoch, \"– T =\", T)\n print(\"Grad Norm:\",float(updater.norm),\"– Avg Action:\",avg_action,\"– Best AvgRew:\",best_avg_rew)\n print(\"Avg Rew:\", avg_reward, \"– High:\", max_rew, \"– Low:\", min_rew, end='\\n')\n updater.log_statistics(log, T, avg_reward, avg_action, best_avg_rew)\n updater.info['AvgRew'] = avg_reward\n logger.append(updater.info, x_val=T)\n\n # Check for memory leaks\n gc.collect()\n max_mem_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n print(\"Time:\", time.time()-basetime, \"– Collection:\", collection_time, \"– Update:\", update_time)\n if 'hyp_search_count' in hyps and hyps['hyp_search_count'] > 0 and hyps['search_id'] != None:\n print(\"Search:\", hyps['search_id'], \"/\", hyps['hyp_search_count'])\n print(\"Memory Used: {:.2f} memory\\n\".format(max_mem_used / 1024))\n\n logger.make_plots(base_name)\n log.write(\"\\nBestRew:\"+str(best_avg_rew))\n log.close()\n # Close processes\n for p in procs:\n p.terminate()\n return best_avg_rew", "def set_params(self, **parameters):\n for parameter, value in parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def train(self, **kwargs):\n self.solver.train(**kwargs)", "def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)", "def __init__(self):\n # Number of examples per epoch of training data.\n self.num_examples_per_epoch = None \n\n # Optimizer for training the model.\n self.optimizer = \"SGD\" #default \"SGD\"\n\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 2.0 # default 2.0\n self.learning_rate_decay_factor = 0.8\n self.num_epochs_per_decay = 4 #default 8\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 2", "def setup(args):\n cfg = get_cfg()\n\n 
cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(args.model_zoo))\n cfg.DATASETS.TRAIN = (args.train_dataset, )\n cfg.DATASETS.TEST = (args.test_dataset, )\n cfg.DATALOADER.NUM_WORKERS = args.num_workers\n cfg.OUTPUT_DIR = args.output_dir\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n\n cfg.image_w = args.size[0]\n cfg.image_h = args.size[1]\n\n cfg.MODEL.WEIGHTS = args.model_zoo_weights\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.roi_thresh # set a custom testing threshold\n\n default_setup(cfg, args)\n return cfg", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def _set_params_initializer(self, hparams, mode, scope):\n\t\tself.mode = mode\n\t\t# pre_train flag is used for distinguish with pre_train and fine tune\n\t\tif hparams.enable_vae:\n\t\t\t_info('Enable VAE')\n\t\t\tself.enable_vae = True\n\t\t\tself.pre_train = hparams.pre_train\n\t\telse:\n\t\t\tself.enable_vae = False\n\t\t\tself.pre_train = False\n\t\tself.dtype = tf.float32\n\t\tself.global_step = tf.Variable(0, trainable=False)\n\n\t\t# define the input for the model\n\t\tself.encoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='encoder_input_data')\n\t\tself.decoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='decoder_input_data')\n\t\tself.decoder_output_data = tf.placeholder(\n\t\t\ttf.int32, [None, None], name='decoder_output_data')\n\t\tself.seq_length_encoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None], 
name='seq_length_encoder_input_data')\n\t\tself.seq_length_decoder_input_data = tf.placeholder(\n\t\t\ttf.int32, [None], name='seq_length_decoder_input_data')\n\t\t\n\t\t# load some important hparamters\n\t\tself.unit_type = hparams.unit_type\n\t\tself.num_units = hparams.num_units\n\t\tself.num_encoder_layers = hparams.num_encoder_layers\n\t\tself.num_decoder_layers = hparams.num_decoder_layers\n\t\tself.num_encoder_residual_layers = self.num_encoder_layers - 1\n\t\tself.num_decoder_residual_layers = self.num_decoder_layers - 1\n\n\t\tself.batch_size = tf.size(self.seq_length_encoder_input_data)\n\n\t\t# set initializer\n\t\trandom_seed = hparams.random_seed\n\t\tinitializer = _mh.get_initializer(hparams.init_op, random_seed, hparams.init_weight)\n\t\ttf.get_variable_scope().set_initializer(initializer)\n\n\t\t# embeddings\n\t\tself.src_vocab_size = hparams.src_vocab_size\n\t\tself.tgt_vocab_size = hparams.tgt_vocab_size\n\t\tself.init_embeddings(hparams, scope)", "def setup_training(args: argparse.Namespace) -> None:\n # 1. Read hyperparameters from file\n hp = HParams.from_yaml(args.path_config)\n # check if GPU available and add it to parameters\n hp[\"device\"] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n # 2. Create extension of the architecture of the model and timestamp for this run (use to\n # identify folders and files created for this run)\n # format: f(params_file)_t(n_tiers)_l(n_layers)_hd(hidden_size)_gmm(gmm_size).\n extension_architecture = f\"d{hp.name}_t{hp.network.n_tiers}_\" \\\n f\"l{'.'.join(map(str, hp.network.layers))}_\" \\\n f\"hd{hp.network.hidden_size}_gmm{hp.network.gmm_size}\"\n timestamp = f\"{datetime.now().strftime('%Y%m%d-%H%M%S')}\"\n\n # 3 Create directories for saving logs and model weights if they do not exist\n # 3.1 Create model weights directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"training\"][\"dir_chkpt\"] = hp.training.dir_chkpt + extension_architecture\n Path(hp.training.dir_chkpt).mkdir(parents=True, exist_ok=True)\n # 3.2 Create general log directory for this run (the same directory will be used for different\n # runs of a model with same architecture and the difference will be in the file stored)\n hp[\"logging\"][\"dir_log\"] = hp.logging.dir_log + extension_architecture\n Path(hp.logging.dir_log).mkdir(parents=True, exist_ok=True)\n\n # 4. Setup general logging (it will use the folder previously created and the filename will be:\n tier = str(args.tier) if args.tier is not None else 'ALL'\n filename = f\"{hp.logging.dir_log}/tier{tier}_{timestamp}\"\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s',\n handlers=[\n logging.FileHandler(filename=filename), # handler to save the log to a file\n logging.StreamHandler() # handler to output the log to the terminal\n ])\n logger = logging.getLogger()\n\n # 5. Show device that will be used for training: CPU or GPU\n logger.info(f\"Device for training: {hp.device}\")\n\n # 6. 
Start training of the model (or a single tier, depending on args)\n train_model(args, hp, extension_architecture, timestamp, logger)", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def set_params(self):\r\n pass", "def set_parameters(targeted_flag='true',\r\n tv_flag='false',\r\n hinge_flag='true',\r\n cos_flag='false',\r\n interpolation='bilinear',\r\n model_type='small',\r\n loss_type='center',\r\n dataset_type='vgg',\r\n attack='CW',\r\n norm='2',\r\n epsilon=0.1,\r\n iterations=100,\r\n binary_steps=8,\r\n learning_rate=0.01,\r\n epsilon_steps=0.01,\r\n init_const=0.3,\r\n mean_loss='embeddingmean',\r\n batch_size=-1,\r\n margin=5.0,\r\n amplification=2.0):\r\n params = {}\r\n\r\n params['model_type'] = model_type\r\n params['loss_type'] = loss_type\r\n params['dataset_type'] = dataset_type\r\n params['attack'] = attack\r\n params['norm'] = norm\r\n params['epsilon'] = epsilon\r\n params['iterations'] = iterations\r\n params['binary_steps'] = binary_steps\r\n params['learning_rate'] = learning_rate\r\n params['epsilon_steps'] = epsilon_steps\r\n params['init_const'] = init_const\r\n params['mean_loss'] = mean_loss\r\n params['batch_size'] = batch_size\r\n params['targeted_flag'] = string_to_bool(targeted_flag)\r\n params['tv_flag'] = string_to_bool(tv_flag)\r\n params['hinge_flag'] = string_to_bool(hinge_flag)\r\n params['cos_flag'] = string_to_bool(cos_flag)\r\n params['margin'] = margin\r\n params['amp'] = amplification\r\n\r\n if model_type == 'small' and loss_type == 'center':\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = -1.0\r\n else:\r\n params['pixel_max'] = 1.0\r\n params['pixel_min'] = 0.0\r\n\r\n if (dataset_type == 'vggsmall'):\r\n params['align_dir'] = VGG_ALIGN_160_DIR\r\n params['test_dir'] = VGG_TEST_DIR\r\n elif model_type == 'large' or dataset_type == 'casia':\r\n params['align_dir'] = ALIGN_160_DIR\r\n elif model_type == 'small':\r\n params['align_dir'] = ALIGN_96_DIR\r\n else:\r\n ValueError('ValueError: Argument must be either \"small\" or \"large\".')\r\n \r\n if interpolation == 'nearest':\r\n params['interpolation'] = cv2.INTER_NEAREST\r\n elif interpolation == 'bilinear':\r\n params['interpolation'] = cv2.INTER_LINEAR\r\n elif interpolation == 'bicubic':\r\n params['interpolation'] = cv2.INTER_CUBIC\r\n elif interpolation == 'lanczos':\r\n params['interpolation'] = cv2.INTER_LANCZOS4\r\n elif interpolation == 'super':\r\n print('finish later')\r\n else:\r\n raise ValueError('ValueError: Argument must be of the following, [nearest, bilinear, bicubic, lanczos, super].')\r\n\r\n if params['hinge_flag']:\r\n params['attack_loss'] = 'hinge'\r\n else:\r\n params['attack_loss'] = 'target'\r\n if not params['targeted_flag']:\r\n params['attack_loss'] = 'target'\r\n if norm == 'inf':\r\n norm_name = 'i'\r\n else:\r\n norm_name = '2'\r\n if params['tv_flag']:\r\n tv_name = '_tv'\r\n else:\r\n tv_name = ''\r\n if params['cos_flag']:\r\n cos_name = '_cos'\r\n else:\r\n cos_name = ''\r\n\r\n params['model_name'] = '{}_{}'.format(model_type, loss_type)\r\n if dataset_type == 'casia' or dataset_type == 'vggsmall':\r\n params['model_name'] = dataset_type\r\n 
params['attack_name'] = '{}_l{}{}{}'.format(attack.lower(), norm_name, tv_name, cos_name)\r\n\r\n return params", "def initialize(self): \n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.LR)", "def __setup_model(self, **kwargs):\n self.model_architecture = kwargs['model_architecture'].upper()\n self.model = Classifier.IMAGENET_MODELS[self.model_architecture](\n pretrained=True\n )\n\n if 'input_size' in kwargs: # Loading from a checkpoint\n self.input_size = kwargs['input_size']\n self.model.current_epoch = kwargs['current_epoch']\n\n else: # No checkpoint, will be creating a new classifier for the model\n # The number of features coming from the feature detector CNN\n if 'ALEXNET' in self.model_architecture:\n self.input_size = self.model.classifier[1].in_features\n elif 'VGG' in self.model_architecture:\n self.input_size = self.model.classifier[0].in_features\n elif 'DENSENET' in self.model_architecture:\n self.input_size = self.model.classifier.in_features\n\n # Freeze the feature detector parameters to prevent backpropagating\n # through them.\n for param in self.model.parameters():\n param.requires_grad = False\n\n self.model.current_epoch = 1\n\n self.output_size = kwargs['output_size']\n self.hidden_layers = kwargs['hidden_layers']\n self.learn_rate = kwargs['learn_rate']\n self.drop_p = kwargs['drop_p']\n\n self.model.class_to_idx = kwargs['class_to_idx']\n self.model.classifier = Network(self.input_size,\n self.output_size,\n self.hidden_layers,\n self.drop_p)\n\n if 'model_state_dict' in kwargs: # load the state from checkpoint\n self.model.load_state_dict(kwargs['model_state_dict'])\n\n self.criterion = nn.NLLLoss()\n self.optimizer = optim.Adam(self.model.classifier.parameters(),\n lr=self.learn_rate)\n\n if 'optimizer_state_dict' in kwargs: # load the state from checkpoint\n self.optimizer.load_state_dict(kwargs['optimizer_state_dict'])", "def __init__(self):\n self.num_examples_per_epoch = 99999\n self.optimizer = \"Adam\"\n # Learning rate for the initial phase of training.\n self.initial_learning_rate = 0.0001\n self.learning_rate_decay_factor = 0.5\n self.num_epochs_per_decay = 8.0\n\n # Learning rate when fine tuning the Inception v3 parameters.\n self.train_inception_learning_rate = 0.0001\n\n # If not None, clip gradients to this value.\n self.clip_gradients = 5.0\n\n # How many model checkpoints to keep.\n self.max_checkpoints_to_keep = 5000", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self, trainingData, trainingLabels, validationData, validationLabels): \n \n # might be useful in your code later...\n # this is a list of all features in the training set.\n self.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n \n if (self.automaticTuning):\n kgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n else:\n kgrid = [self.k]\n \n self.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def init_params(self):\n self.clear()\n self._init_load_data()\n self._init_net_delay_data()", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store 
them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def train(self, training_data):\n pass", "def _assign_model_params(self, sess):\n with self.graph.as_default():\n for nn in range(self.num_networks):\n self.networks[nn].assign_model_params(sess)", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def __init__(self, model, data, batch_size=50, num_epochs=2, optim_type=\"adam\", optim_config={'learning_rate': 1e-2,}, lr_decay=1.0, num_train_samples=100, num_val_samples=None, verbose=True):\n self.model = model\n \n self.X_train = data[\"X_train\"]\n self.y_train = data[\"y_train\"]\n self.X_val = data[\"X_val\"]\n self.y_val = data[\"y_val\"]\n\n # Setting up variables for the hyperparameters\n \n self.optim_type = optim_type\n self.optim_config = optim_config # dict containing hyperparameters related to parameter update\n self.lr_decay = lr_decay # learning rate decay rate\n self.batch_size = batch_size\n self.num_epochs = num_epochs\n self.num_train_samples = num_train_samples\n self.num_val_samples = num_val_samples\n\n self.print_every = 20\n self.verbose = verbose\n \n # Setting up some extra variables for faster convergence / book-keeping\n \n self.epoch = 0 # to keep track of number of epochs done\n self.best_val_acc = 0 # to keep track of the best val accuracy across all epochs\n self.best_params = {} # to keep track of best model across all epochs\n self.latest_loss = 0 # to keep track of loss in latest iteration\n\n # Making a copy of the optim_config for each parameter\n # for using in other functions of the solver class\n # optim_cofig contains first and second moment of gradients, if applicable, wrt 1 param and hence each parameter has its own optim_config dict\n \n self.optim_configs = {} # dictionary containing config dicts of 
all params\n for p in self.model.params:\n d = {k: v for k, v in self.optim_config.items()} # copying the input config dict to config dicts of all params\n self.optim_configs[p] = d", "def __init__(self, model, check_point, model_name, **kwargs):\n self.model = model\n self.model_name = model_name\n self.check_point = check_point\n self.num_epochs = kwargs.pop('num_epochs', 10)\n self.batch_size = kwargs.pop('batch_size', 128)\n self.learning_rate = kwargs.pop('learning_rate', 1e-4)\n self.model = nn.DataParallel(self.model)\n self.optimizer = optim.Adam(\n model.parameters(),\n lr=self.learning_rate, weight_decay=1e-6)\n self.scheduler = lr_scheduler.StepLR(self.optimizer, step_size=200, gamma=0.5)\n self.loss_fn = kwargs.pop('loss_fn', nn.MSELoss())\n self.fine_tune = kwargs.pop('fine_tune', False)\n self.verbose = kwargs.pop('verbose', False)\n self.print_every = kwargs.pop('print_every', 10)\n\n self._reset()", "def setup_optims(self):\n lr = self.train_config['lr']\n b1 = self.train_config['b1']\n b2 = self.train_config['b2']\n weight_decay = self.train_config['weight_decay']\n self.opt = torch.optim.Adam(self.network.parameters(), lr=lr, betas=(b1, b2),\n weight_decay=weight_decay)", "def set_params(self):\n raise NotImplementedError", "def setup(self, params, training=True, **kwargs):\n\n tf.reset_default_graph()\n\n return super().setup(params=params, training=training, **kwargs)", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def trainModel( self, featureTrain, classTrain):", "def set_initial_params(model: LogisticRegression):\n n_classes = 15 # threat types\n n_features = 33 # Number of features in dataset\n model.classes_ = np.array([i for i in range(15)])\n\n model.coef_ = np.zeros((n_classes, n_features))\n if model.fit_intercept:\n model.intercept_ = np.zeros((n_classes,))", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n 
with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, b).toarray().flatten()\n )", "def setup(self, advanced=False):\n \n # Interpret the request data based on the expected row and column structure\n row_template = ['strData', 'strData', 'strData', 'strData']\n col_headers = ['model_name', 'estimator_args', 'scaler_args', 'execution_args']\n \n if advanced:\n # If specified, get dimensionality reduction arguments\n row_template = ['strData', 'strData', 'strData', 'strData', 'strData', 'strData']\n col_headers = ['model_name', 'estimator_args', 'scaler_args', 'metric_args', 'dim_reduction_args',\\\n 'execution_args']\n \n # Create a Pandas Data Frame for the request data\n self.request_df = utils.request_df(self.request, row_template, col_headers)\n \n # Create a model that can be persisted to disk\n self.model = PersistentModel()\n \n # Get the model name from the request dataframe\n self.model.name = self.request_df.loc[0, 'model_name']\n \n # Get the argument strings from the request dataframe\n estimator_args = self.request_df.loc[0, 'estimator_args']\n scaler_args = self.request_df.loc[0, 'scaler_args']\n execution_args = self.request_df.loc[0, 'execution_args']\n if advanced:\n metric_args = self.request_df.loc[0, 'metric_args']\n dim_reduction_args = self.request_df.loc[0, 'dim_reduction_args']\n \n if len(dim_reduction_args) > 0:\n self.model.dim_reduction = True\n else:\n self.model.dim_reduction = False \n \n # Set the relevant parameters using the argument strings\n self._set_params(estimator_args, scaler_args, execution_args, metric_args=metric_args,\\\n dim_reduction_args=dim_reduction_args)\n else:\n # Set the relevant parameters using the argument strings\n self._set_params(estimator_args, scaler_args, execution_args)\n self.model.dim_reduction = False \n \n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n message = [[self.model.name, 'Model successfully saved to disk',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp))]]\n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"setup\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response", "def train_all(self, dataset, mu=None):\n\n\t\tX = dataset.X #get_design_matrix()\n\t\ty = dataset.y\n\n\t\tclf = svm.SVC(C = 
self.C, cache_size = self.cache_size, gamma = self.gamma, kernel = self.kernel,\n\t\t\t\tclass_weight = None, coef0=0.0, degree=3,\n\t\t\t\tmax_iter = -1, probability=False, random_state=None,\n\t\t\t\tshrinking = True, tol=0.001, verbose=False)\n\t\tclf.fit (X, y)\n\t\t\n\t\t# save the last classifier\n\t\tself.classifier = clf\n\n\t\t# get model parameter\n\t\tself.alpha = clf.dual_coef_\n\t\tself.bias = clf.intercept_\n\t\t# TODO\n\t\tself.params = clf.get_params (deep=True)\n\n\n\t\t#TODO:self.mu = sharedX(mu)?\n\t\tself._params = [self.alpha, self.bias]\n\t\tprint(self._params)", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def set_params(model, params): # type: ignore\n for p, p_new in zip(model.parameters(), params):\n p.data = p_new.data", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def _set_model(self):\n print(\"Setting up model...\")\n # Encoder\n inputs = Input(batch_shape=(None,) + self.input_shape)\n\n baseEncoder = self.createEncoder(inputs)\n baseEncoder = Dropout(self.drop)(baseEncoder)\n\n # Instantiate encoder layers\n Q_z_mean = Dense(self.latent_dim)\n Q_z_log_var = Dense(self.latent_dim)\n\n # Parameters for continous latent distribution\n z_mean = Q_z_mean(baseEncoder)\n z_log_var = Q_z_log_var(baseEncoder)\n self.encoder =Model(inputs, z_mean)\n\n # Sample from latent distributions\n\n encoding = Lambda(self._sampling_normal, output_shape=(self.latent_dim,))([z_mean, z_log_var])\n \n G_0 = Dense(8*self.kernel_init)(encoding)\n G_0 = Dropout(self.drop)(G_0)\n baseDecoder = self.createDecoder(G_0)\n\n self.model =Model(inputs, baseDecoder)\n # Store latent distribution parameters\n self.z_mean = z_mean\n self.z_log_var = z_log_var\n\n\n # Compile models\n #self.opt = RMSprop()\n self.model.compile(optimizer=self.opt, loss=self._vae_loss)\n self.model.summary()\n print(\"Completed model setup.\")", "def setup_model(self):\r\n\r\n logging.info(\"Setup the models.\")\r\n\r\n logging.info(\"{} model\".format(self.base_network_name))\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model, classifier = getattr(setops_models, self.base_network_name)(\r\n num_classes=80,\r\n avgpool_kernel=self.avgpool_kernel\r\n )\r\n else:\r\n base_model = getattr(setops_models, self.base_network_name)()\r\n classifier = getattr(setops_models, self.classifier_name)(num_classes=80)\r\n\r\n if self.init_inception:\r\n logging.info(\"Initialize inception model using Amit's networks.\")\r\n\r\n checkpoint = torch.load(self.resume_path)\r\n\r\n base_model = Inception3(aux_logits=False, transform_input=True)\r\n base_model.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in base_model.state_dict()}\r\n )\r\n classifier.load_state_dict(\r\n {k: v for k, v in checkpoint[\"state_dict\"].items() if k in classifier.state_dict()}\r\n )\r\n\r\n setops_model_cls = getattr(setops_models, self.sets_network_name)\r\n setops_model = setops_model_cls(\r\n input_dim=2048,\r\n S_latent_dim=self.ops_latent_dim, S_layers_num=self.ops_layer_num,\r\n I_latent_dim=self.ops_latent_dim, I_layers_num=self.ops_layer_num,\r\n U_latent_dim=self.ops_latent_dim, U_layers_num=self.ops_layer_num,\r\n block_cls_name=self.sets_block_name, basic_block_cls_name=self.sets_basic_block_name,\r\n dropout_ratio=self.setops_dropout,\r\n )\r\n\r\n if self.resume_path:\r\n logging.info(\"Resuming the models.\")\r\n models_path = 
Path(self.resume_path)\r\n if self.base_network_name.lower().startswith(\"resnet\"):\r\n base_model.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_base_model_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n classifier.load_state_dict(\r\n torch.load(sorted(models_path.glob(\"networks_classifier_{}*.pth\".format(self.resume_epoch)))[-1])\r\n )\r\n\r\n setops_models_paths = sorted(models_path.glob(\"networks_setops_model_{}*.pth\".format(self.resume_epoch)))\r\n if len(setops_models_paths) > 0:\r\n setops_model.load_state_dict(\r\n torch.load(setops_models_paths[-1]).state_dict()\r\n )\r\n\r\n return base_model, classifier, setops_model", "def params(self,new):\n self._params = new\n self._config_set()\n self._make_model()", "def set_up_and_parameterise_model_for_experiment(self):\n self.experiment_unique_steps_to_model = {}\n for op_number, op in enumerate(self.experiment.unique_steps):\n new_model = self.model.new_copy()\n new_parameter_values = self.parameter_values.copy()\n\n if op.type != \"current\":\n # Voltage or power control\n # Create a new model where the current density is now a variable\n # To do so, we replace all instances of the current density in the\n # model with a current density variable, which is obtained from the\n # FunctionControl submodel\n # check which kind of external circuit model we need (differential\n # or algebraic)\n if op.type == \"voltage\":\n submodel_class = pybamm.external_circuit.VoltageFunctionControl\n elif op.type == \"power\":\n submodel_class = pybamm.external_circuit.PowerFunctionControl\n\n # Build the new submodel and update the model with it\n submodel = submodel_class(new_model.param, new_model.options)\n variables = new_model.variables\n submodel.variables = submodel.get_fundamental_variables()\n variables.update(submodel.variables)\n submodel.variables.update(submodel.get_coupled_variables(variables))\n variables.update(submodel.variables)\n submodel.set_rhs(variables)\n submodel.set_algebraic(variables)\n submodel.set_initial_conditions(variables)\n new_model.rhs.update(submodel.rhs)\n new_model.algebraic.update(submodel.algebraic)\n new_model.initial_conditions.update(submodel.initial_conditions)\n\n # Set the \"current function\" to be the variable defined in the submodel\n new_parameter_values[\"Current function [A]\"] = submodel.variables[\n \"Current [A]\"\n ]\n self.update_new_model_events(new_model, op)\n # Update parameter values\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n experiment_parameter_values = self.get_experiment_parameter_values(\n op, op_number\n )\n new_parameter_values.update(\n experiment_parameter_values, check_already_exists=False\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[repr(op)] = parameterised_model\n\n # Set up rest model if experiment has start times\n if self.experiment.initial_start_time:\n new_model = self.model.new_copy()\n # Update parameter values\n new_parameter_values = self.parameter_values.copy()\n self._original_temperature = new_parameter_values[\"Ambient temperature [K]\"]\n new_parameter_values.update(\n {\"Current function [A]\": 0, \"Ambient temperature [K]\": \"[input]\"},\n check_already_exists=False,\n )\n parameterised_model = new_parameter_values.process_model(\n new_model, inplace=False\n )\n self.experiment_unique_steps_to_model[\n \"Rest for padding\"\n ] = parameterised_model", "def train(self, trainingData, trainingLabels, 
validationData, validationLabels):\t \n\t \n\t# might be useful in your code later...\n\t# this is a list of all features in the training set.\n\tself.features = list(set([ f for datum in trainingData for f in datum.keys() ]));\n\t\n\tif (self.automaticTuning):\n\t\tkgrid = [0.001, 0.01, 0.05, 0.1, 0.5, 1, 5, 10, 20, 50]\n\telse:\n\t\tkgrid = [self.k]\n\t\t\n\tself.trainAndTune(trainingData, trainingLabels, validationData, validationLabels, kgrid)", "def set_defaults(args):\n # Check critical files exist\n args.train_file = os.path.join(args.data_dir, args.train_file)\n if not os.path.isfile(args.train_file):\n raise IOError('No such file: %s' % args.train_file)\n args.dev_file = os.path.join(args.data_dir, args.dev_file)\n if not os.path.isfile(args.dev_file):\n raise IOError('No such file: %s' % args.dev_file)\n if args.embedding_file:\n args.embedding_file = os.path.join(args.embed_dir, args.embedding_file)\n if not os.path.isfile(args.embedding_file):\n raise IOError('No such file: %s' % args.embedding_file)\n\n # Set model directory\n subprocess.call(['mkdir', '-p', args.model_dir])\n\n # Set model name\n if not args.model_name:\n import uuid\n import time\n args.model_name = time.strftime(\"%Y%m%d-\") + str(uuid.uuid4())[:8]\n\n # Set log + model file names\n args.log_file = os.path.join(args.model_dir, args.model_name + '.txt')\n args.model_file = os.path.join(args.model_dir, args.model_name + '.pt')\n\n # Embeddings options\n if args.embedding_file:\n with open(args.embedding_file) as f:\n dim = len(f.readline().strip().split(' ')) - 1\n args.embedding_dim = dim\n elif not args.embedding_dim:\n raise RuntimeError('Either embedding_file or embedding_dim '\n 'needs to be specified.')\n\n # Make sure fix_embeddings and embedding_file are consistent\n if args.fix_embeddings:\n if not (args.embedding_file or args.pretrained):\n logger.warning('WARN: fix_embeddings set to False '\n 'as embeddings are random.')\n args.fix_embeddings = False\n return args", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def train_model(self):\n self.logger.info('Loading the data...')\n train_data = self.load_data(split=\"train\")\n dev_data = self.load_data(split=\"dev\")\n self.config.best_model = os.path.join(self.config.output_dir,\"best_model\")\n self.logger.info('Training the model, outputdir=%s...,best_model=%s' % (self.config.output_dir,self.config.best_model))\n\n train_params = {\n \"overwrite_output_dir\" : True,\n \"reprocess_input_data\": True,\n \"learning_rate\" : self.config.learning_rate,\n \"num_train_epochs\" : self.config.num_train_epochs,\n \"train_batch_size\" : self.config.train_batch_size,\n \"eval_batch_size\" : self.config.eval_batch_size,\n \"gradient_accumulation_steps\": self.config.gradient_accumulation_steps,\n \"use_early_stopping\" : self.config.early_stopping,\n \"fp16\" : False,\n \"classification_report\" : True,\n \"evaluate_during_training\" : True,\n \"evaluate_during_training_verbose\" : True,\n \"best_model_dir\": self.config.best_model,\n \"save_model_every_epoch\" : self.config.save_model_every_epoch,\n \"save_steps\" : self.config.save_steps,\n 
\"save_optimizer_and_scheduler\" : self.config.save_optimizer_and_scheduler,\n \"save_best_model\": True,\n }\n\n ## train the model \n self.model.train_model(\n train_data,\n eval_data=dev_data,\n output_dir=self.config.output_dir,\n show_running_loss=False,\n args=train_params,\n )\n\n ## backing up the config and create pointer to best model \n with open(os.path.join(self.config.best_model,\"trainer_config.json\"),'w') as mconfig:\n mconfig.write(json.dumps(self.config.__dict__))\n self.config.existing_model = self.config.best_model", "def setup(self, stage: Optional[str] = None):\n if stage in (None, 'fit'):\n # Get a 20% of the train data for validation in a stratified way.\n _x = [i[1] for i in self.splits['train']]\n _y = [i[0] for i in self.splits['train']]\n\n _train_x, _val_x, _train_y, _val_y = train_test_split(_x, _y, test_size=0.2,\n stratify=_y)\n #print(np.unique(_train_y, return_counts=True))\n #print(np.unique(_val_y, return_counts=True))\n\n self.splits['train'] = [[i, j] for i,j in zip(_train_y, _train_x)]\n self.splits['valid'] = [[i, j] for i,j in zip(_val_y, _val_x)]\n\n self.datasets['train'] = FewShotDataset(self.splits['train'], self.ops)\n self.datasets['valid'] = FewShotDataset(self.splits['valid'], self.ops)\n\n if stage in (None, 'test'):\n self.datasets['test'] = FewShotDataset(self.splits['test'], self.ops)", "def __init__(self, modelwithparams=None, random_number=-1, problem_type='infer'):\n self.modelwithparams = modelwithparams\n self.oldpara = self.modelwithparams\n self.random_number = random_number\n self.flag = True\n self.problem_type = problem_type", "def __init__(\n self,\n conf: ExpConfig,\n estep_conf: EStepConfig,\n model: Trainable,\n train_data_file: str,\n val_data_file: str = None,\n ):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n train_dataset = get_h5_dataset_to_processes(train_data_file, (\"train_data\", \"data\"))\n val_dataset = None\n if val_data_file is not None:\n val_dataset = get_h5_dataset_to_processes(val_data_file, (\"val_data\", \"data\"))\n\n setattr(conf, \"train_dataset\", train_data_file)\n setattr(conf, \"val_dataset\", val_data_file)\n super().__init__(conf, estep_conf, model, train_dataset, val_dataset)", "def setup(self, stage: Optional[str] = None) -> None:\n if stage == \"fit\" or stage is None:\n logger.info(f\"Setting up the datamodule for {self.config.task} task\")\n if self.validation is None:\n logger.debug(\n f\"No validation data provided.\"\n f\" Using {self.config.validation_split*100}% of train data as validation\"\n )\n val_idx = self.train.sample(\n int(self.config.validation_split * len(self.train)),\n random_state=self.seed,\n ).index\n self.validation = self.train[self.train.index.isin(val_idx)]\n self.train = self.train[~self.train.index.isin(val_idx)]\n else:\n self.validation = self.validation.copy()\n # Preprocessing Train, Validation\n self.train, _ = self.preprocess_data(self.train, stage=\"fit\")\n self.validation, _ = self.preprocess_data(self.validation, stage=\"inference\")\n if self.test is not None:\n self.test, _ = self.preprocess_data(self.test, stage=\"inference\")\n self._fitted = True", "def setup(self, params, training=True, **kwargs):\n if training:\n return self._setup_training(params, **kwargs)\n\n return self._setup_test(params, **kwargs)", "def train(self, data, option, param_map):\n if option == \"lr\":\n md = self.logistic_regression(elastic_param=param_map[\"elastic_param\"],\n reg_param=param_map[\"reg_param\"],\n family=param_map[\"family\"])\n elif option == 
\"rf\":\n md = self.random_forest(max_depth=param_map[\"max_depth\"],\n max_num_tree=param_map[\"max_num_tree\"])\n elif option == \"gbdt\":\n md = self.gbdt(max_depth=param_map[\"max_depth\"],\n max_bins=param_map[\"max_bins\"])\n else:\n raise ValueError(\"ERROR | model %s does not support yet\" % option)\n\n self.model = md.fit(data)\n return self.model", "def set_training_data(self, *, inputs: Inputs) -> None:\n\t\tsuper().set_training_data(inputs=inputs)", "def setup(\n self,\n dim_data: int,\n neural_net: ModelBase,\n optimizer: optax.OptState,\n ):\n # neural network\n self.state_neural_net = neural_net.create_train_state(\n self.rng, optimizer, dim_data\n )\n\n # step function\n self.step_fn = self._get_step_fn()", "def train(self, X, T, *args, **kwargs):\n\n assert len(self.neurons) > 0, \"Add neurons to ELM before training it\"\n X, T = self._checkdata(X, T)\n args = [a.upper() for a in args] # make all arguments upper case\n\n # kind of \"enumerators\", try to use only inside that script\n MODELSELECTION = None # V / CV / MCCV / LOO / None\n NEURONRANKING = None # HQ / OP / None\n CLASSIFICATION = None # c / mc / None\n ADAPTIVE = False # batch / None\n Xv = None\n Tv = None\n k = None\n batch = None\n\n # check exclusive parameters\n assert len(set(args).intersection(set([\"V\", \"CV\", \"MCCV\", \"LOO\"]))) <= 1, \"Use only one of V / CV / MCCV / LOO\"\n assert len(set(args).intersection(set([\"HQ\", \"OP\"]))) <= 1, \"Use only one of HQ / OP\"\n assert len(set(args).intersection(set([\"C\", \"MC\"]))) <= 1, \"Use only one of classification / multiclass (c / mc)\"\n\n # parse parameters\n for a in args:\n if a == \"V\": # validation set\n assert \"Xv\" in kwargs.keys(), \"Provide validation dataset (Xv)\"\n assert \"Tv\" in kwargs.keys(), \"Provide validation targets (Tv)\"\n Xv = kwargs['Xv']\n Tv = kwargs['Tv']\n Xv, Tv = self._checkdata(Xv, Tv)\n MODELSELECTION = \"V\"\n if a == \"CV\":\n assert \"k\" in kwargs.keys(), \"Provide Cross-Validation number of splits (k)\"\n k = kwargs['k']\n MODELSELECTION = \"CV\"\n if a == \"LOO\":\n MODELSELECTION = \"LOO\"\n if a == \"HQ\":\n NEURONRANKING = \"HQ\"\n if a == \"OP\":\n NEURONRANKING = \"OP\"\n if a in (\"C\", \"CL\", \"CLASSIFICATION\"):\n CLASSIFICATION = \"c\"\n if a in (\"MC\", \"MULTICLASS\"):\n CLASSIFICATION = \"mc\"\n if a in (\"A\", \"AD\", \"ADAPTIVE\"):\n assert \"batch\" in kwargs.keys(), \"Provide batch size for adaptive ELM model (batch)\"\n batch = kwargs['batch']\n ADAPTIVE = True\n\n if MODELSELECTION == \"V\":\n self._train_v(X, T, Xv, Tv)\n else:\n self.Beta = self._solve(self.project(X), T)", "def set_model_params(self, w1, b1, w2, b2, w3, b3, w4, b4, w5, b5, w6, b6):\n self.w1 = w1\n self.w2 = w2\n self.w3 = w3\n self.w4 = w4\n self.w5 = w5\n self.w6 = w6\n\n self.b1 = b1\n self.b2 = b2\n self.b3 = b3\n self.b4 = b4\n self.b5 = b5\n self.b6 = b6\n\n return", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], 
data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def _init_node_parm(self, key):\n wf_net_conf = WorkFlowNetConfML(key)\n self.model_path = wf_net_conf.model_path\n self.ml_class = wf_net_conf.ml_class\n self.config = wf_net_conf.config\n self.batch_size = 10000\n self.model_type = wf_net_conf.model_type\n\n #Todo 어떻게 꺼내는지 승우씨한테 물어볼것\n _wf_data_conf = wf_data_conf(key.split('_')[0]+'_'+key.split('_')[1]+'_'+'dataconf_node')\n self.data_conf = _wf_data_conf.conf\n self.label = _wf_data_conf.label\n self.cell_feature = _wf_data_conf.cell_feature\n self.cross_cell = _wf_data_conf.cross_cell\n self.extend_cell_feature = _wf_data_conf.extend_cell_feature\n self.label_values = _wf_data_conf.label_values\n\n _wf_data_node = wf_data_node(key.split('_')[0] + '_' + key.split('_')[1] + '_' + 'data_node')\n self.multi_read_flag = _wf_data_node.multi_node_flag\n self.predict_path = _wf_data_node.predict_path", "def initialise_parameters(self):\n # Weights\n init = select_w_init(self.w_init)\n if self.w_gain:\n gain = nn.init.calculate_gain('relu')\n init(self.relations, gain=gain)\n else:\n init(self.relations)\n\n # Biases\n if self.b_init:\n init = select_b_init(self.b_init)\n init(self.sbias)\n init(self.pbias)\n init(self.obias)", "def resetParams(self):\n self.prediction = cons.init_pred # Classifier payoff - initialized to a constant initial payoff value\n self.error = cons.init_err # Classifier error - initialized to a constant initial error value\n self.fitness = cons.init_fit # Classifier fitness - initialized to a constant initial fitness value", "def __init__(\n self,\n conf: ExpConfig,\n estep_conf: EStepConfig,\n model: Trainable,\n train_dataset: to.Tensor = None,\n test_dataset: to.Tensor = None,\n ):\n H = sum(model.shape[1:])\n self.model = model\n assert isinstance(model, Trainable)\n self._conf = Munch(conf.as_dict())\n self._conf.model = type(model).__name__\n self._conf.device = tvo.get_device().type\n self._estep_conf = Munch(estep_conf.as_dict())\n self.train_data = None\n self.train_states = None\n self._precision = model.precision\n if train_dataset is not None:\n self.train_data = self._make_dataloader(train_dataset, conf)\n # might differ between processes: last process might have smaller N and less states\n # (but TVODataLoader+ShufflingSampler make sure the number of batches is the same)\n N = train_dataset.shape[0]\n self.train_states = self._make_states(N, H, self._precision, estep_conf)\n\n self.test_data = None\n self.test_states = None\n if test_dataset is not None:\n self.test_data = self._make_dataloader(test_dataset, conf)\n N = test_dataset.shape[0]\n self.test_states = self._make_states(N, H, self._precision, estep_conf)\n\n will_reconstruct = (\n self._conf.reco_epochs is not None or self._conf.warmup_reco_epochs is not None\n )\n self.trainer = Trainer(\n self.model,\n self.train_data,\n self.train_states,\n self.test_data,\n self.test_states,\n rollback_if_F_decreases=self._conf.rollback_if_F_decreases,\n will_reconstruct=will_reconstruct,\n eval_F_at_epoch_end=self._conf.eval_F_at_epoch_end,\n data_transform=self._conf.data_transform,\n )\n self.logger = H5Logger(self._conf.output, blacklist=self._conf.log_blacklist)", "def add_train_val_arguments(self):\n self.add_train_arguments()\n self.add_val_arguments()", "def set_params(self, **params):\n super(AveragingRegressor, self)._set_params('estimators', 
**params)\n return self", "def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, 
scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to 
the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), 
np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response" ]
[ "0.6915247", "0.6885677", "0.6774289", "0.67406434", "0.6733088", "0.6642049", "0.6622964", "0.65708476", "0.6508057", "0.64983726", "0.6465158", "0.6464744", "0.6418009", "0.6410264", "0.6372147", "0.63709724", "0.636245", "0.63596547", "0.63532597", "0.6346492", "0.63230634", "0.63130355", "0.6293885", "0.6272218", "0.6268245", "0.624462", "0.62392193", "0.6224966", "0.6222173", "0.62187076", "0.6218486", "0.6210686", "0.6207307", "0.6203023", "0.6200043", "0.6188181", "0.618695", "0.61841637", "0.61817896", "0.6170175", "0.6158792", "0.6158419", "0.615397", "0.61513704", "0.6143045", "0.6139246", "0.61366725", "0.6135858", "0.61249363", "0.6124127", "0.61128604", "0.6111994", "0.61098015", "0.6104924", "0.61001503", "0.6098744", "0.6098349", "0.6085633", "0.6076434", "0.6071741", "0.6068803", "0.6057663", "0.60539174", "0.6053887", "0.60507894", "0.60497355", "0.6049093", "0.6048687", "0.60402745", "0.603962", "0.6035669", "0.60349745", "0.6034548", "0.6031769", "0.6030016", "0.6028926", "0.60270435", "0.602611", "0.6019285", "0.60100716", "0.6007407", "0.59995496", "0.5995954", "0.59923595", "0.5991993", "0.5991644", "0.5988528", "0.5985209", "0.59848666", "0.5981483", "0.5979698", "0.5978657", "0.5973709", "0.5968882", "0.5965801", "0.59653383", "0.595477", "0.5954741", "0.5950777", "0.59501064", "0.59312963" ]
0.0
-1
Runs the training algorithm. Returns None.
def train(self): args = self.args model = self.model dataset = self.dataset train_state = self.train_state optimizer = self.optimizer scheduler = self.scheduler train_bar = self.train_bar val_bar = self.val_bar epoch_bar = self.epoch_bar for epoch_index in range(args.num_epochs): train_state['epoch_index'] = epoch_index # Iterate over training dataset running_loss,running_acc = self.train_loop(epoch_index, args, model, dataset, optimizer, train_bar) train_state['train_loss'].append(running_loss) train_state['train_acc'].append(running_acc) running_loss,running_acc = self.val_loop(epoch_index, args, model, dataset, optimizer, val_bar) train_state['val_loss'].append(running_loss) train_state['val_acc'].append(running_acc) print("Epoch "+str(epoch_index+1)+": Running loss="+ \ str(running_loss)+", Running Acc="+str(running_acc)) train_state = update_train_state(args=args, model=model, train_state=train_state) scheduler.step(train_state['val_loss'][-1]) if train_state['stop_early']: break train_bar.n = 0 val_bar.n = 0 epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] ) epoch_bar.update() state_dict = torch.load(train_state['model_filename']) model.load_state_dict(state_dict) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train():\n # YOUR TRAINING CODE GOES HERE", "def train():\n pass", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def run(self, data, training=False):\n # Set mode\n if training:\n self._model.train()\n else:\n self._model.eval()\n # Compute\n return self._model(data)", "def train(self)->None:", "def call_training_routine(self):\n training_command = \"th main.lua \"\\\n \"-GPU_id %(GPU_identifier)i \"\\\n \"-number_of_GPUs %(number_of_GPUs)i \"\\\n \"-training_dataset %(training_dataset)s \"\\\n \"-testing_dataset %(testing_dataset)s \"\\\n \"-modelFilePath %(modelFilePath)s \"\\\n \"-maxepoch %(maxepoch)i \"\\\n \"-savingDirectory %(savingDirectory)s \"\\\n \"-learningRate %(learningRate)f \"\\\n \"-batchSize %(batchSize)i \"\\\n \"-momentum %(momentum)f\" % self.training_parameters\n\n if self.training_parameters[\"presavedModelPath\"] != \"\":\n training_command += \" -presavedModelPath %s\" %\\\n self.training_parameters[\"presavedModelPath\"]\n\n # Call the training command\n subprocess.call(training_command, shell=True)", "def train(self):\n\t\traise NotImplementedError", "def train(self):\n raise NotImplementedError", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def train(self):\n raise NotImplementedError()", "def train(self):\n\n if self.Algorithms.startswith(\"TMVA:\"):\n self.trainTMVAMethods()\n elif self.Algorithms.startswith(\"SKL:\"):\n self.trainSKLMethods()\n elif self.Algorithms.startswith(\"TF:\"):\n self.trainTFMethods()\n else:\n print(\"ERROR: Unknown algorithm: {}\".format(self.Algorithms))\n\n return", "def train(self, ):\n raise NotImplementedError", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n 
hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def start_training(self):\n self.training = True", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def training(self):\r\n self.model, self.voc = svm_clf_training('all', self.dataset)\r\n return 0", "def run_training():\n\tdata_sets, data_w= data_loader.load_laplace(loc=0, scale=1, sample_size=1000, dimension=2, skew=False, whiten=True, rotation=True)\n\n\t# Tell tensorflow that the model will be built into the default gragh\n\twith tf.Graph().as_default():\n\t\t# Generate placeholders for the single image and all training images for objective function\n\t\tinput_placeholder, obj_placeholder = placeholder_inputs(data_sets.shape[0], data_sets.shape[1])\n\n\t\t# Build a graph that computes predictions from the inference model\n\t\toutputs, objs, weights, thresholds = BCM_tf.inference(input_placeholder, obj_placeholder, FLAGS.n_output, FLAGS.obj_type, FLAGS.nonlinear)\n\n\t\t# Add to the Graph that Ops that train the model\n\t\tupdate_w, update_thres = BCM_tf.training(input_placeholder, weights, thresholds, outputs, FLAGS.n_output, FLAGS.obj_type, FLAGS.nonlinear, FLAGS.eta, FLAGS.decay, FLAGS.tau, FLAGS.p)\n\n\t\t# Build the summary Tensor based on the TF collections fo Summaries\n\t\tsummary = tf.summary.merge_all()\n\n\t\t# Add the variable initializer Op.\n\t\tinit = tf.global_variables_initializer()\n\n\t\t# Create a saver for writing training checkpoints.\n\t\tsaver = tf.train.Saver()\n\n\t\t# Create a session for running Ops on the Graph\n\t\tsess = tf.Session()\n\n\t\t# Instantiate a Summary Writer to output summaries and the Graph\n\t\tsummary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)\n\n\t\t# After everything is built:\n\n\t\t# Run the Op to initialize the variables.\n\t\tsess.run(init)\n\n\t\t# Start the training loop\n\t\tfor step in range(FLAGS.epochs):\n\t\t\ttf.random_shuffle(data_sets)\n\t\t\tfor sample in range(data_sets.shape[0]):\n\t\t\t\t# Fill a feed dictionary with the actual set of images and labels\n\t\t\t\t# For this particular training step\n\t\t\t\tfeed_dict = fill_feed_dict(data_sets[sample, :].reshape([1, 2]), data_sets, input_placeholder, obj_placeholder)\n\n\t\t\t\t# Run one step of the mode. 
The return values are the outputs\n\t\t\t\tsess.run(update_w, feed_dict=feed_dict)\n\t\t\t\tsess.run(update_thres, feed_dict=feed_dict)\n\n\t\t\t\t# Write summaries and print overview fairly often\n\t\t\t\t# if (step+1) * sample % 100 == 0:\n\t\t\t\t\t# Print status to stdout\n\t\t\t\t # print('Iteration %d:' % (weights[0, 0]))\n\t\t\t\t\t# Update the event file\n\t\t\t\t\t#summary_str = sess.run(summary, feed_dict=feed_dict)\n\t\t\t\t\t#summary_writer.add_summary(summary_str, step)\n\t\t\t\t\t#summary_writer.flush()\n\n\t\tfinal_w = sess.run(weights).reshape(1,4)\n\n\treturn final_w, data_w", "def start_training(self):\n if self.task_env is None:\n rospy.logfatal(\"No task environment found for training.\")\n if self.agent is None:\n rospy.logfatal(\"No agent found for training.\")\n self.agent.start_training()", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def train(self):\n return", "def train(self) -> Any:\n pass", "def train_impl(\n self,\n train_processed_data_dir: Path,\n val_processed_data_dir: Path,\n ) -> NoReturn:\n pass", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def train(self, training_data):\n pass", "def train(self):\n self.training = True", "def train(self):\n raise NotImplemented()", "def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def train():\n import training\n\n # Ensure output directories exist\n os.makedirs(os.path.dirname(cfg.scaler_path), exist_ok=True)\n os.makedirs(cfg.model_path, exist_ok=True)\n os.makedirs(cfg.log_path, exist_ok=True)\n\n # Load (standardized) input data and target values\n tr_x, tr_y, _ = _load_data(cfg.training_set, is_training=True)\n val_x, val_y, _ = _load_data(cfg.validation_set)\n\n # Try to create reproducible results\n np.random.seed(cfg.initial_seed)\n\n # Save free parameters to disk\n utils.log_parameters(cfg.training, os.path.join(cfg.model_path,\n 'parameters.json'))\n\n training.train(tr_x, tr_y, val_x, val_y)", "def _train(self):\n training_environment = self._training_environment\n evaluation_environment = self._evaluation_environment\n policy = self._policy\n pool = self._pool\n\n if not self._training_started:\n self._init_training()\n\n self._initial_exploration_hook(\n training_environment, self._initial_exploration_policy, pool)\n\n self.sampler.initialize(training_environment, policy, pool)\n\n gt.reset_root()\n gt.rename_root('RLAlgorithm')\n gt.set_def_unique(False)\n\n self._training_before_hook()\n\n for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):\n self._epoch_before_hook()\n gt.stamp('epoch_before_hook')\n\n start_samples = self.sampler._total_samples\n for i in count():\n samples_now = self.sampler._total_samples\n self._timestep = samples_now - start_samples\n\n if (samples_now >= start_samples + self._epoch_length\n and self.ready_to_train):\n break\n\n 
self._timestep_before_hook()\n gt.stamp('timestep_before_hook')\n\n self._do_sampling(timestep=self._total_timestep)\n gt.stamp('sample')\n\n if self.ready_to_train:\n self._do_training_repeats(timestep=self._total_timestep)\n gt.stamp('train')\n\n self._timestep_after_hook()\n gt.stamp('timestep_after_hook')\n\n training_paths = self.sampler.get_last_n_paths(math.ceil(self._epoch_length / self.sampler._max_path_length))\n gt.stamp('training_paths')\n evaluation_paths = self._evaluation_paths(policy, evaluation_environment)\n gt.stamp('evaluation_paths')\n\n training_metrics = self._evaluate_rollouts(training_paths, training_environment)\n gt.stamp('training_metrics')\n if evaluation_paths:\n evaluation_metrics = self._evaluate_rollouts(\n evaluation_paths, evaluation_environment)\n gt.stamp('evaluation_metrics')\n else:\n evaluation_metrics = {}\n\n self._epoch_after_hook(training_paths)\n gt.stamp('epoch_after_hook')\n\n sampler_diagnostics = self.sampler.get_diagnostics()\n\n diagnostics = self.get_diagnostics(\n iteration=self._total_timestep,\n batch=self._evaluation_batch(),\n training_paths=training_paths,\n evaluation_paths=evaluation_paths)\n\n time_diagnostics = gt.get_times().stamps.itrs\n\n diagnostics.update(OrderedDict((\n *(\n (f'evaluation/{key}', evaluation_metrics[key])\n for key in sorted(evaluation_metrics.keys())\n ),\n *(\n (f'training/{key}', training_metrics[key])\n for key in sorted(training_metrics.keys())\n ),\n *(\n (f'times/{key}', time_diagnostics[key][-1])\n for key in sorted(time_diagnostics.keys())\n ),\n *(\n (f'sampler/{key}', sampler_diagnostics[key])\n for key in sorted(sampler_diagnostics.keys())\n ),\n ('epoch', self._epoch),\n ('timestep', self._timestep),\n ('timesteps_total', self._total_timestep),\n ('train-steps', self._num_train_steps),\n )))\n\n if self._eval_render_kwargs and hasattr(\n evaluation_environment, 'render_rollouts'):\n # TODO(hartikainen): Make this consistent such that there's no\n # need for the hasattr check.\n training_environment.render_rollouts(evaluation_paths)\n\n yield diagnostics\n\n self.sampler.terminate()\n\n self._training_after_hook()\n\n yield {'done': True, **diagnostics}", "def TrainOneStep(self):\n pass", "def test_training(self):\n\t\tpass", "def train(self, batch_training=False):\n raise NotImplementedError", "def _on_training_start(self) -> None:\n if self.eval_freq > 0:\n self.solver.run_tests(0, draw=self.draw, verbose=self.verbose)", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "async def train(self):", "def test_train(self):\n print \"x=\",self.trainer.train()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def eval(self):\n self.train(mode=False)", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def training(self):\n self.training = True", "def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' 
% (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)", "def trainNet():", "def train(self, training_steps=10):", "def main():\r\n preprocessor = DATA_PREPROCESSOR('shakespeare-corpus.txt')\r\n corpus = preprocessor.preprocess_data()\r\n plot(corpus)\r\n data, unique_vocab, word_to_idx = create_context(corpus)\r\n\r\n #train model- changed global variable if needed\r\n model=CBOW(len(unique_vocab), EMBEDDING_DIM, CONTEXT_SIZE)\r\n if USE_ADAM:\r\n print('Using adam as optimizer')\r\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\r\n else:\r\n print('Using SGD as optimizer')\r\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\r\n\r\n checkpoint_file ='checkpoint.pth'\r\n checkpoint_available= os.path.exists(checkpoint_file)\r\n if checkpoint_available:\r\n model, optimizer, current_epoch = reset_model_to_checkpoint(model, optimizer, checkpoint_file)\r\n else:\r\n print('no checkpoint found. initializing new model..\\n')\r\n current_epoch=0 \r\n\r\n executor = MODEL_EXECUTOR(model)\r\n if RESUME_TRAINING or not checkpoint_available:\r\n print('resuming training...\\n')\r\n import time\r\n start_time = time.time()\r\n cbow = executor.train(optimizer, data, unique_vocab, word_to_idx, current_epoch, checkpoint_file)\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n else:\r\n print('pre-trained model loaded. no further training...\\n')\r\n\r\n # get two words similarity\r\n executor.test(unique_vocab,word_to_idx)\r\n\r\n show_closest_words(cbow, word_to_idx,unique_vocab)", "def run():\n print('*-----------------------------------*')\n print('Running main.py ...')\n model = MLPModel(CFG, name='tfds_tryout')\n print('* Model defined')\n model.load_data(method='tfds')\n print('* Data Loaded')\n print(model.datasetinfo)\n model.build()\n model.train()\n model.evaluate()\n model.save()", "def train():\n\t# 1、make dataloader\n\ttrain_loader, val_loader, num_query, num_class = make_data_loader(cfg)\n\t#print(\"num_query:{},num_class:{}\".format(num_query,num_class))\n\n\t# 2、make model\n\tmodel = build_model(cfg, num_class)\n\n\t# model.eval()\n\t# x = model(img_tensor)\n\t# print(x.shape)\n\t# 3、 make optimizer\n\toptimizer = make_optimizer(cfg, model)\n\n\t# 4、 make lr_scheduler\n\tscheduler = make_lr_scheduler(cfg, optimizer)\n\n\t# 5、 make loss_func\n\tif cfg.MODEL.PCB_NECK:\n\t\t# make loss specificially for pcb \n\t\tloss_func = get_softmax_triplet_loss_fn(cfg, num_class)\n\telse:\n\t\tloss_func = make_loss(cfg, num_class)\n\n\t# get paramters\n\tlog_period = cfg.OUTPUT.LOG_PERIOD \n\tckpt_period =cfg.OUTPUT.CHECKPOINT_PERIOD\n\teval_period = cfg.OUTPUT.EVAL_PERIOD\n\toutput_dir = cfg.OUTPUT.ROOT_DIR\n\tdevice = cfg.MODEL.DEVICE\n\tepochs = cfg.SOLVER.MAX_EPOCHS\n\tuse_gpu = device == \"cuda\"\n\tuse_neck = cfg.MODEL.NECK or cfg.MODEL.LEARN_REGION \n\t# how many batch for each log\n\tbatch_size = cfg.SOLVER.IMGS_PER_BATCH\n\tbatch_num = len(train_loader) \n\t\n\tlog_iters = batch_num // log_period\n\tpretrained = cfg.MODEL.PRETRAIN_PATH != ''\n\tparallel = cfg.MODEL.PARALLEL \t\n\tgrad_clip = cfg.DARTS.GRAD_CLIP \n\n\tfeat_norm = cfg.TEST.FEAT_NORM \n\tckpt_save_path = cfg.OUTPUT.ROOT_DIR + cfg.OUTPUT.CKPT_DIR\n\tif not os.path.exists(ckpt_save_path):\n\t\tos.makedirs(ckpt_save_path)\n\n\n\t# create *_result.xlsx\n\t# save the result for analyze\n\tname = (cfg.OUTPUT.LOG_NAME).split(\".\")[0] + \".xlsx\"\n\tresult_path = cfg.OUTPUT.ROOT_DIR + name\n\n\twb = xl.Workbook()\n\tsheet = wb.worksheets[0]\n\ttitles = ['size/M','speed/ms','final_planes', 
'acc', 'mAP', 'r1', 'r5', 'r10', 'loss',\n\t\t\t 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss','acc', 'mAP', 'r1', 'r5', 'r10', 'loss']\n\tsheet.append(titles)\n\tcheck_epochs = [40, 80, 120, 160, 200, 240, 280, 320, 360, epochs]\n\tvalues = []\n\n\tlogger = logging.getLogger('MobileNetReID.train')\n\t\n\t# count parameter\n\tsize = count_parameters(model)\n\tlogger.info(\"the param number of the model is {:.2f} M\".format(size))\n\t\n\tvalues.append(format(size, '.2f'))\n\tvalues.append(model.final_planes)\n\n\tlogger.info(\"Start training\")\n\t\n\t#count = 183, x, y = batch -> 11712 for train\n\tif pretrained:\n\t\tstart_epoch = model.start_epoch\n\n\tif parallel:\n\t\tmodel = nn.DataParallel(model)\n\n\tif use_gpu:\n\t\t# model = nn.DataParallel(model)\n\t\tmodel.to(device)\n\t\n\t# save the best model\n\tbest_mAP, best_r1 = 0., 0.\n\tis_best = False\n\t# batch : img, pid, camid, img_path\n\tavg_loss, avg_acc = RunningAverageMeter(), RunningAverageMeter()\n\tavg_time, global_avg_time = AverageMeter(), AverageMeter()\n\tglobal_avg_time.reset()\n\tfor epoch in range(epochs):\n\t\tscheduler.step()\n\n\t\tif pretrained and epoch < start_epoch - 1:\n\t\t\tcontinue\n\t\n\t\tmodel.train()\n\t\t# sum_loss, sum_acc = 0., 0.\n\t\tavg_loss.reset()\n\t\tavg_acc.reset()\n\t\tavg_time.reset()\n\t\tfor i, batch in enumerate(train_loader):\n\n\t\t\tt0 = time.time()\n\t\t\timgs,labels = batch\n\n\t\t\tif use_gpu:\n\t\t\t\timgs = imgs.to(device)\n\t\t\t\tlabels = labels.to(device)\n\n\t\t\tres = model(imgs)\n\t\t\t# score, feat = model(imgs)\n\t\t\t# loss = loss_func(score, feat, labels)\n\t\t\tloss, acc = compute_loss_acc(use_neck, res, labels, loss_func)\n\t\t\t\n\t\t\tloss.backward()\n\t\t\tif grad_clip != 0:\n\t\t\t\tnn.utils.clip_grad_norm(model.parameters(), grad_clip)\n\n\t\t\toptimizer.step()\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# acc = (score.max(1)[1] == labels).float().mean()\n\n\t\t\t# sum_loss += loss\n\t\t\t# sum_acc += acc \n\t\t\tt1 = time.time()\n\t\t\tavg_time.update((t1 - t0) / batch_size)\n\t\t\tavg_loss.update(loss)\n\t\t\tavg_acc.update(acc)\n\n\t\t\t#log the info \n\t\t\tif (i+1) % log_iters == 0:\n\n\t\t\t\tlogger.info(\"epoch {}: {}/{} with loss is {:.5f} and acc is {:.3f}\".format(\n\t\t\t\t\t epoch+1, i+1, batch_num, avg_loss.avg, avg_acc.avg))\n\n\t\tlr = optimizer.state_dict()['param_groups'][0]['lr']\n\t\tlogger.info(\"end epochs {}/{} with lr: {:.5f} and avg_time is {:.3f} ms\".format(epoch+1, epochs, lr, avg_time.avg * 1000))\n\t\tglobal_avg_time.update(avg_time.avg)\n\t\t# change the lr \n\n\t\t# eval the model \n\t\tif (epoch+1) % eval_period == 0 or (epoch + 1) == epochs :\n\t\t\t\n\t\t\tmodel.eval()\n\t\t\tmetrics = R1_mAP(num_query, use_gpu = use_gpu, feat_norm = feat_norm)\n\n\t\t\twith torch.no_grad():\n\n\t\t\t\tfor vi, batch in enumerate(val_loader):\n\t\t\t\t\t\n\t\t\t\t\timgs, labels, camids = batch\n\n\t\t\t\t\tif use_gpu:\n\t\t\t\t\t\timgs = imgs.to(device)\n\n\t\t\t\t\tfeats = model(imgs)\n\t\t\t\t\tmetrics.update((feats,labels, camids))\n\n\t\t\t\t#compute cmc and mAP\n\t\t\t\tcmc, mAP = metrics.compute()\n\t\t\t\tlogger.info(\"validation results at epoch:{}\".format(epoch + 1))\n\t\t\t\tlogger.info(\"mAP:{:.2%}\".format(mAP))\n\t\t\t\tfor r in [1,5,10]:\n\t\t\t\t\tlogger.info(\"CMC curve, Rank-{:<3}:{:.2%}\".format(r,cmc[r-1]))\t\n\n\t\t\t\t# determine whether cur model is the best \n\t\t\t\tif mAP > best_mAP:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_mAP = mAP\n\t\t\t\t\tlogger.info(\"Get a new best mAP\")\n\t\t\t\tif cmc[0] > best_r1:\n\t\t\t\t\tis_best = 
True\n\t\t\t\t\tbest_r1 = cmc[0]\n\t\t\t\t\tlogger.info(\"Get a new best r1\")\n\n\t\t\t\t# add the result to sheet\n\t\t\t\tif (epoch + 1) in check_epochs:\n\t\t\t\t\tval = [avg_acc.avg, mAP, cmc[0], cmc[4], cmc[9]]\n\t\t\t\t\tchange = [format(v * 100, '.2f') for v in val]\n\t\t\t\t\tchange.append(format(avg_loss.avg, '.3f'))\n\t\t\t\t\tvalues.extend(change)\n\n\n\t\t# we hope that eval_period == ckpt_period or eval_period == k* ckpt_period where k is int\t\t\t\n\t\t# whether to save the model\n\t\tif (epoch+1) % ckpt_period == 0 or is_best:\n\n\t\t\tif parallel:\n\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\t\t\telse:\n\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\n\t\t\tlogger.info(\"checkpoint {} saved !\".format(epoch + 1))\n\n\t\t\tif is_best:\n\t\t\t\tif parallel:\n\t\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\telse:\n\t\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\tlogger.info(\"best checkpoint was saved\")\n\t\t\t\tis_best = False\n\t\n\tvalues.insert(1, format(global_avg_time.avg * 1000, '.2f'))\n\tsheet.append(values)\n\twb.save(result_path)\n\n\tlogger.info(\"training is end, time for per imgs is {} ms\".format(global_avg_time.avg *1000))", "def main():\n # Fix random seed.\n torch.manual_seed(0)\n\n # Create checkpoint directory.\n try:\n os.mkdir('checkpoints')\n except FileExistsError:\n pass\n\n # Make preparations.\n args = get_args()\n logger = get_logger()\n data_train, data_val, data_test = get_data(args.batch_size,\n args.num_workers)\n model = get_model()\n\n # Log command arguments.\n logger.info(' '.join(sys.argv))\n logger.info(vars(args))\n\n # Send the model to the GPU, if enabled and available.\n if args.cuda:\n model = model.cuda()\n\n # Create the loss function and optimizer.\n loss_function = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum)\n\n # Load checkpoint, if given.\n if args.checkpoint:\n load_checkpoint(args.checkpoint, model, optimizer)\n\n # Loop epochs.\n for epoch in range(args.num_epochs):\n logger.info(f'Epoch {epoch}:')\n\n mean_loss = train(model, loss_function, optimizer, data_train)\n logger.info(f' - [training] mean loss: {mean_loss:.3f}')\n\n accuracy = evaluate(model, data_val)\n logger.info(f' - [validation] accuracy: {accuracy:.3f}')\n\n torch.save([model.state_dict(), optimizer.state_dict()],\n os.path.join('checkpoints', f'{epoch}.pth'))\n\n # Run final evaluation on the test data.\n logger.info('Test:')\n accuracy = evaluate(model, data_test)\n logger.info(f' - [test] accuracy: {accuracy:.3f}')", "def start_training(self):\n self.training()\n \n images, true_labels, pred_labels, pred_probs = self.evaluate_model(proba=True)\n \n metrics = Metrics(images, true_labels, pred_labels, pred_probs, self.classes)\n\n cm = metrics.get_confusion_matrix()\n print('The confusion matrix is:\\n', cm)\n print('*'*100)\n \n cr = metrics.get_classification_report()\n print('The classification report is:\\n', cr)\n print('*'*100)", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if 
self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def run(self) -> None:\n ts = time.time()\n startTime = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n\n svm_dataset = \"NLP/SVM/IHE/SVM_dataset_ihe.csv\"\n \n tags = ['IHE {}'.format(i) for i in range(1, 10)] # IHE tags.\n\n # SDG results files.\n model = \"NLP/SVM/IHE/model.pkl\"\n\n self.load_dataset(svm_dataset)\n self.load_tags(tags)\n print(\"Loaded dataset: size =\", len(self.dataset))\n\n print(\"Training...\")\n X_train, X_test, y_train, y_test = self.train()\n\n\n print(\"Saving results...\")\n self.serialize(model)\n\n print(\"Done.\")", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def train(self):\n # setup model\n self.createModel()\n self.setGenerators()\n self.buildCallbacks()\n self.printParameters()\n \n # train model\n _ = self.model.fit_generator(\n generator = self.trainGen,\n validation_data = self.validateGen,\n steps_per_epoch = self.steps_per_epoch,\n validation_steps = self.validation_steps,\n epochs = self.epochs,\n use_multiprocessing = True,\n callbacks = self.callbacks)\n # clear save paths to avoid overwriting accidentaly\n self.saveName = None", "def train(args):\n\n log_header('Training network')\n\n train_retriever(args)", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def run_step(self):\n self.hooked_sess.run(self.train_op)", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n logging.info(\"Training DINTModel.\")\n start = time.time()\n tr = self.classifier.train()\n return time.time() - start", "def train(self, trainData):\n pass", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # 
Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--identifier\", required=True,\n help=\"A short name/identifier for your experiment, e.g. 'ex42b'.\")\n args = parser.parse_args()\n\n train(args)", "def retrain(self):\n thread = Thread(target=self.trainer.train_classifier)\n thread.start()", "def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 
0 is loss.", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def main():\n setup_keras()\n\n args = parse()\n\n train_settings = common.load_settings(args.settings_path, default_conf_name='train.yml')\n train_settings['store'] = args.store\n\n feature_settings = common.load_settings(args.settings_path, default_conf_name='feature.yml')\n model_settings = common.load_settings(args.settings_path, default_conf_name=train_settings['model_conf'])\n\n train_df, val_df = load_training_data(dict(train_settings, **feature_settings))\n assert train_df.shape[0] > val_df.shape[0] * 4.5, f'training data {train_df.shape[0]} should be much larger than validation {val_df.shape[0]}'\n\n sample_featurizer = AudioFeature(feature_settings)\n\n if args.load_name:\n model_name = args.load_name\n print('Loading existing model', model_name)\n m = keras.models.load_model(model_name)\n else:\n t = datetime.datetime.now().strftime('%Y%m%d-%H%M')\n model_name = f\"model-{model_settings['model']}_hop{feature_settings['hop_length']}_{t}\"\n m = models.build(dict(model_settings, **feature_settings))\n m.summary()\n\n output_dir = os.path.join(args.model_store, model_name)\n\n print(f\"Training model: '{model_name}'\", json.dumps(train_settings, indent=1))\n\n combined_settings = dict(train_settings, **model_settings, **feature_settings)\n\n h = train_model(output_dir, train_df, val_df,\n model=m,\n sample_featurizer=sample_featurizer,\n settings=combined_settings)", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def run(self):\n # This should do nothing if the user has already configured\n # logging, and will it least enable error messages otherwise.\n logging.basicConfig()\n\n # If this is resumption from a checkpoint, it is crucial to\n # reset `profile.current`. 
Otherwise, it simply does not hurt.\n self.profile.current = []\n\n # Sanity check for the most common case\n if (self._model and isinstance(self._model, Model) and\n isinstance(self.algorithm, GradientDescent)):\n if not (set(self._model.get_parameter_dict().values()) ==\n set(self.algorithm.parameters)):\n logger.warning(\"different parameters for model and algorithm\")\n\n with change_recursion_limit(config.recursion_limit):\n self.original_sigint_handler = signal.signal(\n signal.SIGINT, self._handle_epoch_interrupt)\n self.original_sigterm_handler = signal.signal(\n signal.SIGTERM, self._handle_batch_interrupt)\n try:\n logger.info(\"Entered the main loop\")\n if not self.status['training_started']:\n for extension in self.extensions:\n extension.main_loop = self\n self._run_extensions('before_training')\n with Timer('initialization', self.profile):\n self.algorithm.initialize()\n self.status['training_started'] = True\n # We can not write \"else:\" here because extensions\n # called \"before_training\" could have changed the status\n # of the main loop.\n if self.log.status['iterations_done'] > 0:\n self.log.resume()\n self._run_extensions('on_resumption')\n self.status['epoch_interrupt_received'] = False\n self.status['batch_interrupt_received'] = False\n with Timer('training', self.profile):\n while self._run_epoch():\n pass\n except TrainingFinish:\n self.log.current_row['training_finished'] = True\n except Exception as e:\n self._restore_signal_handlers()\n self.log.current_row['got_exception'] = traceback.format_exc()\n logger.error(\"Error occured during training.\" + error_message)\n try:\n self._run_extensions('on_error')\n except Exception:\n logger.error(traceback.format_exc())\n logger.error(\"Error occured when running extensions.\" +\n error_in_error_handling_message)\n reraise_as(e)\n finally:\n self._restore_signal_handlers()\n if self.log.current_row.get('training_finished', False):\n self._run_extensions('after_training')\n if config.profile:\n self.profile.report()", "def evaluate(self):\n self.training = False", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n return self.test(test_fn)\n logging.info(\"Done!\")", "def train(self, algorithm):\n\n kfold = StratifiedKFold(10, True, 1)\n f1_score = []\n precision_score = []\n recall_score = []\n for train, test in kfold.split(self.data_training, self.data_target):\n model = algorithm.fit(self.data_training.iloc[train], self.data_target.iloc[train])\n scores = self.score_model(model, self.data_training.iloc[test], self.data_target.iloc[test])\n f1_score.append(scores[0])\n precision_score.append(scores[1])\n recall_score.append(scores[2])\n\n self.print_results(f1_score, precision_score, recall_score)", "def self_training(args):\n\n print('load pre-trained model from [%s]' % args.load_model, file=sys.stderr)\n params = torch.load(args.load_model, map_location=lambda storage, loc: storage)\n vocab = params['vocab']\n transition_system = params['transition_system']\n saved_args = params['args']\n saved_state = params['state_dict']\n\n # transfer arguments\n saved_args.cuda = args.cuda\n saved_args.save_to = args.save_to\n saved_args.train_file = args.train_file\n saved_args.unlabeled_file = args.unlabeled_file\n saved_args.dev_file = args.dev_file\n saved_args.load_decode_results = args.load_decode_results\n args = saved_args\n\n update_args(args)\n\n model = Parser(saved_args, vocab, transition_system)\n 
model.load_state_dict(saved_state)\n\n if args.cuda: model = model.cuda()\n model.train()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n print('load unlabeled data [%s]' % args.unlabeled_file, file=sys.stderr)\n unlabeled_data = Dataset.from_bin_file(args.unlabeled_file)\n\n print('load decoding results of unlabeled data [%s]' % args.load_decode_results, file=sys.stderr)\n decode_results = pickle.load(open(args.load_decode_results))\n\n labeled_data = Dataset.from_bin_file(args.train_file)\n dev_set = Dataset.from_bin_file(args.dev_file)\n\n print('Num. examples in unlabeled data: %d' % len(unlabeled_data), file=sys.stderr)\n assert len(unlabeled_data) == len(decode_results)\n self_train_examples = []\n for example, hyps in zip(unlabeled_data, decode_results):\n if hyps:\n hyp = hyps[0]\n sampled_example = Example(idx='self_train-%s' % example.idx,\n src_sent=example.src_sent,\n tgt_code=hyp.code,\n tgt_actions=hyp.action_infos,\n tgt_ast=hyp.tree)\n self_train_examples.append(sampled_example)\n print('Num. self training examples: %d, Num. labeled examples: %d' % (len(self_train_examples), len(labeled_data)),\n file=sys.stderr)\n\n train_set = Dataset(examples=labeled_data.examples + self_train_examples)\n\n print('begin training, %d training examples, %d dev examples' % (len(train_set), len(dev_set)), file=sys.stderr)\n print('vocab: %s' % repr(vocab), file=sys.stderr)\n\n epoch = train_iter = 0\n report_loss = report_examples = 0.\n history_dev_scores = []\n num_trial = patience = 0\n while True:\n epoch += 1\n epoch_begin = time.time()\n\n for batch_examples in train_set.batch_iter(batch_size=args.batch_size, shuffle=True):\n batch_examples = [e for e in batch_examples if len(e.tgt_actions) <= args.decode_max_time_step]\n\n train_iter += 1\n optimizer.zero_grad()\n\n loss = -model.score(batch_examples)\n # print(loss.data)\n loss_val = torch.sum(loss).data[0]\n report_loss += loss_val\n report_examples += len(batch_examples)\n loss = torch.mean(loss)\n\n loss.backward()\n\n # clip gradient\n if args.clip_grad > 0.:\n grad_norm = torch.nn.utils.clip_grad_norm(model.parameters(), args.clip_grad)\n\n optimizer.step()\n\n if train_iter % args.log_every == 0:\n print('[Iter %d] encoder loss=%.5f' %\n (train_iter,\n report_loss / report_examples),\n file=sys.stderr)\n\n report_loss = report_examples = 0.\n\n print('[Epoch %d] epoch elapsed %ds' % (epoch, time.time() - epoch_begin), file=sys.stderr)\n # model_file = args.save_to + '.iter%d.bin' % train_iter\n # print('save model to [%s]' % model_file, file=sys.stderr)\n # model.save(model_file)\n\n # perform validation\n print('[Epoch %d] begin validation' % epoch, file=sys.stderr)\n eval_start = time.time()\n eval_results = evaluation.evaluate(dev_set.examples, model, args, verbose=True)\n dev_acc = eval_results['accuracy']\n print('[Epoch %d] code generation accuracy=%.5f took %ds' % (epoch, dev_acc, time.time() - eval_start), file=sys.stderr)\n is_better = history_dev_scores == [] or dev_acc > max(history_dev_scores)\n history_dev_scores.append(dev_acc)\n\n if is_better:\n patience = 0\n model_file = args.save_to + '.bin'\n print('save currently the best model ..', file=sys.stderr)\n print('save model to [%s]' % model_file, file=sys.stderr)\n model.save(model_file)\n # also save the optimizers' state\n torch.save(optimizer.state_dict(), args.save_to + '.optim.bin')\n elif epoch == args.max_epoch:\n print('reached max epoch, stop!', file=sys.stderr)\n exit(0)\n elif patience < args.patience:\n patience += 1\n print('hit 
patience %d' % patience, file=sys.stderr)\n\n if patience == args.patience:\n num_trial += 1\n print('hit #%d trial' % num_trial, file=sys.stderr)\n if num_trial == args.max_num_trial:\n print('early stop!', file=sys.stderr)\n exit(0)\n\n # decay lr, and restore from previously best checkpoint\n lr = optimizer.param_groups[0]['lr'] * args.lr_decay\n print('load previously best model and decay learning rate to %f' % lr, file=sys.stderr)\n\n # load model\n params = torch.load(args.save_to + '.bin', map_location=lambda storage, loc: storage)\n model.load_state_dict(params['state_dict'])\n if args.cuda: model = model.cuda()\n\n # load optimizers\n if args.reset_optimizer:\n print('reset optimizer', file=sys.stderr)\n optimizer = torch.optim.Adam(model.inference_model.parameters(), lr=lr)\n else:\n print('restore parameters of the optimizers', file=sys.stderr)\n optimizer.load_state_dict(torch.load(args.save_to + '.optim.bin'))\n\n # set new lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # reset patience\n patience = 0", "def train():\n import trace\n trace.train()", "def train(self, train_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' %model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n pass\n self.model = torch.load(model_path)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n \n # capture best model\n best_val_psnr = -1\n best_model_state = self.model.state_dict()\n\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _ = self._check_PSNR(train_dataset)\n self.hist_train_psnr.append(train_psnr)\n \n\n \n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)", "def runner(self):\n\n print('[ INFO ]: Initializing the abalone program runner...')\n\n df, features, predictor, classes = self.preprocess()\n\n df = alg.random_feature_sample(self, df, 0.10)\n\n # Set up the training, testing and validation sets\n split = round(len(df) * 0.10)\n v_set = df[df.index < split]\n t_set = df[df.index >= split]\n\n tree = alg()\n folds_dict = tree.cross_validation(t_set, predictor, type='classification', folds=5)\n\n # Initialize comparion values\n best_fold_tree = None\n best_fold_score = 0\n best_fold_pred_labels = None\n best_fold_df = None\n\n # Loop through each fold in the folds dictionary\n for fold in folds_dict:\n\n test_set = folds_dict[fold]\n train_set = pd.DataFrame()\n for inner_fold in folds_dict:\n if inner_fold != fold:\n train_set = train_set.append(folds_dict[inner_fold], ignore_index=True)\n\n # Build an ID3 tree\n root = tree.build_tree(train_set, features, predictor)\n df, labels, pred_labels, score = tree.test(test_set, features, predictor, root)\n\n # Determine which tree is the best\n if score > best_fold_score:\n best_fold_tree = root\n best_fold_score = score\n best_fold_pred_labels = pred_labels\n best_fold_df = df\n\n # Validate results and prune the ID3 tree\n v_tree = alg()\n df, labels, pred_labels, score = v_tree.test(v_set, features, predictor, best_fold_tree)\n prune_root = v_tree.prune(df, predictor, best_fold_tree)\n prune_df, 
prune_labels, prune_pred_labels, prune_score = v_tree.test(v_set, features, predictor, prune_root)\n\n return best_fold_tree, score, labels, pred_labels, prune_root, prune_score, prune_labels, prune_pred_labels", "def train(self):\n backend = self.config.backend.build(self.config, self.tmp_dir)\n backend.train(source_bundle_uri=self.config.source_bundle_uri)", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def run(cfg): # pylint: disable=too-many-locals,too-many-statements\n # load_text\n voca, gazet, data_, pos_model, word_model = load_text(cfg)\n\n char_voca = voca['in']\n\n # Build Ner model\n model = build_model(cfg, char_voca=char_voca, word_voca=None,\n gazet=gazet, pos_voca=pos_model.cfg.voca['out'])\n\n epoch_syl_cnt = data_['train'].get_syllable_count()\n iter_per_epoch = epoch_syl_cnt // cfg.batch_size\n iter_to_rvt = iter_per_epoch * cfg.rvt_epoch\n\n # Load GPU\n if torch.cuda.is_available():\n model.cuda()\n\n # Loss / Optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = cfg.optimizer(model.parameters())\n\n losses = []\n accuracies = []\n f_scores = []\n\n iter_ = 1\n best_iter = 0\n\n # Remove existing log directory\n if cfg.clean:\n logging.info('==== removing log: %s ====', cfg.model_dir)\n shutil.rmtree(cfg.model_dir)\n time.sleep(3)\n\n else:\n if cfg.ckpt_path.exists():\n logging.info('==== reverting from check point ====')\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n best_iter = model_dump['iter']\n iter_ = best_iter + 1\n losses.append(model_dump['loss'])\n accuracies.append(model_dump['accuracy'])\n f_scores.append(model_dump['f-score'])\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f ----',\n iter_ // 1000, losses[-1], accuracies[-1], f_scores[-1])\n lrs = [param_group['lr'] for param_group in optimizer.param_groups]\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n\n # Tensorboard Summary Writer\n sum_wrt = SummaryWriter(cfg.model_dir)\n\n # loss / accuracy / f-score logging (.tsv)\n log_path = cfg.model_dir.joinpath('log.tsv')\n logf = open(log_path, 'at' if cfg.ckpt_path.exists() else 'wt')\n if os.path.getsize(log_path) == 0:\n print('iter\\tloss\\taccuracy\\tf-score', file=logf)\n\n # Main Training Loop\n revert = 0\n one_more_thing = True # one more change to increase learning rate into 10 times\n batches = []\n while revert <= cfg.rvt_term or one_more_thing:\n for train_sent in data_['train']:\n # Convert to Tensor\n # labels [sentence_len]\n # contexts [sentence_len, 21]\n # gazet [sentence_len, 21, 15]\n train_sent.set_word_feature(pos_model, word_model, cfg.window)\n train_sent.set_pos_feature(pos_model, cfg.window)\n train_labels, train_contexts, train_gazet, train_pos, train_words = \\\n train_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n\n # Convert to Variable\n train_labels = Variable(train_labels)\n train_contexts = Variable(train_contexts)\n train_gazet = Variable(train_gazet)\n train_pos = Variable(train_pos, requires_grad=False)\n train_words = Variable(train_words, requires_grad=False)\n\n # Load on GPU\n if torch.cuda.is_available():\n train_labels = train_labels.cuda()\n train_contexts = train_contexts.cuda()\n train_gazet = train_gazet.cuda()\n train_pos = train_pos.cuda()\n train_words = train_words.cuda()\n\n # Reset Gradient\n optimizer.zero_grad()\n\n # Training mode 
(updates/dropout/batchnorm)\n model.train()\n\n # import ipdb; ipdb.set_trace()\n\n # Forward Prop\n outputs = model(train_contexts, train_gazet, train_pos, train_words)\n\n batches.append((train_labels, outputs))\n if sum([batch[0].size(0) for batch in batches]) < cfg.batch_size:\n continue\n batch_label = torch.cat([x[0] for x in batches], 0)\n batch_output = torch.cat([x[1] for x in batches], 0)\n batches = []\n\n # Backprop\n loss = criterion(batch_output, batch_label)\n loss.backward()\n optimizer.step()\n\n # Validation\n if iter_ % 1000 == 0:\n measure = tagger.PerformanceMeasure()\n # Freeze parameters\n model.eval()\n\n # Calculate loss\n losses.append(loss.data[0])\n for dev_sent in data_['dev']:\n # Convert to CUDA Variable\n dev_sent.set_word_feature(pos_model, word_model, cfg.window)\n dev_sent.set_pos_feature(pos_model, cfg.window)\n _, dev_contexts, dev_gazet, dev_pos, dev_words = \\\n dev_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n dev_contexts = Variable(dev_contexts, volatile=True)\n dev_gazet = Variable(dev_gazet, volatile=True)\n dev_pos = Variable(dev_pos, volatile=True)\n dev_words = Variable(dev_words, volatile=True)\n if torch.cuda.is_available():\n dev_contexts = dev_contexts.cuda()\n dev_gazet = dev_gazet.cuda()\n dev_pos = dev_pos.cuda()\n dev_words = dev_words.cuda()\n\n outputs = model(dev_contexts, dev_gazet, dev_pos, dev_words)\n\n _, predicts = outputs.max(1)\n dev_sent.compare_label(predicts, voca, measure)\n\n accuracy, f_score = measure.get_score()\n print(file=sys.stderr)\n sys.stderr.flush()\n if not f_scores or f_score > max(f_scores):\n logging.info('==== writing best model: %f ====', f_score)\n model.save(cfg.ckpt_path)\n check_point = CheckPoint(optimizer, model,\n {'iter': iter_, 'loss': loss.data[0],\n 'accuracy': accuracy, 'f-score': f_score})\n check_point.save(cfg.ckpt_path)\n logging.info('check point: %s', check_point)\n best_iter = iter_\n revert = 0\n one_more_thing = True\n accuracies.append(accuracy)\n f_scores.append(f_score)\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f (max: %r) ----',\n iter_ // 1000, losses[-1], accuracy, f_score, max(f_scores))\n\n if cfg.model_dir.exists():\n sum_wrt.add_scalar('loss', losses[-1], iter_ // 1000)\n sum_wrt.add_scalar('accuracy', accuracy, iter_ // 1000)\n sum_wrt.add_scalar('f-score', f_score, iter_ // 1000)\n print('{}\\t{}\\t{}\\t{}'.format(iter_ // 1000, losses[-1], accuracy,\n f_score), file=logf)\n logf.flush()\n\n # revert policy\n if (iter_ - best_iter) > iter_to_rvt:\n revert += 1\n logging.info('==== revert to iter: %dk, revert count: %d ====',\n best_iter // 1000, revert)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= (0.9 if one_more_thing else 0.8) ** revert\n lrs.append(param_group['lr'])\n best_iter = iter_\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n elif iter_ % 100 == 0:\n print('.', end='', file=sys.stderr)\n sys.stderr.flush()\n\n iter_ += 1\n if revert > cfg.rvt_term and one_more_thing:\n logging.info('==== one more thing, revert to iter: %dk ====', best_iter // 1000)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 10.0\n lrs.append(param_group['lr'])\n best_iter = 
iter_\n revert = 0\n one_more_thing = False\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def Train(self, training_set):\n self.countGrams(training_set)\n self.estimateA()\n self.estimateB()\n self.computeDict()", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. 
Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def start_training(params):\n\n\n\n # CREATE A FOLDER TO HOLD RESULTS\n\n\n exp_pref = \"../results/\" + params.EXPERIMENT_PREFIX\n time_str = time.strftime(\"_%m-%d-%H-%M_\", time.gmtime())\n exp_dir = exp_pref + time_str + \\\n \"{}\".format(params.LEARNING_RATE).replace(\".\", \"p\") + \"_\" \\\n + \"{}\".format(params.DISCOUNT).replace(\".\", \"p\")\n\n try:\n os.stat(exp_dir)\n except OSError:\n os.makedirs(exp_dir)\n\n logger = logging.getLogger(\"DeepLogger\")\n logger.setLevel(logging.INFO)\n\n # Logging filehandler\n #fh = logging.FileHandler(exp_dir + \"/log.log\")\n # Rotate file when filesize is 5 mb\n fh = RotatingFileHandler(exp_dir + \"/log.log\", maxBytes=5000000, backupCount=100)\n\n fh.setLevel(logging.INFO)\n\n # Console filehandler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter('%(message)s')\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n\n # Prevent nohup from producing large log file, logging to file is handled internally\n # logger.addHandler(ch)\n\n log_params(logger, params)\n\n #logging.basicConfig(level=logging.INFO, filename=exp_dir + \"/log.log\")\n\n\n if params.DETERMINISTIC:\n rng = np.random.RandomState(12345)\n else:\n rng = np.random.RandomState()\n\n if params.CUDNN_DETERMINISTIC:\n theano.config.dnn.conv.algo_bwd = 'deterministic'\n\n # Init ale\n ale = ale_python_interface.ALEInterface()\n ale.setInt('random_seed', 123)\n ale.setBool('display_screen', params.DISPLAY_SCREEN)\n ale.setFloat('repeat_action_probability', params.REPEAT_ACTION_PROBABILITY)\n full_rom_path = os.path.join(params.ROM_PATH, params.ROM_NAME)\n ale.loadROM(full_rom_path)\n num_actions = len(ale.getMinimalActionSet())\n\n print \"Legal actions: \", num_actions\n print ale.getMinimalActionSet()\n\n # Instantiate network\n logger.info(\"Setting up network...\")\n network = None # Be able to continue training from a network or watch a network play\n if (params.NETWORK_PICKLE_FILE is None):\n logger.info(\"Initializing a new random network...\")\n network = q_network.DeepQLearner(params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n num_actions,\n params.PHI_LENGTH,\n params.DISCOUNT,\n params.LEARNING_RATE,\n params.RMS_DECAY,\n params.RMS_EPSILON,\n params.MOMENTUM,\n params.CLIP_DELTA,\n params.FREEZE_INTERVAL,\n params.BATCH_SIZE,\n params.NETWORK_TYPE,\n params.UPDATE_RULE,\n params.BATCH_ACCUMULATOR,\n rng)\n else:\n logger.info(\"Loading network instance from file...\")\n handle = open(params.NETWORK_PICKLE_FILE, 'r')\n network = cPickle.load(handle)\n\n\n # Only used when getting a random network\n if params.RANDOM_NETWORK_PICKLE:\n import sys\n sys.setrecursionlimit(10000)\n result_net_file = open(params.EXPERIMENT_PREFIX + '.pkl', 'w')\n print \"File opened\"\n cPickle.dump(network, result_net_file, -1)\n print \"Pickle dumped\"\n result_net_file.close()\n sys.exit(0)\n\n\n # Instatiate agent\n logger.info(\"Setting up agent...\")\n agent = ale_agent.NeuralAgent(network,\n params.EPSILON_START,\n params.EPSILON_MIN,\n params.EPSILON_DECAY,\n params.REPLAY_MEMORY_SIZE,\n exp_dir,\n params.REPLAY_START_SIZE,\n params.UPDATE_FREQUENCY,\n rng)\n\n # Instantiate experient\n logger.info(\"Setting up experiment...\")\n experiment = ale_experiment.ALEExperiment(ale, agent,\n params.RESIZED_WIDTH,\n params.RESIZED_HEIGHT,\n params.RESIZE_METHOD,\n 
params.EPOCHS,\n params.STEPS_PER_EPOCH,\n params.STEPS_PER_TEST,\n params.FRAME_SKIP,\n params.DEATH_ENDS_EPISODE,\n params.MAX_START_NULLOPS,\n rng)\n\n\n # Run experiment\n logger.info(\"Running experiment...\")\n experiment.run()", "def train(self, training_data, cfg, **kwargs):\n pass", "def _run(self):\n pb = progressbar.ProgressBar(self.iterations)\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n acc = self._checkAccuracy(line)\n if acc:\n self.model_acc = acc\n loss = self._checkLoss(line)\n if loss:\n self.model_loss = loss\n\n if self._checkUpdate(line):\n pb.update()\n self.run = True", "def call(self, inputs, training=True):\n pass", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def run_training(argv=None):\n # parse args\n args = parse_arguments(sys.argv if argv is None else argv)\n logging.info('getting the ML model...')\n model = getattr(models, args.model)(nr_predictors=24, nr_classes=2)\n\n # get the data\n logging.info('getting the data...')\n temp_folder = 'data'\n if not os.path.exists(temp_folder):\n os.mkdir(temp_folder)\n file_path = os.path.join(temp_folder, 'data.csv')\n storage_helper.download_blob(args.bucket, args.blob_path, file_path)\n time_series = pd.read_csv(file_path)\n training_test_data = preprocess.train_test_split(time_series, 0.8)\n\n\n # define training objective\n logging.info('defining the training objective...')\n sess = tf.Session()\n feature_data = tf.placeholder(\"float\", [None, 24])\n actual_classes = tf.placeholder(\"float\", [None, 2])\n\n model = model.build_model(feature_data)\n cost = -tf.reduce_sum(actual_classes * tf.log(model))\n train_opt = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(cost)\n init = tf.global_variables_initializer()\n sess.run(init)\n\n # train model\n correct_prediction = tf.equal(tf.argmax(model, 1), tf.argmax(actual_classes, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n logging.info('training the model...')\n time_dct = {}\n time_dct['start'] = time.time()\n for i in range(1, args.epochs):\n sess.run(\n train_opt,\n feed_dict={\n feature_data: training_test_data['training_predictors_tf'].values,\n actual_classes: training_test_data['training_classes_tf'].values.reshape(\n len(training_test_data['training_classes_tf'].values), 2)\n }\n )\n if i % 5000 == 0:\n print(i, sess.run(\n accuracy,\n feed_dict={\n feature_data: training_test_data['training_predictors_tf'].values,\n actual_classes: training_test_data['training_classes_tf'].values.reshape(\n len(training_test_data['training_classes_tf'].values), 2)\n }\n ))\n time_dct['end'] = time.time()\n logging.info('training took {0:.2f} sec'.format(time_dct['end'] - time_dct['start']))\n\n # print results of confusion matrix\n logging.info('validating model on test set...')\n feed_dict = {\n feature_data: training_test_data['test_predictors_tf'].values,\n actual_classes: training_test_data['test_classes_tf'].values.reshape(\n len(training_test_data['test_classes_tf'].values), 2)\n }\n metrics.tf_confusion_matrix(model, actual_classes, sess, feed_dict)\n\n # create signature for TensorFlow Serving\n logging.info('Exporting model for tensorflow-serving...')\n\n export_path = os.path.join(\"model\", args.version)\n tf.saved_model.simple_save(\n sess,\n export_path,\n inputs={'predictors': feature_data},\n outputs={'prediction': 
tf.argmax(model, 1),\n 'model-version': tf.constant([str(args.version)])}\n )\n\n # save model on GCS\n logging.info(\"uploading to \" + args.bucket + \"/\" + export_path)\n storage_helper.upload_to_storage(args.bucket, export_path)\n\n # remove local files\n shutil.rmtree(export_path)\n shutil.rmtree(temp_folder)", "def main(args):\n \n ## Load & Preprocess data \n if args.data_name == 'amsterdam': \n file_name = '../data/amsterdam/test_longitudinal_data.csv'\n ori_data = data_preprocess(file_name, args.max_seq_len)\n \n # Divide the data into training and testing\n divided_data, _ = data_division(ori_data, seed = args.seed, divide_rates = [args.train_rate, 1-args.train_rate])\n \n train_data = np.asarray(divided_data[0])\n test_data = np.asarray(divided_data[1])\n\n print('Finish data loading: ' + str(args.data_name)) \n \n ## Run hider algorithm\n if args.hider_model == 'timegan':\n generated_data = timegan.timegan(train_data)\n elif args.hider_model == 'add_noise':\n generated_data = add_noise.add_noise(train_data, args.noise_size) \n print('Finish hider algorithm training') \n \n ## Define enlarge data and its labels\n enlarge_data = np.concatenate((train_data, test_data), axis = 0)\n enlarge_data_label = np.concatenate((np.ones([train_data.shape[0],]), np.zeros([test_data.shape[0],])), axis = 0)\n \n # Mix the order\n idx = np.random.permutation(enlarge_data.shape[0])\n enlarge_data = enlarge_data[idx]\n enlarge_data_label = enlarge_data_label[idx]\n \n ## Run seeker algorithm\n reidentified_data = knn_seeker(generated_data, enlarge_data)\n \n print('Finish seeker algorithm training') \n \n ## Evaluate the performance\n # 1. Feature prediction\n feat_idx = np.random.permutation(train_data.shape[2])[:args.feature_prediction_no]\n ori_feat_pred_perf = feature_prediction(train_data, test_data, feat_idx)\n new_feat_pred_perf = feature_prediction(generated_data, test_data, feat_idx)\n \n feat_pred = [ori_feat_pred_perf, new_feat_pred_perf]\n \n print('Feature prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_feat_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_feat_pred_perf, 4)))\n \n # 2. One step ahead prediction\n ori_step_ahead_pred_perf = one_step_ahead_prediction(train_data, test_data)\n new_step_ahead_pred_perf = one_step_ahead_prediction(generated_data, test_data)\n \n step_ahead_pred = [ori_step_ahead_pred_perf, new_step_ahead_pred_perf]\n \n print('One step ahead prediction results: ' + \n '(1) Ori: ' + str(np.round(ori_step_ahead_pred_perf, 4)) + \n '(2) New: ' + str(np.round(new_step_ahead_pred_perf, 4)))\n \n # 3. Reidentification score\n reidentification_score = reidentify_score(enlarge_data_label, reidentified_data)\n \n print('Reidentification score: ' + str(np.round(reidentification_score, 4)))\n \n shutil.rmtree('tmp')\n \n return feat_pred, step_ahead_pred, reidentification_score" ]
[ "0.77021474", "0.7665013", "0.75775045", "0.754038", "0.74701804", "0.7316239", "0.72936034", "0.7281923", "0.7249765", "0.7234686", "0.72101897", "0.7197399", "0.7190534", "0.71843356", "0.7157707", "0.7157707", "0.7157707", "0.7157707", "0.7157707", "0.71547514", "0.70944256", "0.70547175", "0.70547175", "0.7034413", "0.6994123", "0.6979423", "0.69417447", "0.69378823", "0.69376546", "0.6926493", "0.6900066", "0.6898678", "0.68891424", "0.68482965", "0.68275076", "0.68223786", "0.68217117", "0.6815598", "0.6796458", "0.6783792", "0.67768365", "0.6768792", "0.6768379", "0.67474645", "0.67410535", "0.6740765", "0.6732913", "0.67238843", "0.672333", "0.67178005", "0.6712374", "0.6702446", "0.6699699", "0.6692996", "0.66829175", "0.66536856", "0.66427946", "0.6639922", "0.6628029", "0.6626849", "0.6625669", "0.66129315", "0.6611956", "0.66061", "0.6603843", "0.6600542", "0.65981257", "0.6596419", "0.659585", "0.6595756", "0.659534", "0.6594157", "0.65938723", "0.65928185", "0.65875304", "0.657825", "0.6561176", "0.65581226", "0.6554035", "0.65538913", "0.6553072", "0.6543238", "0.6533796", "0.65287703", "0.6520326", "0.6519338", "0.65158427", "0.651542", "0.6503479", "0.65024745", "0.64972305", "0.64898866", "0.64841425", "0.6465675", "0.646421", "0.6460218", "0.6457714", "0.64546615", "0.64525187", "0.64453614", "0.64394414" ]
0.0
-1
Runs the training for a single epoch.
def train_loop(self,epoch_index,args,model,dataset,optimizer,train_bar):
    dataset.set_split('train')
    batch_generator = generate_nmt_batches(dataset, batch_size=args.batch_size, device=args.device)
    running_loss = 0.0
    running_acc = 0.0
    model.train()

    for batch_index, batch_dict in enumerate(batch_generator):
        # the training routine is these 5 steps:
        # --------------------------------------
        # step 1. zero the gradients
        optimizer.zero_grad()

        # step 2. compute the output
        if isinstance(model,NMTModelWithMLTM):
            y_pred = model(batch_dict['x_source'], batch_dict['x_source_mltm_vector'], batch_dict['x_source_length'], batch_dict['x_target'])
        else:
            y_pred = model(batch_dict['x_source'], batch_dict['x_source_length'], batch_dict['x_target'])

        # step 3. compute the loss
        loss = sequence_loss(y_pred, batch_dict['y_target'], self.mask_index)

        # step 4. use loss to produce gradients
        loss.backward()

        # step 5. use optimizer to take gradient step
        optimizer.step()
        # -----------------------------------------
        # compute the running loss and running accuracy
        running_loss += (loss.item() - running_loss) / (batch_index + 1)
        acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)
        running_acc += (acc_t - running_acc) / (batch_index + 1)

        # update bar
        train_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index)
        train_bar.update()

    return running_loss,running_acc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss(self.model(Input)[:,0],Output)\n\t\t\ttrain_loss += loss.item()\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.current_iteration += 1\n\n\t\tself.summary_writer.add_scalar('training/loss', loss.item(), self.current_epoch)", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def step(self, epoch):\n\n self.train(epoch)\n self.test(epoch)", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train_epoch(self):\n\n if self._train_data_set is not None and self._train_data_set is not None:\n self._model.fit_num_epochs(self._train_data_set, self._test_data_set)\n else:\n raise RuntimeError(\"[Triggerbot]: No training or test set available\")", "def run_epoch(self, train, dev, epoch):\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, self.config.batch_size)):\n fd, _ = 
self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss= self.sess.run(\n [self.train_op, self.loss], feed_dict=fd)\n\n# =============================================================================\n# # tensorboard\n# if i % 10 == 0:\n# self.file_writer.add_summary(summary, epoch*nbatches + i)\n# =============================================================================\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n print(msg)\n\n return metrics[\"f1\"]", "def run_epoch( self ):\n # --- Init Epoch ----\n total_epoch_loss = 0.0\n epoch_batches = self.dataset.dataloader( self.config.neuron.epoch_length )\n progress_bar = qqdm(enumerate(epoch_batches), total=len(epoch_batches), desc=format_str('blue', f'Epoch Progress'))\n for iteration, (inputs) in progress_bar:\n\n # ---- Forward / Backward ----\n prev_mechanism_weights = self.mechanism_weights.tolist()\n output = self.train ( batch = { 'inputs': inputs } )\n next_mechanism_weights = self.mechanism_weights.tolist()\n total_epoch_loss += output.local_target_loss.item()\n\n # ---- Logs ----\n self.epoch_logs (\n progress_bar,\n iteration = iteration,\n output = output,\n prev_mechanism_weights = prev_mechanism_weights,\n next_mechanism_weights = next_mechanism_weights\n )\n self.global_step += 1\n\n self.epoch_loss = total_epoch_loss / self.config.neuron.epoch_length\n self.epoch += 1", "def run_epoch(model, data, optimizer, epoch):\n traindata, valdata = data\n\n model.train()\n train_bpd = epoch_iter(model, traindata, optimizer, epoch)\n\n model.eval()\n val_bpd = epoch_iter(model, valdata, optimizer, epoch)\n\n return train_bpd, val_bpd", "def Train(self):\n self.init_epoch = self.epoch\n if self.epoch >= self.params.num_epoch:\n WARNING('Num_epoch should be smaller than current epoch. Skip training......\\n')\n else:\n for _ in range(self.epoch, self.params.num_epoch):\n self.epoch += 1\n print('-' * 20 + 'Epoch.' 
+ str(self.epoch) + '-' * 20)\n\n # train one epoch\n self.train_one_epoch()\n\n # should display\n if self.epoch % self.params.display == 0:\n print('\\tTrain loss: %.4f' % self.train_loss[-1])\n\n # should save\n if self.params.should_save:\n if self.epoch % self.params.save_every == 0:\n self.save_checkpoint()\n\n # test every params.test_every epoch\n if self.params.should_val:\n if self.epoch % self.params.val_every == 0:\n self.val_one_epoch()\n print('\\tVal loss: %.4f' % self.val_loss[-1])\n\n # adjust learning rate\n self.adjust_lr()\n self.train_one_epoch_Image_display() \n \n # save the last network state\n if self.params.should_save:\n self.save_checkpoint()\n\n # train visualization\n self.plot_curve()", "def train_epoch(self, train=False):\n # init params\n config = self.config\n writer = self.writer\n train_params = self.get_train_params()\n args = self.args\n # net, net_SP = self.net, self.net_SP\n optimizer, optimizer_SP = self.optimizer, self.optimizer_SP\n\n lr = self.get_learning_rate()\n logging.info(f\"current learning rate: {lr}\")\n\n running_losses = []\n self.save_lists = [\n \"err_q\",\n \"err_t\",\n \"epi_dists\",\n \"relative_poses_cam\",\n \"relative_poses_body\",\n ]\n dict_of_lists_in_train = init_dict_of_lists(config, self.save_lists)\n dict_of_lists_in_val = init_dict_of_lists(config, self.save_lists)\n if_val_in_train_trigger = False\n\n thd_corr = 300\n writer.add_scalar(\"training-lr\", lr, self.n_iter)\n\n # Train one epoch\n for i, sample_train in tqdm(enumerate(self.train_loader)):\n # if training\n if train:\n # eval in training script\n if (\n self.n_iter != 0\n and self.n_iter % config[\"training\"][\"val_interval_in_train\"] == 0\n ):\n if_val_in_train_trigger = True\n if if_val_in_train_trigger:\n logging.info(\n \"+++[Train]+++ Collecting training batch for %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n else:\n self.net.train()\n\n # train one batch\n (\n loss_train_out,\n dict_of_lists_in_train,\n clamp_cum,\n ) = self.train_val_batch(\n train_params,\n sample_train,\n True,\n if_val=if_val_in_train_trigger,\n dict_of_lists=dict_of_lists_in_train,\n )\n\n if if_val_in_train_trigger:\n if (\n dict_of_lists_in_train[\"count\"]\n > config[\"training\"][\"val_batches\"]\n ):\n dict_of_lists_in_train = self.flush_dict_of_lists(\n writer, \"training\", self.n_iter, **dict_of_lists_in_train\n )\n if_val_in_train_trigger = False\n else:\n # running_losses.append(loss_train_out)\n print(self.n_iter, \"%.8f\" % loss_train_out)\n self.n_iter += 1\n\n # if testing\n if args.eval and self.n_iter % config[\"training\"][\"val_interval\"] == 0:\n logging.info(\n \"+++[Val]+++ Validating %s at train step %d\"\n % (args.exper_name, self.n_iter)\n )\n self.net.eval()\n assert self.net.training == False\n for j, sample_val in tqdm(enumerate(self.val_loader)):\n # if not self.check_num_of_matches(sample, thd=thd_corr): continue\n logging.info(\"+++[Val]+++ Validating batch %d\" % (j))\n # logging.info(f\"frame_id: {sample_val['frame_ids']}\")\n loss_val_out, dict_of_lists_in_val, _ = self.train_val_batch(\n train_params, sample_val,\n False, if_val=True, dict_of_lists=dict_of_lists_in_val,\n ) ##### check: in order to align val and training\n self.n_iter_val += 1\n if config[\"training\"][\"val_batches\"] != -1 and (\n j > config[\"training\"][\"val_batches\"]\n ): ##### check: how to limit the validation\n break\n print(dict_of_lists_in_val.keys())\n\n ## save valdiation result (dict)\n if len(config[\"exps\"][\"filename\"]) > 
3:\n # print(f\"dict_of_lists_in_val: {dict_of_lists_in_val}\")\n def get_dict(key_layer1, key_layer2, dict_of_lists):\n dict_of_array = {}\n for k in key_layer1:\n dict_of_array[k] = np.stack(dict_of_lists[k][key_layer2])\n return dict_of_array\n\n our_name, base_name = (\n config[\"exps\"][\"our_name\"],\n config[\"exps\"][\"base_name\"],\n )\n\n print(f'save dict_of_lists_in_val to {config[\"exps\"][\"filename\"]}')\n # save our results\n dict_of_lists = get_dict(\n self.save_lists, our_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{our_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # save base_name\n dict_of_lists = get_dict(\n self.save_lists, base_name, dict_of_lists_in_val\n )\n dict_of_lists[\"epi_dists\"] = dict_of_lists[\"epi_dists\"][:, :10] ### only take part of it\n np.savez(\n f'{self.save_path[:-11]}/{base_name}_{config[\"exps\"][\"filename\"]}',\n **dict_of_lists,\n )\n # output then flush\n dict_of_lists_in_val = self.flush_dict_of_lists(\n writer, \"validating\", self.n_iter, **dict_of_lists_in_val\n )\n\n # epoch_loss = np.mean(np.asarray(running_losses))\n\n # training iterations\n self.epoch += 1\n if self.n_iter > config[\"training\"][\"train_iter\"]:\n break\n return 0.0, self.clamp_cum, self.n_iter, self.n_iter_val", "def train_epoch(self):\n for it in range(self.iter_per_epoch):\n # Get batch\n xs, _ = self.mnist.train.next_batch(100)\n _, loss, summary = self.sess.run([self.train_op, self.loss, self.summary_op],\n {self.x: xs})\n self.summary_writer.add_summary(summary, it)\n if it % 1000 == 0:\n print('Iteration {}\\t loss: {}'.format(it, loss))", "def _run_epoch(sess, model, args, data, index=0, tb_summaries=None,\n id_to_word=None, train_op=None, verbose=False):\n epoch_start_time = time.time()\n # total cost and number of words evaluated in this epoch\n costs, total_words = 0.0, 0.0\n # epoch size is number of batches in each epoch\n epoch_size = (len(data[index]) - 1) // model.config['batch_size']\n state = sess.run(model.initial_state)\n\n # iterate through batches\n for step, (x, y) in enumerate(data_reader.batch_iterator(\n data[index], model.config['batch_size'])):\n # return these parameters after running TF session\n fetches = {\n 'cost': model.cost[index],\n 'final_state': model.final_state,\n 'seq_len': model.seq_len\n }\n # only train model has optimizer operation\n if train_op is not None:\n fetches['train_op'] = train_op[index]\n\n # create dict to feed input, targets, and rnn into TF session\n feed_dict = utils.create_feed_dict(model, args, x, y, state)\n # run all parameters in fetches dict\n vals = sess.run(fetches, feed_dict)\n\n costs += vals['cost']\n # number of words evaluated\n total_words += np.sum(vals['seq_len'])\n # use perplexity to evaluate language models\n perplexity = np.exp(costs / total_words)\n\n if verbose and step % (epoch_size // 2) == 1:\n # display perplexity and top word predictions for sequence\n _display_epoch_metrics(step, epoch_size, perplexity, total_words,\n epoch_start_time, args, model, sess,\n index, feed_dict, vals, id_to_word, y)\n\n # generate sample text while training to monitor progress\n if args.display_text == 'True' and model.name == 'Train':\n generate.generate_text(sess, model, id_to_word, train_ind=index)\n\n # write TensorBoard summaries for Train/Valid\n if args.save_path != '' and model.name != 'Test':\n summary = sess.run(tb_summaries.summary_op,\n 
{tb_summaries.ppl_summary: perplexity})\n model.file_writer.add_summary(summary, get_or_create_global_step().eval())\n\n return perplexity", "def TrainEpoch(ss):\n ss.StopNow = False\n curEpc = ss.TrainEnv.Epoch.Cur\n while True:\n ss.TrainTrial()\n if ss.StopNow or ss.TrainEnv.Epoch.Cur != curEpc:\n break\n ss.Stopped()", "def train(self, epoch=50):\n # self.history = self.model.fit(self.train_images,\n # self.train_labels,\n # epochs=epoch,\n # validation_data=(self.test_images, self.test_labels))\n datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1,\n horizontal_flip=True)\n # prepare iterator\n it_train = datagen.flow(self.train_images, self.train_labels, batch_size=64)\n # fit model\n steps = int(self.train_images.shape[0] / 64)\n self.history = self.model.fit_generator(it_train, steps_per_epoch=steps,\n epochs=epoch,\n validation_data=(self.test_images,\n self.test_labels),\n verbose=1)\n # evaluate model\n _, acc = self.model.evaluate(self.test_images, self.test_labels, verbose=0)\n LOGGER.info('> %.3f' % (acc * 100.0))\n self.summarize_diagnostics()", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo", "def run_epoch(model, data, optimizer):\n traindata, valdata = data\n\n model.train()\n train_elbo = epoch_iter(model, traindata, optimizer)\n\n model.eval()\n val_elbo = epoch_iter(model, valdata, optimizer)\n\n return train_elbo, val_elbo", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def train_epoch(self, data_loader):\n raise NotImplementedError", "def train_an_epoch(self, train_loader, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0.0\n\n for batch_data in train_loader:\n loss = self.train_single_batch(batch_data)\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train(self, batch):\n pass", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time 
& disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def set_train_epoch(self, epoch: int):\n self._train_epoch = epoch", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target) in enumerate(self.data_loader):\n data, target = data.to(self.device), target.to(self.device)\n\n self.optimizer.zero_grad()\n\n if self.config[\"amp\"]:\n # AMP!\n with autocast():\n output = self.model(data)\n loss = self.criterion(output, target)\n else:\n output = self.model(data)\n loss = self.criterion(output, target)\n\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update(\"loss\", loss.item())\n for met in self.metric_ftns:\n self.train_metrics.update(met.__name__, met(output, target))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug(\n \"Train Epoch: {} {} Loss: {:.6f}\".format(\n epoch, self._progress(batch_idx), loss.item()\n )\n )\n self.writer.add_image(\n \"input\", make_grid(data.cpu(), nrow=8, normalize=True)\n )\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{\"val_\" + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n return log", "def train_an_epoch(self, sampler, epoch_id):\n assert hasattr(self, \"model\"), \"Please specify the exact model !\"\n self.model.train()\n total_loss = 0\n for batch_id in range(self.num_batch):\n (\n u,\n seq,\n time_seq,\n time_matrix,\n pos,\n neg,\n ) = sampler.next_batch() # tuples to ndarray\n batch_data = (\n np.array(u),\n np.array(seq),\n np.array(time_seq),\n np.array(time_matrix),\n np.array(pos),\n np.array(neg),\n )\n loss = self.train_single_batch(batch_data)\n # print(\n # \"loss in epoch {} iteration {}: {}\".format(epoch, step, loss.item())\n # ) # expected 0.4~0.6 after init few epochs\n total_loss += loss\n print(\"[Training Epoch {}], Loss {}\".format(epoch_id, total_loss))\n self.writer.add_scalar(\"model/loss\", total_loss, epoch_id)", "def train(self, num_batches: int):", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n 
self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def run_epoch(self, train, dev, epoch):\n # progbar stuff for logging\n batch_size = self.config.batch_size\n nbatches = (len(train) + batch_size - 1) // batch_size\n prog = Progbar(target=nbatches)\n\n # iterate over dataset\n for i, (words, labels) in enumerate(minibatches(train, batch_size)):\n fd, _ = self.get_feed_dict(words, labels, self.config.lr,\n self.config.dropout)\n\n _, train_loss, summary = self.sess.run(\n [self.train_op, self.loss, self.merged], feed_dict=fd)\n\n prog.update(i + 1, [(\"train loss\", train_loss)])\n\n # tensorboard\n if i % 10 == 0:\n self.file_writer.add_summary(summary, epoch*nbatches + i)\n\n metrics = self.run_evaluate(dev)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n\n return metrics[\"f1\"]", "def train_epoch(self, epoch, evaluator, optimizer, perf_path, perf_trace, state_fpath, writer_tensorboard):\n\n # Train an epoch\n self.model.train()\n print('Start epoch', epoch)\n train_itr = iter(self.loader_train)\n total_err = 0\n total_acc = 0\n\n for index, (data_pixel, data_labels) in enumerate(train_itr):\n\n # compute\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n\n # Use the model the produce the classification\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n\n # produce evaluator results\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n\n # set optimizer to zero.\n optimizer.zero_grad()\n\n # back propogate the evaluation results.\n eval_result['loss'].backward()\n\n # optimizer take step forward.\n optimizer.step()\n\n # tabulate the steps from the evaluation\n eval_result = {k: eval_result[k].item() for k in eval_result}\n\n # update every hundreds' of\n if index % 100 == 0:\n print(index, eval_result['loss'], eval_result['acc'])\n train_result = evaluator.evalulate_on_cache()\n train_total_err = train_result['loss']\n writer_tensorboard.add_scalar('Loss/Train', train_total_err, global_step=epoch)\n # log_metric('loss', train_total_err)\n train_total_acc = train_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Train', train_total_acc, global_step=epoch)\n # log_metric('acc', train_total_acc)\n train_kaggle_score = train_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Train', train_kaggle_score, global_step=epoch)\n # log_metric('kaggle_score', train_kaggle_score)\n dict_metrics_train = {\n 'Loss/Train': train_total_err,\n 'Accuracy/Train': train_total_acc,\n 'Kaggle_Score/Train': train_kaggle_score,\n }\n log_metrics(dict_metrics_train, step=epoch)\n print(f\"Epoch {epoch} Training, Loss {train_total_err}, Acc {train_total_acc}\")\n evaluator.clear_cache()\n # compute validation error\n self.model.eval()\n val_itr = iter(self.loader_val)\n with torch.no_grad():\n for index, (data_pixel, data_labels) in enumerate(val_itr):\n input_data = data_pixel.float().cuda()\n data_labels = data_labels.cuda()\n grapheme_logits, vowel_logits, consonant_logits = self.model(input_data)\n eval_result = evaluator(grapheme_logits, vowel_logits, consonant_logits, data_labels)\n eval_result = {k: eval_result[k].item() for k in eval_result}\n total_err += eval_result['loss']\n total_acc += eval_result['acc']\n # print(total_err / (1 + input_index), total_acc / (1 + input_index))\n val_result = evaluator.evalulate_on_cache()\n val_total_err = val_result['loss']\n 
writer_tensorboard.add_scalar('Loss/Val', val_total_err, global_step=epoch)\n val_total_acc = val_result['acc']\n writer_tensorboard.add_scalar('Accuracy/Val', val_total_acc, global_step=epoch)\n val_kaggle_score = val_result['kaggle_score']\n writer_tensorboard.add_scalar('Kaggle_Score/Val', val_kaggle_score, global_step=epoch)\n dict_metrics_val = {\n 'Loss/Validation': val_total_err,\n 'Accuracy/Validation': val_total_acc,\n 'Kaggle_Score/Validation': val_kaggle_score,\n }\n log_metrics(dict_metrics_val, step=epoch)\n # Write to disk.\n writer_tensorboard.flush()\n print(f\"Epoch {epoch} Eval, Loss {val_total_err}, Acc {val_total_acc}\")\n evaluator.clear_cache()\n print(\"Saving the model (epoch %d)\" % epoch)\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, state_fpath)\n print(f\"Making a backup (step {epoch})\")\n backup_fpath = os.path.join(self.backup_dir, f\"model_bak_{epoch}.pt\")\n torch.save({\n \"epoch\": epoch + 1,\n \"model_state\": self.model.state_dict(),\n \"optimizer_state\": optimizer.state_dict(),\n }, backup_fpath)\n # Dump the traces\n perf_trace.append(\n {\n 'epoch': epoch,\n 'train_err': train_total_err,\n 'train_acc': train_total_acc,\n 'train_kaggle_score': train_kaggle_score,\n 'val_err': val_total_err,\n 'val_acc': val_total_acc,\n 'val_kaggle_score': val_kaggle_score\n }\n )\n pickle.dump(perf_trace, open(perf_path, 'wb'))\n # store epoch full result separately\n epoch_result = {\n 'epoch': epoch,\n 'train_result': train_result,\n 'val_result': val_result\n }\n pickle.dump(epoch_result, open(os.path.join(self.results_dir, 'result_epoch_{0}.p'.format(epoch)), 'wb'))", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def train(self, epochs):\n print('Starting training...')\n print('\\n{:13} '\n '{:>17} '\n '{:^38}'\n ''.format('', '--- Training ---', '--- Validation ---'))\n print('{:4} {:>8} '\n '{:>8} {:>8} '\n '{:>8} {:>8} {:>8} {:>8}'\n ''.format('', '', 'Loss', 'Acc', 'Loss', 'Prc', 'Rec', 'Acc'))\n training_time = 0\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n trn_stats = 
self.__progress(self.training, self.__train_fn)\n val_stats = self.__progress(self.validation, self.__val_fn)\n elapsed_time = time.time() - start_time\n training_time += elapsed_time\n print('{:>4} {:>7.2f}s '\n '{:>8.3f} {:>8.1%} '\n '{:>8.3f} {:>8.1%} {:>8.1%} {:>8.1%}'\n ''.format(epoch, elapsed_time,\n trn_stats[0], trn_stats[-1],\n *val_stats))\n self.history.append([epoch] + list(trn_stats) + list(val_stats))\n self.report['epochs'] = epochs\n self.report['time_per_epoch'] = training_time / epochs", "def run_epoch(self, sess, epoch_num, validate=True):\n total_loss = 0\n accuracies = []\n for i in range(self.batches_per_epoch):\n batch = self.loader.get_batch()\n if self.config.print_every and i % self.config.print_every == 0:\n if validate:\n val_accuracy = self.eval_validation_accuracy()\n print(\"step {}, validation accuracy {:.3f}\".format(i, val_accuracy))\n accuracies.append((i + epoch_num * self.batches_per_epoch, val_accuracy))\n else:\n if self.include_coverage and self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2], batch[3])\n elif self.include_coverage:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n elif self.include_entropy:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1], batch[2])\n else:\n train_accuracy = self.eval_accuracy_on_batch(batch[0], batch[1])\n print(\"step {}, training accuracy {:.3f}\".format(i, train_accuracy))\n \n if self.include_coverage and self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.e: batch[2], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_coverage:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.c: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n elif self.include_entropy:\n _, loss_val = sess.run([self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.e: batch[1], self.y_: batch[2], \n self.keep_prob: 1-self.config.dropout_prob})\n else:\n attention, _, loss_val = sess.run([self.attention, self.train_op, self.loss],\n feed_dict={self.x: batch[0], self.y_: batch[1],\n self.keep_prob: 1-self.config.dropout_prob})\n\t\tpdb.set_trace()\n\t\tnp.savetxt(\"a.csv\", attention[0], delimiter=\",\")\n total_loss += loss_val\n\n return total_loss / self.batches_per_epoch, accuracies", "def train_one_epoch(self):\n self.model.train()\n for batch_idx, (imgs, labels) in enumerate(self.tr_loader):\n imgs, labels = imgs.to(self.device), labels.to(self.device)\n self.optimizer.zero_grad()\n\n outputs, aux_outputs = self.model(imgs).values()\n loss1 = self.criterion(outputs, labels)\n loss2 = self.criterion(aux_outputs, labels)\n self.loss = loss1 + 0.3*loss2\n\n _, preds = torch.max(outputs, 1)\n acc = preds.eq(labels.view_as(preds)).sum().item() / self.cfg.bs\n\n self.loss.backward()\n self.optimizer.step()\n \n self.summary_writer.add_scalars('scalar_group', \n { 'loss_end' : loss1.item(),\n 'loss_aux' : loss2.item(),\n 'loss_total' : self.loss.item(),\n 'accuracy' : acc},\n self.current_iteration)\n\n if batch_idx % self.cfg.log_interval == 0:\n info_1 = 'Epochs {} [{}/{} ({:.0f}%)] | Loss: {:.6f}' .format(\n self.current_epoch, \n batch_idx * len(imgs), \n len(self.tr_loader.dataset), \n 100. 
* batch_idx / len(self.tr_loader),\n self.loss.item())\n info_2 = 'Batch Accuracy : {:.2f}'.format(acc)\n self.logger.info('{} | {}'.format(info_1, info_2))\n self.save_checkpoint('{}_epoch{}_iter{}.pt'.format(\n self.cfg.exp_name,\n self.current_epoch, \n self.current_iteration)\n )\n self.current_iteration += 1", "def train(self,\n epochs=10,\n track_every=20):\n self.model.train()\n print(\"Model put in training mode.\")\n\n for i in range(epochs):\n stop_training = False\n batch_losses = []\n for j, sample in enumerate(self.training_set):\n\n # Run single loop.\n loss = self.partial_fit(sample)\n batch_losses.append(loss)\n self.print_progress(epoch=i,\n batch=j,\n loss=loss)\n\n if j % track_every == 0 and j != 0:\n batch_loss = numpy.mean(numpy.array(batch_losses))\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n stop_training = self.estopper.check_stop_training(val_loss)\n\n if stop_training:\n break\n\n # End batch iteration.\n\n val_loss, metric = self.update_validation_result(epoch=i,\n batch=j,\n loss=batch_loss)\n\n if stop_training:\n print(\"Early stopping.\")\n torch.save(self.model, self.save_dir + \"model.pt\")\n print(f\"Model saved to {self.save_dir}model.pt\")\n break\n\n # End training loop.", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def train_one_epoch(sess, tr_model, i_epoch, run_metadata):\n tr_loss, i = 0, 0\n stime = time.time()\n while True:\n try:\n if NNET_PARAM.time_line:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size],\n options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),\n run_metadata=run_metadata)\n else:\n _, loss, current_batchsize = sess.run(\n [tr_model.train_op, tr_model.loss, tr_model.batch_size])\n tr_loss += loss\n if (i+1) % NNET_PARAM.minibatch_size == 0:\n if NNET_PARAM.time_line and NNET_PARAM.timeline_type == 'minibatch':\n tl = timeline.Timeline(run_metadata.step_stats)\n ctf = tl.generate_chrome_trace_format()\n with open('_timeline/%03dtimeline%04d.json' % (i_epoch, i+1), 'w') as f:\n f.write(ctf)\n lr = sess.run(tr_model.lr)\n costtime = time.time()-stime\n stime = time.time()\n print(\"MINIBATCH %05d: TRAIN AVG.LOSS %04.6f, \"\n \"(learning rate %02.6f)\" % (\n i + 1, tr_loss / (i*NNET_PARAM.batch_size+current_batchsize), lr), 'cost time: %06dS' % costtime)\n sys.stdout.flush()\n i += 1\n except tf.errors.OutOfRangeError:\n break\n tr_loss /= ((i-1)*NNET_PARAM.batch_size+current_batchsize)\n return tr_loss", "def train_and_evaluate(model, train_dataloader, test_dataloader, optimizer, scheduler, loss_fn, total_epochs):\n\n for epoch in range(total_epochs):\n\n # Run one epoch 
for both train and test\n print(\"Epoch {}/{}\".format(epoch + 1, total_epochs))\n\n # compute number of batches in one epoch(one full pass over the training set)\n train(model, optimizer, loss_fn, train_dataloader, epoch)\n \n scheduler.step()\n\n # Evaluate for one epoch on test set\n eval(model, loss_fn, test_dataloader, epoch)", "def train_one_epoch(self):\n prog_bar = tqdm(enumerate(self.train_data), total=len(self.train_data))\n self.model.train()\n with autocast():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask) \n\n loss = self.loss_fn(outputs.squeeze(1), targets)\n prog_bar.set_description('loss: {:.2f}'.format(loss.item()))\n\n Config.scaler.scale(loss).backward()\n Config.scaler.step(self.optimizer)\n Config.scaler.update()\n self.optimizer.zero_grad()\n self.scheduler.step()", "def run_epoch(self, session):\n start_time = time.time()\n costs = 0.0\n iters = 0\n\n fetches = {\n \"cost\": self._cost,\n \"final_state\": self._final_state,\n }\n\n if(self._is_training):\n fetches[\"train_op\"] = self._train_op\n\n state = session.run(self._initial_state)\n\n for step in range(self._input.epoch_size):\n feed_dict = {}\n for i, (c, h) in enumerate(self._initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n\n costs += cost\n iters += self._input.num_steps\n\n return np.exp(costs / iters)", "def _run_training_loop(self, m, curr_epoch):\n start_time = time.time()\n while True:\n try:\n with self._new_session(m):\n train_accuracy = helper_utils.run_epoch_training(\n self.session, m, self.data_loader, curr_epoch)\n tf.logging.info('Saving model after epoch')\n self.save_model(step=curr_epoch)\n break\n except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:\n tf.logging.info('Retryable error caught: %s. 
Retrying.', e)\n tf.logging.info('Finished epoch: {}'.format(curr_epoch))\n tf.logging.info('Epoch time(min): {}'.format(\n (time.time() - start_time) / 60.0))\n return train_accuracy", "def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)", "def run_one_epoch(\n self,\n epoch: int,\n extra_log_info: List[Tuple[str, float, Callable[[float], str]]] = None,\n ) -> None:\n self.lr_scheduler(self.optimizer, epoch)\n\n \n # train\n train_loss, train_stat = self.train_one_epoch()\n\n # test\n test_loss, test_stat = self.test_one_epoch()\n \n\n\n # save all params that showed the best acc\n\n test_acc = test_stat[\"model_acc\"]\n if test_acc > self.best_acc:\n self.best_acc = test_acc\n filename = str(epoch) + \"_\" + f\"{self.best_acc:.2f}\".replace(\".\", \"_\")\n self.save_params(self.model_save_dir, filename, epoch)\n \n # log\n if not extra_log_info:\n extra_log_info = []\n lr = self.optimizer.param_groups[0][\"lr\"]\n log_info: List[Tuple[str, float, Callable[[float], str]]] = []\n log_info.append((\"train/lr\", lr, default_format))\n log_info.append((\"train/loss\", train_loss, default_format))\n log_info += [(\"train/\" + k, v, percent_format) for k, v in train_stat.items()]\n log_info.append((\"test/loss\", test_loss, default_format))\n log_info += [(\"test/\" + k, v, percent_format) for k, v in test_stat.items()]\n log_info.append((\"test/best_acc\", self.best_acc, percent_format))\n self.log_one_epoch(epoch, log_info + extra_log_info)", "def train_epoch(model, train_dataloader, optimizer, loss_fn):\n model.train()\n total_training_loss = 0\n for batch_index, batch in enumerate(train_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n loss = loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_training_loss += loss", "def _run_epoch(self, train_loader, valid_loader, threshold):\n # set model in train mode and run a train pass\n self.net.train()\n train_loss, train_metric = self._train_epoch(train_loader, threshold)\n\n # set model in eval mode and validate epoch\n self.net.eval()\n val_loss, val_metric = self._validate_epoch(valid_loader, threshold)\n self.epoch_counter += 1\n\n print(\"Epoch: {}\".format(self.epoch_counter))\n print(\"LOSS - Training : [{}], Validation : [{}]\".format(round(train_loss, 4), round(val_loss, 4)))\n print(\"METRIC - Training : [{}], Validation : [{}]\".format(round(train_metric, 4), round(val_metric, 4)))\n return val_loss, val_metric", "def train(self, batch_training=False):\n raise NotImplementedError", "def run(self):\n for _ in range(self.epoch, conf.FX_MAX_EPOCHS):\n self.train()\n\n with torch.no_grad():\n self.test()\n\n self.epoch += 1\n self.save_ck()\n\n self.show_completion_msg()", "def train_epoch(self, epoch):\n device_mapper = self.device_mapper\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, data in enumerate(self.data_loader):\n data = device_mapper.map_modules(data, non_blocking=True)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n loss = self.loss(output, data)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item(), batch_size=output.size(0))\n for met in self.metrics:\n self.train_metrics.update(met.name(), met(output, data), batch_size=output.size(0))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: %d %s Loss: 
%.6f', epoch, self._progress(batch_idx), loss.item())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self.valid_epoch(epoch)\n log.update(**{'val_' + k: v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(log[\"val_roc_auc\"])\n else:\n self.lr_scheduler.step()\n return log", "def _train_epoch(self, epoch):\n self.model.train()\n self.train_metrics.reset()\n for batch_idx, (data, target_seg, target_class) in enumerate(self.data_loader):\n data, target_seg, target_class = data.to(self.device), target_seg.to(self.device), target_class.to(self.device)\n\n self.optimizer.zero_grad()\n output_seg, output_class = self.model(data)\n loss = self.criterion((output_seg, output_class), target_seg, target_class, epoch)\n loss.backward()\n self.optimizer.step()\n\n self.writer.set_step((epoch - 1) * self.len_epoch + batch_idx)\n self.train_metrics.update('loss', loss.item())\n for met in self.metric_ftns:\n if met.__name__ == \"accuracy\":\n self.train_metrics.update(met.__name__, met(output_class, target_class))\n else:\n self.train_metrics.update(met.__name__, met(output_seg, target_seg))\n\n if batch_idx % self.log_step == 0:\n self.logger.debug('Train Epoch: {} {} Loss: {:.6f}'.format(\n epoch,\n self._progress(batch_idx),\n loss.item()))\n\n self._visualize_input(data.cpu())\n\n if batch_idx == self.len_epoch:\n break\n log = self.train_metrics.result()\n\n if self.do_validation:\n val_log = self._valid_epoch(epoch)\n log.update(**{'val_'+k : v for k, v in val_log.items()})\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n return log", "def _train_epoch(self, model, tqdm_data,\n optimizer_disc=None, optimizer_gen=None):", "def run_training(model, batcher, sess_context_manager, sv, summary_writer):\r\n logging.info(\"starting run_training\")\r\n with sess_context_manager as sess:\r\n if FLAGS.debug: # start the tensorflow debugger\r\n sess = tf_debug.LocalCLIDebugWrapperSession(sess)\r\n sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\r\n if FLAGS.num_iterations == -1:\r\n while True: # repeats until interrupted\r\n run_training_iteration(model, batcher, summary_writer, sess)\r\n else:\r\n initial_iter = model.global_step.eval(sess)\r\n pbar = tqdm(initial=initial_iter, total=FLAGS.num_iterations)\r\n print((\"Starting at iteration %d\" % initial_iter))\r\n for iter_idx in range(initial_iter, FLAGS.num_iterations):\r\n run_training_iteration(model, batcher, summary_writer, sess)\r\n pbar.update(1)\r\n pbar.close()", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def _train_epoch(self, epoch):\n self.model.train()\n total_loss = 0\n\n self.logger.info('Train Epoch: {}'.format(epoch))\n\n for batch_idx, (data) in enumerate(self.train_loader):\n start_it = time()\n data = data.to(self.device)\n\n self.optimizer.zero_grad()\n output = self.model(data)\n if isinstance(output, tuple):\n loss = self.model.loss(data, *output)\n else:\n loss = self.model.loss(data, output)\n loss.backward()\n self.optimizer.step()\n\n step = epoch * len(self.train_loader) + batch_idx\n self.tb_writer.add_scalar('train/loss', loss.item(), step)\n # self.comet_writer.log_metric('loss', loss.item(), step)\n\n total_loss += 
loss.item()\n\n end_it = time()\n time_it = end_it - start_it\n if batch_idx % self.log_step == 0:\n self.logger.info(\n ' > [{}/{} ({:.0f}%), {:.2f}s] Loss: {:.6f} '.format(\n batch_idx * self.train_loader.batch_size + data.size(\n 0),\n len(self.train_loader.dataset),\n 100.0 * batch_idx / len(self.train_loader),\n time_it * (len(self.train_loader) - batch_idx),\n loss.item()))\n # grid = make_grid(data.cpu(), nrow=8, normalize=True)\n # self.tb_writer.add_image('input', grid, step)\n\n self.logger.info(' > Total loss: {:.6f}'.format(\n total_loss / len(self.train_loader)\n ))\n\n return total_loss / len(self.train_loader)", "def train(self, training_steps=10):", "def _train_epoch(self, epoch):\n\t\tstart = time.time()\n\t\tself.model.train()\n\t\tfor batch_idx, (images, labels) in enumerate(self.train_loader):\n\n\t\t\timages, labels = images.to(self.config.DEVICE), labels.to(self.config.DEVICE)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\toutput = self.model(images)\n\t\t\tloss = self.criterion(output, labels)\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\n\t\t\tn_iter = (epoch - 1) * len(self.train_loader) + batch_idx + 1\n\n\t\t\tlast_layer = list(self.model.children())[-1]\n\t\t\tfor name, para in last_layer.named_parameters():\n\t\t\t\tif 'weight' in name:\n\t\t\t\t\tself.logger_setup['writer'].add_scalar('LastLayerGradients/grad_norm2_weights', para.grad.norm(), n_iter)\n\t\t\t\tif 'bias' in name:\n\t\t\t\t\tself.logger_setup['writer'].add_scalar('LastLayerGradients/grad_norm2_bias', para.grad.norm(), n_iter)\n\n\t\t\tprint('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\\tLoss: {:0.4f}\\tLR: {:0.6f}'.format(\n\t\t\t\tloss.item(),\n\t\t\t\tself.optimizer.param_groups[0]['lr'],\n\t\t\t\tepoch=epoch,\n\t\t\t\ttrained_samples=self.config.loader_params['bs']*(batch_idx + 1),\n\t\t\t\ttotal_samples=self.total_train_samples))\n\t\t\n\t\t\t#update training loss for each iteration\n\t\t\tself.logger_setup['writer'].add_scalar('Train/loss', loss.item(), n_iter)\n\t\t\t\n\t\t\tif self.config.WARM_UP and (epoch <= self.config.WARM_EPOCH):\n\t\t\t\tself.warmup_scheduler.step()\n\n\t\tfor name, param in self.model.named_parameters():\n\t\t\tlayer, attr = os.path.splitext(name)\n\t\t\tattr = attr[1:]\n\t\t\tself.logger_setup['writer'].add_histogram(\"{}/{}\".format(layer, attr), param, epoch)\n\n\t\tfinish = time.time()\n\t\tprint('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def __call__(self, initial_lr, step, epoch):\n\n pass", "def iter_epoch(self):\n\n # set to train mode\n self._set_train()\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n self._batch_iter(source, target, i)\n\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1", "def run_epoch(session,\n model,\n dataset,\n is_train=False,\n plot_attention_weights=False):\n assert dataset is not None\n n_words = len([word for sample in dataset for word in sample if word > 0])\n epoch_size = int(math.ceil(len(dataset) / model.batch_size))\n # producer = lm_data_producer(dataset, model.batch_size, model.num_steps)\n\n fetches = {\"step_cost\": model.batch_loss, \"niters\": 
model.nwords}\n if is_train:\n fetches[\"eval_op\"] = model.train_op\n if plot_attention_weights:\n fetches[\"weights\"] = model.attention_weights\n\n costs = 0.0\n iters = 0\n start_time = time.time()\n # for step, (x, y) in enumerate(producer):\n for step in range(epoch_size):\n step_time = time.time()\n vals = session.run(fetches, {})\n step_cost = vals[\"step_cost\"]\n costs += step_cost\n # iters += np.sum(x > 0)\n iters += vals[\"niters\"]\n\n # print information regarding the current training process\n if is_train:\n if step % (epoch_size // 20) == 10:\n print(\"{:.3f} - aprox. loss {:.8f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, costs / (step + 1),\n iters / (time.time() - start_time)))\n # print information regarding the current training process\n else:\n if step % (epoch_size // 10) == 5:\n print(\"{:.3f} - approx. speed: {:.0f} wps\".format(\n step * 1.0 / epoch_size, iters / (time.time() - start_time)))\n\n return np.exp(costs / n_words)", "def train_epoch(net, train_iter, loss, updater): #@save\n # Set the model to training mode\n if isinstance(net, torch.nn.Module):\n net.train()\n # Sum of training loss, sum of training accuracy, no. of examples\n metric = Accumulator(3)\n for X, y in train_iter:\n # Compute gradients and update parameters\n y_hat = net(X)\n l = loss(y_hat, y)\n if isinstance(updater, torch.optim.Optimizer):\n # Using PyTorch in-built optimizer & loss criterion\n updater.zero_grad()\n l.backward()\n updater.step()\n metric.add(float(l) * len(y), accuracy(y_hat, y),\n y.numel())\n else:\n # Using custom built optimizer & loss criterion\n l.sum().backward()\n updater(X.shape[0])\n metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())\n # Return training loss and training accuracy\n return metric[0] / metric[2], metric[1] / metric[2]", "def train():\n pass", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def epoch_start(self, epoch):\n self.epoch = epoch", "def fit_one_epoch(self):\n preds, labels = [], []\n for batch_idx, data in tqdm(enumerate(self.primary_dataloader)):\n losses_report, train_preds, train_labels = self.forward_one_batch(\n data)\n preds.append(train_preds)\n labels.append(train_labels)\n\n self._optimize(losses_report)\n self._update_losses(losses_report, train=True)\n\n self.iter += 1\n\n # log/check point\n with torch.no_grad():\n if self.iter % self.log_iter == 0:\n # TODO: track train\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=True)\n preds, labels = [], []\n\n if self.valid_dataloader:\n self.validate()\n\n self.log_meters()\n self.save_checkpoint()\n self.reset_meters()", "def run_epoch(self, sess, input_data, verbose=None):\n data_len = len(input_data)\n total_steps =data_len // self.config.batch_size\n total_loss = []\n for step, (ret_batch, ret_label, sent_num_enc, sent_num_dec, sent_len) in enumerate(\n helper.data_iter(input_data, self.config.batch_size, self.vocab, \n 
noize_list=self.train_data_flatten_list, noize_num=noize_num)):\n feed_dict = self.create_feed_dict(ret_batch, sent_len, sent_num_enc, ret_label, sent_num_dec)\n \n _, loss, lr = sess.run([self.train_op, self.loss, self.learning_rate], feed_dict=feed_dict)\n total_loss.append(loss)\n if verbose and step % verbose == 0:\n sys.stdout.write('\\r{} / {} : loss = {}, lr = {}'.format(\n step, total_steps, np.mean(total_loss[-verbose:]), lr))\n sys.stdout.flush()\n sys.stdout.write('\\n')\n avg_loss = np.mean(total_loss)\n return avg_loss", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def train(self):\r\n\r\n for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):\r\n self.train_epoch(cur_epoch)\r\n self.model.global_step_assign_op.eval(session=self.sess, feed_dict={\r\n self.model.global_step_input: self.model.global_step_tensor.eval(self.sess) + 1})", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n 
self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')", "def train(self, epochs=5):\n x_train, y_train, x_test, y_test = self._load_data()\n x_train = tf.keras.utils.normalize(x_train, axis=1) # Scale between 0-1\n x_test = tf.keras.utils.normalize(x_test, axis=1)\n\n model = tf.keras.models.Sequential()\n # 28 x 28 (digits dimensions) -> flat 784\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n model.add(tf.keras.layers.Dense(128, activation=tf.nn.relu))\n # neurons -> number of classification\n model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))\n\n dtnow = datetime.now().strftime(\"%Y-%m-%dT%H:%M\")\n tb_logs = self._artifact_repo.artifact_path(self._TENSORBOARD_LOGS)\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir='{}/{}'.format(tb_logs, dtnow))\n model.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n )\n model.fit(x_train, y_train, epochs=int(epochs), validation_data=(x_test, y_test), callbacks=[tensorboard])\n\n # val_loss, val_acc = model.evaluate(x_test, y_test)\n\n # self._logger.info(\"Evaluation on test dataset: Loss: %s, Accuracy: %s\", val_loss, val_acc)\n\n path = self._artifact_repo.artifact_path(self._ARTIFACT_MODEL)\n model.save(path)", "def run_epoch(session, model, dataset,\n keep_prob=1.0, passes=1.0, verbose=False):\n num_batches = dataset.num_batches\n start_time = time.time()\n train_cost = train_accy = valid_cost = valid_accy = 0.0\n train_evals = valid_evals = 0.0\n dot_count = 0\n total_steps = int(passes*num_batches)\n prog_int = total_steps/100 # progress interval for stdout\n\n if not num_batches > 0:\n raise RuntimeError(\"batch_size*num_unrollings is larger \"\n \"than the training set size.\")\n\n dataset.rewind() # make sure we start a beggining\n\n print(\"batches: %d \"%num_batches,end=' ')\n\n for step in range(total_steps):\n batch = dataset.next_batch()\n\n (tcost, taccy, tevals,\n vcost, vaccy, vevals) = model.train_step(session, batch,\n keep_prob=keep_prob)\n\n train_cost += tcost\n train_accy += taccy\n train_evals += tevals\n valid_cost += vcost\n valid_accy += vaccy\n valid_evals += vevals\n\n if ( verbose and ((prog_int<=1) or\n (step % (int(prog_int)+1)) == 0) ):\n dot_count += 1\n print('.',end='')\n sys.stdout.flush()\n\n if verbose:\n print(\".\"*(100-dot_count),end='')\n print(\" passes: %.2f train iters: %d valid iters: %d \"\n \"speed: %.0f seconds\" % (passes,\n train_evals,\n valid_evals,\n (time.time() - start_time)) )\n sys.stdout.flush()\n\n return (train_cost/train_evals,\n 1.0 - train_accy/train_evals,\n valid_cost/valid_evals,\n 1.0 - valid_accy/valid_evals)", "def start_training(self):\n self.training = True", "def run_epoch(data, config, is_train, verbose=False):\n epoch_size = ((len(data) // config.batch_size) - 1) // config.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n for hidden_state in hidden_states:\n hidden_state.set_value(np.zeros_like(hidden_state.get_value()))\n for step, (x, y) in enumerate(data_iterator(data, config.batch_size, config.num_steps)):\n if is_train:\n noise_x = get_noise_x(x, config.drop_x)\n cost = train(x, y, noise_x)\n else:\n cost = evaluate(x, y)\n costs += cost\n iters += config.num_steps\n if verbose and step % (epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" % (step * 1.0 / epoch_size, np.exp(costs / 
iters),\n iters * config.batch_size / (time.time() - start_time)))\n return np.exp(costs / iters)", "def train_epoch(args, loss_func, pbar, train_loader, model, optimizer,\n train_bpd, train_recon_error , train_perplexity):\n model.train()\n # Loop data in epoch\n for x, _ in train_loader:\n\n # This break used for debugging\n if args.max_iterations is not None:\n if args.global_it > args.max_iterations:\n break\n\n x = x.to(args.device)\n\n # Get reconstruction and vector quantization loss\n # `x_prime`: reconstruction of `input`\n # `vq_loss`: MSE(encoded embeddings, nearest emb in codebooks)\n x_prime, vq_loss, perplexity = model(x)\n\n loss, log_pxz, bpd = loss_func(args, x_prime, x, vq_loss, model)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_bpd.append((-1)*bpd.item())\n train_recon_error.append((-1)*log_pxz.item())\n train_perplexity.append(perplexity.item())\n\n # Print Average every 100 steps\n if (args.global_it+1) % 100 == 0:\n av_bpd = np.mean(train_bpd[-100:])\n av_rec_err = np.mean(train_recon_error[-100:])\n av_ppl = np.mean(train_perplexity[-100:])\n if args.model == 'vqvae':\n pbar.print_train(bpd=float(av_bpd), rec_err=float(av_rec_err),\n increment=100)\n elif args.model == 'diffvqvae':\n pbar.print_train(bpd=float(av_bpd), temp=float(model.temp),\n increment=100)\n args.global_it += 1", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def run_epoch(self, dataloader, train=True):\n losses = []\n accs = []\n for imgs, targets in dataloader:\n imgs, targets = imgs.to(self.device), targets.to(self.device)\n\n # calc. 
the losses\n output = self.resnet(imgs)\n loss = self.criterion(output, targets)\n\n if train:\n # update the parameters\n self.optimizer.zero_grad() # initialize gradients\n loss.backward()\n self.optimizer.step()\n\n # save training results\n if self.total_steps % 10 == 0:\n accuracy = self.calc_batch_accuracy(output, targets)\n accs.append(accuracy.item())\n losses.append(loss.item())\n self.log_performance(self.summary_writer,\n {'loss': loss.item(), 'acc': accuracy.item()},\n self.epoch,\n self.total_steps)\n\n if self.total_steps % 100 == 0:\n self.save_module_summary(\n self.summary_writer, self.resnet.module, self.total_steps)\n\n self.total_steps += 1\n else: # no training - validation\n accuracy = self.calc_batch_accuracy(output, targets)\n accs.append(accuracy.item())\n losses.append(loss.item())\n\n avg_loss = sum(losses) / len(losses)\n avg_acc = sum(accs) / len(accs)\n return avg_loss, avg_acc", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def run_step(self):\n self.hooked_sess.run(self.train_op)", "def train_nn_epoch(epoch, sess, batch_size, get_batches_fn, train_op, loss, recall, recall_op, input_image, correct_label,\n learning_rate, keep_prob):\n last_time = time.time()\n batch_count = 0\n sample_count = 0\n\n for image, label in get_batches_fn(batch_size):\n _, loss_val, _, recall_val = sess.run([train_op, loss, recall_op, recall],\n feed_dict={input_image: image,\n correct_label: label,\n learning_rate: LEARNING_RATE,\n keep_prob: KEEP_PROB})\n cur_time = time.time()\n batch_count += 1\n sample_count += image.shape[0]\n\n print(\"Epoch: {}, Batch: {:02d}, Loss: {:.3f}, Recall: {:.3f}, Time: {:.3f}\".format(epoch,\n batch_count,\n loss_val,\n recall_val,\n cur_time - last_time))\n last_time = cur_time", "def run_epoch(model, data):\n model.eval()\n state_dict = torch.load('saved_model.pt', map_location=\"cpu\")\n model.load_state_dict(state_dict)\n total_loss = np.zeros(model.seq_len)\n steps = 0\n # LOOP THROUGH MINI BATCHES\n for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.seq_len)):\n steps += 1\n if args.model != 'TRANSFORMER':\n hidden = model.init_hidden()\n hidden 
= hidden.to(device)\n\n if args.model == 'TRANSFORMER':\n batch = Batch(torch.from_numpy(x).long().to(device))\n model.zero_grad()\n outputs = model.forward(batch.data, batch.mask).transpose(1, 0)\n # print (\"outputs.shape\", outputs.shape)\n else:\n inputs = torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()\n model.zero_grad()\n hidden = repackage_hidden(hidden)\n outputs, hidden = model(inputs, hidden)\n\n targets = torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda()\n total_loss += np.array([loss_fn(outputs[i], targets[i]).item() for i in range(len(outputs))])\n\n total_loss /= float(steps)\n print(total_loss)", "def run_epoch(data, session, model, train_op=None, verbose=False):\n start_time = time.time()\n costs = 0.0\n num_steps = model['num_steps']\n num_batches = (data.shape[1] - 1) // num_steps\n\n # initialize RNN cell states to be all zero\n state = session.run(model['initial_state'])\n\n fetches = {\n \"cost\": model['cost'],\n \"final_state\": model['final_state'],\n }\n\n # train model\n if train_op is not None:\n fetches[\"train_op\"] = train_op\n\n for batch in range(num_batches):\n feed_dict = {\n model['user_inputs']: data[:, batch * num_steps: (batch + 1) * num_steps],\n model['targets']: data[:, batch * num_steps + 1: (batch + 1) * num_steps + 1],\n }\n for i, (c, h) in enumerate(model['initial_state']):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n\n vals = session.run(fetches, feed_dict)\n cost = vals[\"cost\"]\n state = vals[\"final_state\"]\n costs += cost\n\n if verbose and batch % (num_batches // 10) == 10:\n iters = num_steps * (batch + 1)\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (batch * 1.0 / num_batches, np.exp(costs / iters),\n iters * data.shape[0] * 1 /\n (time.time() - start_time)))\n\n return np.exp(costs / (data.shape[1] - 1))", "def train_epoch(self, epoch_info: EpochInfo, interactive=True) -> None:\n epoch_info.on_epoch_begin()\n\n if interactive:\n iterator = tqdm.trange(epoch_info.batches_per_epoch, file=sys.stdout, desc=\"Training\", unit=\"batch\")\n else:\n iterator = range(epoch_info.batches_per_epoch)\n\n for batch_idx in iterator:\n batch_info = BatchInfo(epoch_info, batch_idx)\n\n batch_info.on_batch_begin()\n self.train_batch(batch_info)\n batch_info.on_batch_end()\n\n epoch_info.result_accumulator.freeze_results()\n epoch_info.on_epoch_end()", "def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")", "def train(self):\n for e in range(self.train_config['nb_epochs']):\n self.on_epoch(e)\n\n with open(os.path.join(self.exp_path, 'config.yml'), 'w') as outfile:\n yaml.dump(self.config, outfile, default_flow_style=False)", "def train(self, nsamples = 1, verbose = False, random = True):\n imgs, skels = self.images.get_batch(nimages = nsamples, random = random);\n self.trainer.run(session = self.session, feed_dict={self.input : imgs, self.skeleton : skels})\n if verbose:\n self.plot_results(imgs);", "def train_epoch(self, data_loader):\n self.model.train()\n\n # Prepare summary information\n summary = dict()\n sum_loss = 0\n\n # Loop over training batches\n for i, (batch_input, batch_target) in enumerate(data_loader):\n batch_input = [a.to(self.device) for a in batch_input]\n batch_target = 
batch_target.to(self.device)\n\n # Compute target weights on-the-fly for loss function\n batch_weights_real = batch_target * self.real_weight\n batch_weights_fake = (1 - batch_target) * self.fake_weight\n batch_weights = batch_weights_real + batch_weights_fake\n\n # Train on this batch\n self.model.zero_grad()\n batch_output = self.model(batch_input)\n batch_loss = self.loss_func(batch_output, batch_target, weight=batch_weights)\n batch_loss.backward()\n self.optimizer.step()\n sum_loss += batch_loss.item()\n self.logger.debug(' train batch %i, loss %f', i, batch_loss.item())\n\n # Summarize the epoch\n n_batches = i + 1\n summary['lr'] = self.optimizer.param_groups[0]['lr']\n summary['train_loss'] = sum_loss / n_batches\n self.logger.debug(' Processed %i batches', n_batches)\n self.logger.debug(' Current LR %f', summary['lr'])\n self.logger.info(' Training loss: %.3f', summary['train_loss'])\n return summary", "def call_training_routine(self):\n training_command = \"th main.lua \"\\\n \"-GPU_id %(GPU_identifier)i \"\\\n \"-number_of_GPUs %(number_of_GPUs)i \"\\\n \"-training_dataset %(training_dataset)s \"\\\n \"-testing_dataset %(testing_dataset)s \"\\\n \"-modelFilePath %(modelFilePath)s \"\\\n \"-maxepoch %(maxepoch)i \"\\\n \"-savingDirectory %(savingDirectory)s \"\\\n \"-learningRate %(learningRate)f \"\\\n \"-batchSize %(batchSize)i \"\\\n \"-momentum %(momentum)f\" % self.training_parameters\n\n if self.training_parameters[\"presavedModelPath\"] != \"\":\n training_command += \" -presavedModelPath %s\" %\\\n self.training_parameters[\"presavedModelPath\"]\n\n # Call the training command\n subprocess.call(training_command, shell=True)", "def run_epoch(self, sess, inputs, labels):\n n_minibatches, total_loss = 0, 0\n for input_batch, labels_batch in get_minibatches([inputs, labels], self.config.batch_size):\n n_minibatches += 1\n total_loss += self.train_on_batch(sess, input_batch, labels_batch)\n return total_loss / n_minibatches", "def run_epoch(self, epoch, data_loader, training=False):\n if training:\n self.model.train()\n else:\n self.model.eval()\n\n epoch_metrics = {\"loss\": 0.0}\n overall_parsing_counts = {\"correct\": 0, \"predicted\": 0, \"gold\": 0}\n num_evaluated_batches = 0\n\n with torch.set_grad_enabled(training):\n for sentences, target in data_loader:\n # Run model\n target = self._to_device(target)\n output, parsing_counts = self.parser.evaluate_batch(sentences)\n\n # Compute loss\n output, target = self._unroll_sequence_batch(output), self._unroll_sequence_batch(target)\n loss = self.criterion(output, target)\n\n # Add metrics to overall total\n epoch_metrics[\"loss\"] += loss.item()\n for count in \"gold\", \"predicted\", \"correct\":\n overall_parsing_counts[count] += parsing_counts[count]\n\n # Perform backpropagation (when training)\n if training:\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # Print progress\n num_evaluated_batches += 1\n self.logger.debug('{} Epoch: {} {} Loss: {:.6f}'.format(\n \"Training\" if training else \"Validation\",\n epoch,\n self._progress(num_evaluated_batches, data_loader),\n loss.item()))\n\n epoch_metrics.update(self.compute_prf(overall_parsing_counts))\n\n return epoch_metrics", "def train(self):\n for epoch in range(self.current_epoch, self.config.optim.epochs):\n self.current_epoch = epoch\n self.train_one_epoch()\n if epoch % self.config.optim.val_freq == 0:\n self.validate()\n if self.config.optim.auto_schedule:\n self.scheduler.step(self.current_val_loss)\n self.save_checkpoint()", "def 
train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 0 is loss." ]
[ "0.8152491", "0.7976923", "0.78916997", "0.78650874", "0.78335357", "0.78335357", "0.78335357", "0.78335357", "0.7772364", "0.7753938", "0.77308905", "0.7689781", "0.76347524", "0.7558736", "0.75391996", "0.75388575", "0.75285155", "0.7487567", "0.7441571", "0.7418234", "0.73650664", "0.7359628", "0.7352041", "0.73368233", "0.73368233", "0.73090035", "0.72836894", "0.7247344", "0.7242724", "0.7229201", "0.7200613", "0.7185765", "0.7181795", "0.7161921", "0.71469283", "0.7130515", "0.7114882", "0.7104362", "0.7085922", "0.7085277", "0.70791173", "0.707797", "0.707713", "0.70733416", "0.70634705", "0.7061835", "0.7055072", "0.70504117", "0.7049276", "0.7042776", "0.70344234", "0.70330644", "0.7024307", "0.70211643", "0.70206434", "0.7018418", "0.69991976", "0.69975424", "0.6958844", "0.6954776", "0.69483536", "0.6939809", "0.6939492", "0.69212747", "0.6911905", "0.6897675", "0.68947583", "0.6889709", "0.6885324", "0.6874642", "0.68667346", "0.68601257", "0.6859316", "0.6855945", "0.68525636", "0.6843164", "0.68417305", "0.6833268", "0.68217546", "0.6819857", "0.6818937", "0.68182576", "0.6813223", "0.6810447", "0.6805029", "0.6804248", "0.6782929", "0.6781962", "0.67798907", "0.6779486", "0.6778932", "0.67755586", "0.6770911", "0.6767958", "0.676771", "0.6759931", "0.67557234", "0.6752945", "0.6750852", "0.6750852", "0.6750386" ]
0.0
-1
Evaluates the model on the validation set.
def val_loop(self, epoch_index, args, model, dataset, optimizer, val_bar):
    dataset.set_split('val')
    batch_generator = generate_nmt_batches(dataset,
                                           batch_size=args.batch_size,
                                           device=args.device)
    running_loss = 0.0
    running_acc = 0.0
    model.eval()

    for batch_index, batch_dict in enumerate(batch_generator):
        # step 1. compute the output
        if isinstance(model, NMTModelWithMLTM):
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_mltm_vector'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        else:
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])

        # step 2. compute the loss
        loss = sequence_loss(y_pred, batch_dict['y_target'], self.mask_index)

        # -----------------------------------------
        # compute the running loss and running accuracy
        running_loss += (loss.item() - running_loss) / (batch_index + 1)
        acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)
        running_acc += (acc_t - running_acc) / (batch_index + 1)

        # update bar
        val_bar.set_postfix(loss=running_loss, acc=running_acc, epoch=epoch_index)
        val_bar.update()

    return running_loss, running_acc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss", "def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()", "def set_eval(self):\n self.model.eval()", "def validate(self):\n stats = {}\n evaluate_config = {\"verbose\": self.verbose}\n evaluate_config.update(self.config.get(\"evaluate_config\", {}))\n\n results = self.model.evaluate(self.test_dataset, **evaluate_config)\n if results is None:\n # Using local Model since model.evaluate() returns None\n # for MultiWorkerMirroredStrategy\n logger.warning(\"Running a local model to get validation score.\")\n self.local_model = self.model_creator(self.config)\n self.local_model.set_weights(self.model.get_weights())\n results = self.local_model.evaluate(self.test_dataset,\n **evaluate_config)\n\n if isinstance(results, list):\n stats = {\n \"validation_\" + k: v\n for k, v in zip(self.model.metrics_names, results)\n }\n else:\n stats = {\"loss\": results}\n\n return stats", "def evaluate_model(model, ds_valid):\n print(\"-- Evaluate Model:\")\n for features, labels in ds_valid:\n valid_step(model, features, labels)\n logs = \"\\nValid Loss: {}, Valid Accuracy: {}\"\n tf.print(tf.strings.format(logs, (valid_loss.result(), valid_metric.result())))\n valid_loss.reset_states()\n train_metric.reset_states()\n valid_metric.reset_states()", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def validate(self, data_loader=None):\n if data_loader is None:\n data_loader = self.dataset.val_data_loader\n m = self.model\n m.eval()\n\n batch_matrix_list = []\n for idx, data in tqdm(enumerate(data_loader), total=len(data_loader)):\n if type(data) is dict:\n for key, value in data.items():\n data[key] = value.to(self.device)\n pred = m.predict(data)\n batch_matrix = self.evaluator.collect(data, pred)\n batch_matrix_list.append(batch_matrix)\n\n if self.single:\n result = self.evaluator.evaluate(batch_matrix_list, groupby=False)\n else:\n result = self.evaluator.evaluate(batch_matrix_list, groupby=True)\n return result", "def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr 
scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def validate(self):\n for rule in self.get_rules():\n rule.validate(self.get_val())", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def eval_model(self):\n self.decoder.eval()\n\n validation_losses_in_epoch = []\n\n with torch.no_grad():\n # validation iterations loop\n for inputs1, inputs2, targets in zip(self.train_loader_enc1, self.train_loader_enc2,\n self.train_loader_pred):\n # get encoded inputs and targets\n encoded_inputs_tensor = encode_inputs(inputs1, inputs2, self.encoder1, self.encoder2, self.device)\n targets = targets[0]\n\n # get outputs\n outputs = self.decoder(encoded_inputs_tensor)\n\n # calculate loss and add to list\n loss = self.loss_criterion(outputs, targets)\n loss_item = loss.cpu().detach().item()\n validation_losses_in_epoch.append(loss_item)\n\n # calculate average validation loss for epoch\n average_validation_loss = sum(validation_losses_in_epoch) / len(validation_losses_in_epoch)\n\n return average_validation_loss", "def validate(model, criterion, valset, iteration, batch_size, n_gpus,\n collate_fn, logger, rank):\n \n model.eval()\n \n with torch.no_grad():\n val_sampler = DistributedSampler(valset) if n_gpus > 1 else None\n val_loader = DataLoader(\n valset,\n sampler=val_sampler,\n num_workers=1, \n shuffle=False,\n batch_size=batch_size,\n pin_memory=False,\n collate_fn=collate_fn\n )\n\n val_loss = 0.0\n\n for i, batch in enumerate(val_loader):\n x, y = model.parse_batch(batch)\n y_pred = model(x)\n loss = criterion(y_pred, y)\n reduced_val_loss = reduce_tensor(loss.data, n_gpus).item() \\\n if n_gpus > 1 else loss.item()\n val_loss += reduced_val_loss\n val_loss = val_loss / (i + 1)\n \n model.train()\n\n if rank == 0:\n 
print(\"Validation loss {}: {:9f} \".format(iteration, reduced_val_loss))\n logger.log_validation(reduced_val_loss, model, y, y_pred, iteration)\n \n return val_loss", "def get_model_evaluation(self):\n\n self.log(f\"{self.cur_file_path}\\t\\tInfo: model_evaluation method invoked for {self.model.__class__.__name__}!\")\n\n evaluation = ModelEvaluation(self.model, (self.trainX, self.trainY), (self.testX, self.testY))\n return evaluation.get_evaluation_report()", "def evaluate(model, val_data, epoch):\n print('validating')\n\n # 设置为评估模式 \n model.eval() \n\n val_loss = []\n with torch.no_grad():\n DEVICE = config.DEVICE\n\n val_dataloader = DataLoader(dataset=val_data,\n batch_size=config.batch_size,\n shuffle=True,\n pin_memory=True, drop_last=True,\n collate_fn=collate_fn)\n\n for batch, data in enumerate(tqdm(val_dataloader)):\n\n x, y, x_len, y_len, oov, len_oovs = data\n\n if config.is_cuda:\n x = x.to(DEVICE)\n y = y.to(DEVICE)\n x_len = x_len.to(DEVICE)\n len_oovs = len_oovs.to(DEVICE)\n\n loss = model(x, x_len, y, len_oovs, batch=batch, \n num_batches=len(val_dataloader),\n teacher_forcing=True)\n\n val_loss.append(loss.item())\n\n return np.mean(val_loss)", "def _eval_during_training(\r\n self, evaluator, output_path, save_best_model, epoch, steps):\r\n if evaluator is not None:\r\n score = evaluator(\r\n self, output_path=output_path, epoch=epoch, steps=steps)\r\n if score > self.best_score and save_best_model:\r\n self.save(output_path)\r\n self.best_score = score", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def evaluate(self,**kwargs):\n # setup model\n self.optimizer = SGD(lr = 0,momentum=0,decay = 0)\n self.createModel()\n self.setGenerators()\n self.printParameters()\n output = {}\n\n if kwargs['validationOnly'] != None:\n if kwargs['validationOnly'] == True:\n valOnly = True\n else:\n valOnly = False\n else:\n valOnly = False\n\n if valOnly == False:\n trainOutput = self.model.evaluate_generator(\n generator = self.trainGen,\n steps=self.steps_per_epoch,\n use_multiprocessing=True,\n verbose=1\n )\n output['loss'] = trainOutput[0]\n for i in range(len(self.metricsAsString)):\n output[self.metricsAsString[i]] = trainOutput[i+1]\n\n print(\"loss : \" + str(output['loss']))\n for i in range(len(self.metricsAsString)):\n tmp = self.metricsAsString[i] \n print(tmp + \" : \" + str(output[tmp])) \n\n validationOutput = self.model.evaluate_generator(\n generator = self.validateGen,\n steps=self.validation_steps, \n 
use_multiprocessing=True, \n verbose=1)\n \n output['val_loss'] = validationOutput[0]\n for i in range(len(self.metricsAsString)):\n output[\"val_\" + self.metricsAsString[i]] = validationOutput[i+1]\n \n\n print(\"val_loss : \" + str(output['val_loss']))\n for i in range(len(self.metricsAsString)):\n tmp = \"val_\" + self.metricsAsString[i] \n print(tmp + \" : \" + str(output[tmp]))", "def evaluate_model(self):\r\n self.model.eval() # sets layers to eval mode (e.g. norm, dropout)\r\n with torch.no_grad(): # deactivates autograd engine\r\n\r\n # generate graphs required for model evaluation\r\n # note that evaluation of the generated graphs happens in\r\n # `generate_graphs()`, and molecules are saved as `self` attributes\r\n self.generate_graphs(n_samples=self.C.n_samples, evaluation=True)\r\n\r\n print(\"* Evaluating model.\", flush=True)\r\n anal.evaluate_model(valid_dataloader=self.valid_dataloader,\r\n train_dataloader=self.train_dataloader,\r\n nll_per_action=self.nll_per_action,\r\n model=self.model)\r\n\r\n self.nll_per_action = None # don't need anymore\r\n\r\n print(f\"* Saving model state at Epoch {self.current_epoch}.\", flush=True)\r\n\r\n # `pickle.HIGHEST_PROTOCOL` good for large objects\r\n model_path_and_filename = (self.C.job_dir + f\"model_restart_{self.current_epoch}.pth\")\r\n torch.save(obj=self.model,\r\n f=model_path_and_filename,\r\n pickle_protocol=pickle.HIGHEST_PROTOCOL)", "def _doValidation(self, val_dl: torch.utils.data.DataLoader):\n\n # Initialize variables for tracking loss, correct predictions, total samples, and labels\n val_loss = 0.0\n correct = 0\n total = 0\n true_labels = []\n pred_labels = []\n\n # Set the model to evaluation mode (disables gradient computation and dropout)\n self.eval()\n\n # Disable gradient tracking for efficiency\n with torch.no_grad():\n # Iterate over the validation data loader\n for x_batch, y_batch in val_dl:\n # Forward pass to obtain model predictions\n y_pred = self.forward(x_batch)\n # Compute the loss between the predictions and the ground truth\n loss = self.criterion(y_pred, y_batch)\n val_loss += loss.item()\n\n # Get the predicted labels by selecting the maximum value along the second dimension\n _, predicted = torch.max(y_pred.data, 1)\n # Update the count of total samples and correct predictions\n total += y_batch.size(0)\n correct += (predicted == y_batch).sum().item()\n\n # Extend the true and predicted labels lists\n true_labels.extend(y_batch.tolist())\n pred_labels.extend(predicted.tolist())\n\n # Compute the average validation loss\n val_loss /= len(val_dl)\n # Calculate the weighted F1 score for the true and predicted labels\n val_f1 = f1_score(true_labels, pred_labels, average='weighted') * 100\n\n # Return the validation loss, F1 score, true labels, and predicted labels\n return val_loss, val_f1, true_labels, pred_labels", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def __call__(self, save_fct):\n eval_scores = [\"Not evaluated\"]\n if self.train:\n logger.info(\"> Training\")\n self.train.run_training(save_fct = save_fct)\n logger.info('reverting learned weights to best checkpoint..')\n try:\n ParamManager.param_col.revert_to_best_model()\n except RevertingUnsavedModelException:\n pass\n\n evaluate_args = self.evaluate\n if evaluate_args:\n logger.info(\"> Performing final evaluation\")\n eval_scores = []\n for evaluator in 
evaluate_args:\n eval_score = evaluator.eval()\n if type(eval_score) == list:\n eval_scores.extend(eval_score)\n else:\n eval_scores.append(eval_score)\n\n return eval_scores", "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def set_models_eval(self):\n raise NotImplementedError", "def validate(self, trainingSet): \n if self.regression:\n return self._validateRegression(trainingSet) \n else:\n return self._validateClassification(trainingSet)", "def _validate_model(self, model: TModel, dataset: Dataset, subset_indices: List[int]) -> float:\n if self._is_metric_mode:\n metric_value, _ = self._evaluator.validate(model, dataset, subset_indices)\n else:\n approximate_outputs = self._evaluator.collect_values_for_each_item(model, dataset, subset_indices)\n reference_outputs = [self._initial_metric_results.values_for_each_item[i] for i in subset_indices]\n errors = [self._error_fn(a, b) for a, b in zip(reference_outputs, approximate_outputs)]\n metric_value = sum(errors) / len(errors)\n\n return metric_value", "def evaluate(model, loss, val_iterator):\n\n # Initializing parameters\n loss_value = 0.0\n accuracy = 0.0\n total_samples = 0\n\n with torch.no_grad():\n\n # Iterating over validation dataloader\n for data, labels in val_iterator:\n\n # Resetting variables for calculating current batch accuracy\n correct = 0\n total = 0\n\n # Map data to GPU if available\n if use_cuda:\n data = data.cuda()\n labels = labels.cuda(non_blocking=True)\n\n n_batch_samples = labels.size()[0]\n logits = model(data)\n\n # Compute batch loss\n batch_loss = loss(logits, labels)\n\n # Compute batch accuracy\n _, predicted = logits.max(1)\n total += labels.size(0)\n correct += predicted.eq(labels).sum().item()\n batch_accuracy = 100. 
* correct / total\n\n # Summing up batch losses and accuracies over each step\n loss_value += batch_loss.float() * n_batch_samples\n accuracy += batch_accuracy * n_batch_samples\n total_samples += n_batch_samples\n\n return loss_value / total_samples, accuracy / total_samples", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def eval(self, logger=None):\n self.model.eval()\n self.model_DP.eval()\n logger.info(\"Successfully set the model eval mode\")", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def __call__(self, trial) -> float:\n params = self._eval_params(trial, self._params)\n model = self._model(**params)\n\n if self._cv:\n return self._cross_validate(\n x=self._x,\n y=self._y,\n model=model,\n scorer=self._scorer,\n cv=self._cv,\n time_series=self._time_series,\n random_state=self._random_state,\n n_jobs=self._n_jobs,\n )\n else:\n return self._train_test(\n x=self._x,\n y=self._y,\n model=model,\n scorer=self._scorer,\n test_samples=self._test_samples,\n test_ratio=self._test_ratio,\n time_series=self._time_series,\n random_state=self._random_state,\n )", "def get_model_evaluations(self):\n return self._model_evaluations", "def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)", "def validate(\n self,\n val_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n if val_data is not None:\n val = val_data\n else:\n val = self.val_data\n\n classification, multilabel = U.is_classifier(self.model)\n if not classification:\n # warnings.warn('learner.validate is only for classification problems. 
'\n #'For regression, etc., use learner.predict and learner.ground_truth '\n #'to manually validate.')\n # return\n pass\n is_multilabel = U.is_multilabel(val) or multilabel\n y_pred = self.predict(val_data=val)\n y_true = self.ground_truth(val_data=val)\n y_pred = np.squeeze(y_pred)\n y_true = np.squeeze(y_true)\n\n # regression evaluation\n if not classification:\n from sklearn.metrics import mean_absolute_error, mean_squared_error\n\n regout = []\n metrics = U.metrics_from_model(self.model)\n for m in metrics:\n if m in [\"mae\", \"mean_absolute_error\"]:\n regout.append((m, mean_absolute_error(y_true, y_pred)))\n elif m in [\"mse\", \"mean_squared_error\"]:\n regout.append((m, mean_squared_error(y_true, y_pred)))\n if not regout:\n warnings.warn(\n \"%s is not supported by validate/evaluate - falling back to MAE\"\n )\n regout.append((\"mae\", mean_absolute_error(y_true, y_pred)))\n return regout\n\n if len(y_pred.shape) == 1:\n y_pred = np.where(y_pred > 0.5, 1, 0)\n y_true = np.where(y_true > 0.5, 1, 0)\n elif is_multilabel:\n from sklearn.preprocessing import binarize\n\n y_pred = binarize(y_pred, threshold=0.5)\n else:\n y_pred = np.argmax(y_pred, axis=1)\n y_true = np.argmax(y_true, axis=1)\n\n if print_report or save_path is not None:\n if class_names:\n try:\n class_names = [str(s) for s in class_names]\n except:\n pass\n report = classification_report(\n y_true,\n y_pred,\n target_names=class_names,\n output_dict=not print_report,\n )\n else:\n report = classification_report(\n y_true,\n y_pred,\n output_dict=not print_report,\n zero_division=0,\n )\n if print_report:\n print(report)\n else:\n df = pd.DataFrame(report).transpose()\n df.to_csv(save_path)\n print(\"classification report saved to: %s\" % (save_path))\n cm_func = confusion_matrix\n if is_multilabel:\n warnings.warn(\n \"Confusion matrices do not currently support multilabel classification, so returning None\"\n )\n return\n\n cm = confusion_matrix(y_true, y_pred)\n return cm", "def valid_one_epoch(self):\n prog_bar = tqdm(enumerate(self.valid_data), total=len(self.valid_data))\n self.model.eval()\n all_targets = []\n all_predictions = []\n with torch.no_grad():\n for idx, inputs in prog_bar:\n ids = inputs['inputs'].to(self.device, dtype=torch.long)\n mask = inputs['attention_mask'].to(self.device, dtype=torch.long)\n targets = inputs['targets'].to(self.device, dtype=torch.float)\n\n outputs = self.model(input_ids=ids, attention_mask=mask)\n all_targets.extend(targets.cpu().detach().numpy().tolist())\n all_predictions.extend(outputs.cpu().detach().numpy().tolist())\n\n val_rmse_loss = np.sqrt(mean_squared_error(all_targets, all_predictions))\n print('Validation RMSE: {:.2f}'.format(val_rmse_loss))\n \n return val_rmse_loss", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):\n if evaluator is not None:\n score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)\n if callback is not None:\n callback(score, epoch, steps)\n if score > self.best_score:\n self.best_score = score\n if save_best_model:\n self.save(output_path)", "def eval(self, model, data_iterators, key=\"val\"):\n assert key in (\"val\", \"test\")\n assert not (data_iterators[key] is None)\n criterion = self.criterion\n weight = self.weight\n device = self.device\n\n return evaluator.evaluate(\n model,\n device,\n data_iterators[key],\n self.target_labels,\n criterion,\n weight,\n labeled=True,\n )", "def evaluate(model, datagen, 
X_test, Y_test, batch_size, save_folder_path=None):\n\n print(\"[INFO] Evaluating model...\")\n\n scores = model.evaluate_generator(\n datagen.flow(X_test, Y_test, batch_size=batch_size),\n verbose=1)\n \n print(\"[INFO] Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n \n if save_folder_path is not None:\n # Write results to path\n assert os.path.isdir(save_folder_path) == True, \"Unable to save evaluation results, save_folder_path is not a folder\"\n eval_results_path = save_folder_path + \"/eval_results.txt\"\n eval_handle = open(eval_results_path, 'w')\n eval_handle.write(\"Model name: {}\\n\\n\".format(MODEL_NAME))\n eval_handle.write(\"Evaluation results:\\n{0}: {1:.2f}\\n{2}: {3:.2f}\".format(model.metrics_names[0], scores[0]*100, model.metrics_names[1], scores[1]*100))\n eval_handle.close()", "def validate(self, sess, valid_dataset):\n return self.test(sess, valid_dataset)", "def evaluate(self) :\n pass", "def evaluate_model(model, train_input, train_target, test_input, test_target, loss, save_plot, mname=None):\n # Evalute Model in train set\n epochs_number = len(loss)\n output = model.forward(train_input)\n train_loss = model.compute_loss(output, train_target).item()\n train_error = compute_number_error(output, train_target).item()\n\n print(\"\\nTraining Loss: \", train_loss)\n print(\"Training Number of errors: \", train_error)\n\n id_class_train = output.argmax(dim=1)\n if save_plot:\n plot_result(train_input, train_target, id_class_train, fname=mname)\n plot_loss(range(0, epochs_number), loss, fname=mname)\n\n # Deactivate dropout to test models\n model.enable_dropout(False)\n \n # Evaluate Model in test set\n output = model.forward(test_input)\n test_loss = model.compute_loss(output, test_target).item()\n test_error = compute_number_error(output, test_target).item()\n\n print(\"\\nTest Loss: \", test_loss)\n print(\"Test Number of errors: \", test_error)\n\n\n id_class_test = output.argmax(dim=1)\n if save_plot:\n plot_result(test_input, test_target, id_class_test, train=False, fname=mname)\n \n return [train_loss, train_error, test_loss, test_error]", "def evaluate(self):\n # Method variables definition\n X_train, X_test, y_train, y_test = dm.reshape_y_set_split_data(self.datasetManager)\n featureScaleDependentVariables = self.datasetManager.params.featureScaleDependentVariables\n\n # Feature Scaling\n X_scaler, X_train = dm.do_feature_scaling(X_train)\n if featureScaleDependentVariables:\n y_scaler, y_train = dm.do_feature_scaling(y_train)\n else:\n y_scaler = None\n y_train = self.datasetManager.y_train\n \n self.X_scaler = X_scaler\n self.y_scaler = y_scaler\n\n # Training the SVR model on the training set\n regressor = SVR(kernel = 'rbf')\n regressor.fit(X_train, y_train.ravel())\n self.regressor = regressor\n\n # Predicting the Test set results\n self.y_pred = y_scaler.inverse_transform(regressor.predict(X_scaler.transform(X_test))) if featureScaleDependentVariables else regressor.predict(X_test)\n \n # Returning the process result : the regression type and the predicted dependent variables set\n return [\"Support Vector Regression\", self.get_r2_score(y_test, self.y_pred)]", "def evaluate(self, train_set=\"train_set\", test_set=\"test_set\", targets=\"targets\", k=10):\n\n test_set = self.cache.fetch(test_set) if isinstance(test_set, str) else test_set\n\n # Predict\n preds = self.run(dataset=train_set, targets=targets, k=k)\n\n # Evaluate model\n print(\"evaluating model 
...\")\n score = evaluate(preds, test_set)\n print(\"MAP@{}: {:.5f}\\n\".format(k, score))\n\n return score", "def validate(model_path, dataset, batch_size, args):\n\n if args.gpu:\n model = torch.load(model_path)\n else:\n model = torch.load(model_path, map_location=torch.device('cpu'))\n\n ss, pred_slot, real_slot = Processor.prediction(\n model, dataset, \"test\", batch_size, args)\n\n slot_f1_score = Evaluator.computeF1Score(ss, real_slot, pred_slot, os.path.join(args.save_dir, 'eval.txt'))\n print(\"slot f1: {}\".format(slot_f1_score))\n\n return slot_f1_score", "def do_eval(sess,model,valid,batch_size):\n valid_X,valid_y,valid_p=valid\n number_examples=valid_X.shape[0]\n if number_examples>10000:\n number_examples=validation_size\n print(\"do_eval.valid.number_examples:\",number_examples)\n if number_examples>validation_size: valid_X,valid_y,valid_p=valid_X[0:validation_size],valid_y[0:validation_size],valid_p[0:validation_size]\n eval_loss,eval_counter,eval_acc=0.0,0,0.0\n for start,end in zip(range(0,number_examples,batch_size),range(batch_size,number_examples,batch_size)):\n feed_dict = {model.x_mask_lm: valid_X[start:end],model.y_mask_lm: valid_y[start:end],model.p_mask_lm:valid_p[start:end],\n model.dropout_keep_prob: 1.0} # FLAGS.dropout_keep_prob\n curr_eval_loss, logits_lm, accuracy_lm= sess.run([model.loss_val_lm,model.logits_lm,model.accuracy_lm],feed_dict) # logits:[batch_size,label_size]\n eval_loss=eval_loss+curr_eval_loss\n eval_acc=eval_acc+accuracy_lm\n eval_counter=eval_counter+1\n return eval_loss/float(eval_counter+small_value), eval_acc/float(eval_counter+small_value)", "def validation_epoch(self):\n self.model.eval()\n\n # Compute for training set\n train_loss, train_acc = compute_loss_and_accuracy(\n self.dataloader_train, self.model, self.loss_criterion\n )\n self.TRAIN_ACC.append(train_acc)\n self.TRAIN_LOSS.append(train_loss)\n\n # Compute for validation set\n validation_loss, validation_acc = compute_loss_and_accuracy(\n self.dataloader_val, self.model, self.loss_criterion\n )\n self.VALIDATION_ACC.append(validation_acc)\n self.VALIDATION_LOSS.append(validation_loss)\n print(\"Current validation loss:\", validation_loss, \" Accuracy:\", validation_acc)\n # Compute for testing set\n test_loss, test_acc = compute_loss_and_accuracy(\n self.dataloader_test, self.model, self.loss_criterion\n )\n self.TEST_ACC.append(test_acc)\n self.TEST_LOSS.append(test_loss)\n\n self.model.train()", "def evaluate(self):\n self.training = False", "def evaluate_model(self, t, scaling_parameters, system_parameters):\n raise NotImplementedError", "def evaluate(model,loss_fn, val_dataloader):\r\n # Put the model into the evaluation mode. 
The dropout layers are disabled during\r\n # the test time.\r\n model.eval()\r\n\r\n # Tracking variables\r\n val_accuracy = []\r\n val_loss = []\r\n\r\n # For each batch in our validation set...\r\n for batch in val_dataloader:\r\n # Load batch to GPU\r\n b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)\r\n\r\n # Compute logits\r\n with torch.no_grad():\r\n logits = model(b_input_ids, b_attn_mask)\r\n\r\n # Compute loss\r\n loss = loss_fn(logits, b_labels)\r\n val_loss.append(loss.item())\r\n\r\n # Get the predictions\r\n preds = torch.argmax(logits, dim=1).flatten()\r\n\r\n # Calculate the accuracy rate\r\n accuracy = (preds == b_labels).cpu().numpy().mean() * 100\r\n val_accuracy.append(accuracy)\r\n\r\n # Compute the average accuracy and loss over the validation set.\r\n val_loss = np.mean(val_loss)\r\n val_accuracy = np.mean(val_accuracy)\r\n\r\n return val_loss, val_accuracy", "def eval(self):\n self.train(mode=False)", "def evaluate(model, val_dataloader):\n # Put the model into the evaluation mode. The dropout layers are disabled during\n # the test time.\n model.eval()\n\n # Tracking variables\n val_accuracy = []\n val_loss = []\n\n # For each batch in our validation set...\n for batch in val_dataloader:\n # Load batch to GPU\n b_input_ids, b_attn_mask, b_labels = tuple(t.to(device) for t in batch)\n\n # Compute logits\n with torch.no_grad():\n logits = model(b_input_ids, b_attn_mask)\n\n # Compute loss\n loss = loss_fn(logits, b_labels.long())\n val_loss.append(loss.item())\n\n # Get the predictions\n preds = torch.argmax(logits, dim=1).flatten()\n\n # Calculate the accuracy rate\n accuracy = (preds == b_labels).cpu().numpy().mean() * 100\n val_accuracy.append(accuracy)\n\n # Compute the average accuracy and loss over the validation set.\n val_loss = np.mean(val_loss)\n val_accuracy = np.mean(val_accuracy)\n\n return val_loss, val_accuracy", "def _evaluate(self, training_state, val_iter, val_metric):\n val_iter.reset()\n val_metric.reset()\n\n for nbatch, eval_batch in enumerate(val_iter):\n self.module.forward(eval_batch, is_train=False)\n self.module.update_metric(val_metric, eval_batch.label)\n\n for name, val in val_metric.get_name_value():\n logger.info('Checkpoint [%d]\\tValidation-%s=%f', training_state.checkpoint, name, val)\n\n return self.training_monitor.eval_end_callback(training_state.checkpoint, val_metric)", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def evaluateModel(model, val_data, abs_idx2word, device, batch_size):\n #modify abs_idx2word by removing pad tokens so as to correctly calculate Reouge scores\n abs_idx2word[0] = ''\n\n #data setup\n val_data.move_to(torch.device('cpu')) #keep data on cpu\n val_dataloader = data.DataLoader(val_data, batch_size=batch_size, shuffle=True, num_workers=0)\n #model instantiation\n model = model.to(device=device)\n #evaluation\n logger.debug(f'\\tModel eval on validation data...')\n r1, r2, rl = evaluate.evaluate_model(model, val_dataloader, abs_idx2word, device, print_example=True)\n logger.debug(f'\\nRouge-1 is {r1:.4f}, Rouge-2 is {r2:.4f}, and Rouge-l is {rl:.4f}')", "def evaluate(self):\n raise NotImplementedError()", "def evaluate(self):\n try:\n self._evaluate()\n except Exception as e:\n if str(e) == \"assignment destination is read-only\":\n log.exception(\n \"Encountered error during scenario evaluation. 
Be sure \"\n + \"that the classifier's predict() isn't directly modifying the \"\n + \"input variable itself, as this can cause unexpected behavior in ART.\"\n )\n else:\n log.exception(\"Encountered error during scenario evaluation.\")\n sys.exit(1)\n\n if self.results is None:\n log.warning(f\"{self._evaluate} did not set self.results to a dict\")\n\n self.save()", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def eval_model(net, val_iter):\n correct = 0\n total = 0\n cm = conf.ConfusionMatrix([0, 1])\n net.eval()\n with torch.no_grad():\n for batch in val_iter:\n total += batch.correct.size(0)\n prediction = predict_batch(net, batch)\n cm.add_entry(batch.correct.tolist(), prediction.tolist())\n correct += (prediction == batch.correct).sum().item()\n\n return correct/total, cm.get_f1()", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= (data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def validate(config, model, val_iterator, criterion, scheduler=None):\n\n if isinstance(model, collections.Iterable) or isinstance(\n scheduler, collections.Iterable):\n raise ValueError(\n \"Need to provide custom validation function if 
using multi-model \"\n \"or multi-scheduler training.\")\n batch_time = AverageMeter()\n losses = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n correct = 0\n total = 0\n batch_idx = 0\n with torch.no_grad():\n end = time.time()\n for batch_idx, (features, target) in enumerate(val_iterator):\n if torch.cuda.is_available():\n features = features.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n\n # compute output\n output = model(features)\n loss = criterion(output, target)\n _, predicted = torch.max(output.data, 1)\n total += target.size(0)\n correct += (predicted == target).sum().item()\n\n # measure accuracy and record loss\n losses.update(loss.item(), features.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if config.get(TEST_MODE) and batch_idx == 0:\n break\n\n stats = {\n BATCH_COUNT: batch_idx + 1,\n \"batch_time\": batch_time.avg,\n \"validation_loss\": losses.avg,\n \"mean_accuracy\": correct / total,\n \"mean_loss\": losses.sum / total,\n }\n return stats", "def evaluate(self, predictor_model) -> Any:\n raise NotImplementedError()", "def evaluate_model(model, model_name, X_train, Y_train, X_test, ground_truth):\n\tprint(\"\t\tModel [\" + model_name + \"]\")\n\tmodel.fit(X_train, Y_train)\n\tY_pred = model.predict(X_test).astype(int)\n\tregression = np.sqrt(metrics.mean_squared_error(ground_truth, Y_pred))\n\treturn regression", "def val(model, val_loader, writer, step, infer):\n\n print('\\n')\n model.eval()\n val_losses = []\n n = len(val_loader)\n\n with torch.no_grad():\n for batch_idx, batch in enumerate(val_loader):\n\n # run only on a subset\n if batch_idx >= cfg['val_batches']:\n break\n\n batch_val_loss = infer(model, batch).item()\n\n # log\n printProgressBar(batch_idx, min(n, cfg['val_batches']), suffix='\\tValidation ...')\n\n val_losses.append(batch_val_loss)\n\n val_loss = sum(val_losses) / len(val_losses)\n writer.add_scalar('Steps/val_loss', val_loss, step)\n print('\\n')\n print('Finished validation with loss {:4f}'.format(val_loss))\n return val_loss", "def evaluate(self):\n raise NotImplementedError(\"Abstract method\")", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)", "def evaluate(self, valid_x, valid_y):\n results = self.model.evaluate(valid_x, valid_y, batch_size=self.BATCH_SIZE)\n print(results)\n return results[1:] + [\"NaN\"]", "def model_run(self, model, estimators):\n model.fit(self.X_train, self.y_train)\n y_score = model.predict(self.X_test)\n accu_train = np.sum(model.predict(self.X_train) == self.y_train) / self.y_train.size\n accu_test = np.sum(y_score == self.y_test) / self.y_test.size\n\n self.results.write(\"Model Results\\n\")\n self.results.write(\"Number of Estimators: \" + str(estimators) + \"\\n\")\n self.results.write(\"Accuracy on Train: \" + str(accu_train) + \"\\n\")\n self.results.write(\"Accuracy on Test: \" + str(accu_test) + \"\\n\")\n return model", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def evaluate_model(valp):\n\n a = valp.predict(data_inputs[\"Test\"], [], new=True)[0]\n\n m2e = np.mean(mse(a[\"o0\"], data_outputs[\"Test\"][\"o0\"]))\n acc = 1 - acc_err(a[\"o1\"][:, 0], np.argmax(data_outputs[\"Test\"][\"o1\"], axis=1))\n i_d = 50-np.mean(inception_score(a[\"o2\"][:100]))\n\n return np.array([m2e, acc, 
i_d])", "def validate(self):\n X_orig = make_X_from_features(self._conf)\n train_sz = len(load_array(self._conf, 'task.dataset.id_train'))\n X = X_orig[:train_sz, :]\n y = load_array(self._conf, 'task.dataset.y_train')\n y = y.reshape(y.size)\n\n cv_method_name = self._conf['task']['params']['validation']['class']\n cv_params_name = self._conf['task']['params']['validation'].get(\n 'params', {})\n cv_params_name = _to_str_value(cv_params_name)\n\n cv_method = dynamic_load(cv_method_name)\n mean_cv_score = cv_method(X, y, self, **cv_params_name)\n\n task_metrics = self._conf['task']['params']['metrics']\n task_method = task_metrics['method']\n\n ume.db.add_validation_score(\n os.path.basename(self._jn),\n ume.__version__,\n task_method,\n mean_cv_score)", "def _evaluate_model(\n run_id: str, dataset_filename: str, dataset_sampling_column: str = None\n):\n fix_multiprocessing_with_keras_on_macos()\n\n run = _get_run(run_id)\n hyperparameters = run.config\n\n # no need to run this on a gpu since it's 1 epoch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n with ModelBestH5File(run) as model_h5_filepath:\n model = _load_untrainable_model(hyperparameters, model_h5_filepath)\n\n model_name = run.config[\"model_name\"]\n x, y = _get_prepared_dataset(\n model_name, hyperparameters, dataset_filename, dataset_sampling_column\n )\n\n wandb.init(\n config={\n \"run_id\": run_id,\n \"dataset_filename\": dataset_filename,\n \"dataset_sampling_column\": dataset_sampling_column,\n },\n tags=[\"model-evaluation\"],\n )\n\n batch_size = hyperparameters[\"batch_size\"]\n label_scale_factor_mmhg = hyperparameters[\"label_scale_factor_mmhg\"]\n acceptable_error_mg_l = hyperparameters[\"acceptable_error_mg_l\"]\n acceptable_fraction_outside_error = hyperparameters[\n \"acceptable_fraction_outside_error\"\n ]\n\n # we're using fit() instead of evaluate() to get the functionality of these callbacks\n # training performance in the results should be ignored, as it can be affected by some\n # training-only layers such as dropout\n model.fit(\n x,\n y,\n batch_size=batch_size,\n epochs=1,\n verbose=2,\n validation_data=(x, y),\n callbacks=[\n ThresholdValMeanAbsoluteErrorOnCustomMetric(\n acceptable_fraction_outside_error=acceptable_fraction_outside_error,\n acceptable_error_mg_l=acceptable_error_mg_l,\n ),\n WandbCallback(verbose=1, monitor=\"val_adjusted_mean_absolute_error\"),\n LogPredictionsAndWeights(\n metric=\"val_adjusted_mean_absolute_error\",\n dataset=([], [], x, y),\n label_scale_factor_mmhg=label_scale_factor_mmhg,\n ),\n ],\n )\n\n # returning model and dataset for use in jupyter notebooks\n return model, (x, y)", "def train_and_validate(trnK, trnY, valK, valY, Cs):\n models = []\n trn_error = []\n val_error = []\n sup_vect = []\n\n for C in Cs:\n #Training\n model = train(trnK, trnY, C)\n trn_error.append((100 - evaluate(trnK, trnY, model)) / 100)\n sup_vect.append(len(model.get_SV()))\n models.append(model)\n #Evaluate\n val_error.append((100 - evaluate(valK, valY, model)) / 100)\n return(models, trn_error, val_error, sup_vect)", "def evaluate_questions(self):\n for question in self.question_list:\n question.evaluate_question()", "def run_validation(self, data=empty):\n self._validated_data = super().run_validation(data)\n return self._validated_data", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = 
batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def _evaluate_during_fit(self, test_loader, epoch):", "def model_evaluate(self, test):\n features = {name: np.array(value) for name, value in test.items()}\n labels = {name: features.pop(name) for name in self.label_names}\n metrics = self.model.evaluate(x=features, y=labels, batch_size=5)\n return metrics", "def run(self):\n\n # TODO try/catch to ensure proper shutdown even if error encountered\n\n params = self._get_params_for_run()\n result_rows = []\n\n # Check for valid configuration\n if self._test_docs is None and self._k_folds == 0:\n self._logger.error(\"Explicit test set or number of cross-validation folds must be specified.\")\n metrics = Metrics()\n result_row = {**params, **metrics.get_scores_as_dict()}\n result_rows.append(result_row)\n return result_rows\n\n # Continue while there are configured parameter settings to evaluate\n while params is not None:\n\n # Get collection of training and test sets for current run\n data_sets = self._get_training_and_test_sets()\n for set_index, (training_docs, test_docs) in enumerate(data_sets):\n\n # Retrieve an encoder module trained with the specified configuration\n self._encoder = self._get_encoder(params)\n\n set_index += 1 # Only used for user output, so start index at 1\n\n num_sets = len(data_sets)\n if num_sets > 1:\n self._logger.info(\"Training and evaluating fold {} of {}.\".format(set_index, num_sets))\n\n start = time.time()\n self._train_and_evaluate(params, self._encoder, training_docs, test_docs)\n runtime = time.time() - start\n self._logger.info(\n \"Trained and evaluated fold {} of sequence model in {} seconds.\".format(set_index, runtime))\n\n # Combine run parameters with evaluation results and store\n result_row = {**params, **self._evaluator.get_score_as_dict()}\n result_rows.append(result_row)\n\n # Check if model should be saved\n if self._is_model_saving_enabled():\n\n operator = self._evaluator.get_operator()\n current_score = self._evaluator.get_score()\n\n best_model = self._get_best_model()\n if best_model is not None:\n (best_metric, _) = best_model\n if not operator(best_metric, current_score):\n self._set_best_model(current_score, (params, self._encoder, self._sequence_learner))\n else:\n # New model is the best one if no previous existed\n self._set_best_model(current_score, (params, self._encoder, self._sequence_learner))\n\n # Invoke optimizer callback to report on results of this run\n if self._optimizer is not None:\n self._optimizer.process_run_result(params=params,\n score=self._evaluator.get_score_as_dict(),\n encoder=self._encoder,\n sequence_learner=self._sequence_learner)\n\n # Check if there are additional runs to execute\n if self._optimizer is not None:\n params = self._optimizer.get_next_params()\n else:\n params = None\n\n # Store best model, if configured\n if self._is_model_saving_enabled():\n path, name = self._get_model_save_path_and_name()\n try:\n self.save(path, name)\n except Exception:\n self._logger.error(\"Failed to save model, clearing Keras session and trying again.\")\n 
self._sequence_learner.clear_session()\n self.save(path, name)\n\n # Clear Keras/Tensorflow models # TODO why a second time?\n if self._sequence_learner is not None:\n self._sequence_learner.clear_session()\n\n return pd.DataFrame(result_rows)", "def eval_model_on_valid(args):\n cfg, lbl = util.get_label_cfg_by_args(args)\n uid = cfg['uniqueid']\n print('We are playing with %s' % uid)\n outdir='models/%s/gate_expert' % uid\n outname='gate_expert_model.pt'\n if KLLOSS:\n outname = 'gate_expert_kldiv_model.pt'\n if args.warm:\n outname = outname.replace('.pt', '_warm.pt')\n mdl_path = os.path.join(outdir, outname)\n gate_expert = GateExpertNet(mdl_path, False)\n eval_fun = gate_expert.get_y\n\n valid_set = np.load(cfg['valid_path'])\n valid_x = valid_set[cfg['x_name']]\n valid_y = valid_set[cfg['y_name']]\n predy = eval_fun(valid_x)\n # dump output into some file\n valid_name = 'data/%s/gate_expert_valid_data.npz' % uid\n if KLLOSS:\n valid_name = valid_name.replace('_valid', '_kldiv_valid')\n if args.warm:\n valid_name = valid_name.replace('.npz', '_warm.npz')\n np.savez(valid_name, x=valid_x, y=predy)", "def evaluate(self, dataset):\n\t\tpass", "def evaluate(self, model, X_train, X_test, y_train, y_test):\n\n model.fit(X_train,y_train)\n y_pred = model.predict(X_test)\n R2 = r2_score(y_test, y_pred)\n MAE = round(mape(y_test, y_pred), 2)\n RMSE = round(rmse(y_test, y_pred), 2)\n\n res = {'Model': self.model, 'R2' : R2, 'MAPE': MAE, 'RMSE': RMSE}\n return res", "def _perform_validation(self):\n # -- Extract the information of the current fold -- #\n trained_on_folds = self.already_trained_on[str(self.fold)]\n\n # -- Extract all tasks into a list to loop through -- #\n tasks = list(self.mh_network.heads.keys())\n\n # -- Add the current trainer_class name to prev_trainer, so the loop does not end in an error -- #\n # -- since this trainer is not yet a prev_trainer.. --> Remove the trainer again after the loop -- #\n # -- because this creates only a view and changes self.already_trained_on as well which we do not want to -- #\n trained_on_folds['prev_trainer'].append(self.trainer_class_name)\n \n # -- NOTE: Since the head is an (ordered) ModuleDict, the current task is the last head, so there -- #\n # -- is nothing to restore at the end. 
-- #\n # -- NOTE: Since the current task the model is training on is always added at the end of the list, -- #\n # -- After this loop everything is automatically set as before, so no restoring needs to be done -- #\n # -- For each previously trained task perform the validation on the full validation set -- #\n running_task_list = list()\n for idx, task in enumerate(tasks):\n # -- Update running task list and create running task which are all (trained tasks and current task joined) for output folder name -- #\n running_task_list.append(task)\n running_task = join_texts_with_char(running_task_list, '_')\n\n # -- Get default configuration for nnunet/nnunet_ext model (finished training) -- #\n plans_file, _, self.dataset_directory, _, stage, \\\n _ = get_default_configuration(self.network_name, task, running_task, trained_on_folds['prev_trainer'][idx],\\\n self.tasks_joined_name, self.identifier, extension_type=self.extension)\n\n # -- Load the plans file -- #\n self.plans = load_pickle(plans_file)\n\n # -- Extract the folder with the preprocessed data in it -- #\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % stage)\n \n # -- Create the corresponding dataloaders for train ind val (dataset loading and split performed in function) -- #\n # -- Since we do validation, there is no need to unpack the data -- #\n self.dl_tr, self.dl_val = self.get_basic_generators()\n\n # -- Load the dataset for the task from the loop and perform the split on it -- #\n #self.dataset = load_dataset(folder_with_preprocessed_data)\n #self.do_split()\n\n # -- Extract corresponding self.val_gen --> the used function is extern and does not change any values from self -- #\n self.tr_gen, self.val_gen = get_moreDA_augmentation(self.dl_tr, self.dl_val, # Changed due to do_split ;)\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params,\n deep_supervision_scales=self.deep_supervision_scales,\n pin_memory=self.pin_memory,\n use_nondetMultiThreadedAugmenter=False)\n # -- Update the log -- #\n self.print_to_log_file(\"Performing validation with validation data from task {}.\".format(task))\n\n # -- Activate the current task to train on the right model -- #\n # -- Set self.network, since the parent classes all use self.network to train -- #\n # -- NOTE: self.mh_network.model is also updated to task split ! 
-- #\n self.network = self.mh_network.assemble_model(task)\n \n # -- For evaluation, no gradients are necessary so do not use them -- #\n with torch.no_grad():\n # -- Put current network into evaluation mode -- #\n self.network.eval()\n # -- Run an iteration for each batch in validation generator -- #\n for _ in range(self.num_val_batches_per_epoch):\n # -- Run iteration without backprop but online_evaluation to be able to get TP, FP, FN for Dice and IoU -- #\n _ = self.run_iteration(self.val_gen, False, True)\n \n # -- Calculate Dice and IoU --> self.validation_results is already updated once the evaluation is done -- #\n self.finish_online_evaluation_extended(task)\n\n # -- Remove the trainer now from the list again -- #\n trained_on_folds['prev_trainer'] = trained_on_folds['prev_trainer'][:-1]\n\n # -- Save the dictionary as json file in the corresponding output_folder -- #\n save_json(self.validation_results, join(self.output_folder, 'val_metrics.json'))\n\n # -- Update already_trained_on if not already done before -- #\n if not self.already_trained_on[str(self.fold)]['val_metrics_should_exist']:\n # -- Set to True -- #\n self.already_trained_on[str(self.fold)]['val_metrics_should_exist'] = True\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))", "def evaluate(self, ts_loader=None):\n # start evaluation of the model\n self.tr_model.eval()\n samples, correct = 0, 0\n \n # check if a dataloader was provided for evaluation\n loader = self.ts_loader if not ts_loader else ts_loader\n \n with torch.no_grad():\n for x, y in loader:\n \n x, y = x.to(device), y.to(device)\n \n y_ = self.tr_model(x)\n _, predicted = torch.max(y_.detach(), 1)\n \n samples += y.shape[0]\n correct += (predicted == y).sum().item()\n \n # return evaluation statistics\n return {\"accuracy\" : correct/samples}", "def evaluate_model(model, testset):\n\n # Sort data by top level label to ease inspection\n testset = testset.sort_using_layer(-1, reverse=True)\n\n # Feed the samples to the model to obtain each layers' activations\n v = testset.get_layer(0)\n hs = model.transform(v)[1:]\n\n # Read model weights\n ws = [params['w'] for params in model.parameters]\n del params\n\n # Take the (hidden) labels from the data set\n ls = testset.get_layers()[1:]\n\n # In each layer, reorder and invert neurons to match best with the labels\n for i in range(len(ls)):\n hs[i], ws[i] = align_with_labels(ls[i], hs[i], ws[i])\n del i\n\n # Measure correlations, etcetera\n metrics = compare(ls, hs)\n\n # Simply return a dict with all used variables\n return locals()", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def evaluate_model(\n self,\n val_loader,\n additional_gpu=None,\n metrics=None,\n inputs_key=\"image\",\n labels_key=\"label\"\n ):\n # predict on the validation set\n all_preds = []\n all_labels = []\n\n self.model.eval()\n\n if additional_gpu is not None:\n device = additional_gpu\n else:\n device = self.device\n\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n inputs, labels = data[inputs_key], data[labels_key]\n inputs = inputs.to(device)\n labels = labels.to(device)\n # forward + backward + optimize\n outputs = self.model(inputs)\n # run inference\n all_preds, all_labels = predict(\n 
outputs,\n labels,\n all_preds,\n all_labels,\n self.prediction_type,\n self.criterion,\n class_threshold=self.class_threshold\n )\n\n # compute confusion matrix\n cm = confusion_matrix(all_labels, all_preds)\n plt.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\n # Visualize the confusion matrix\n classes = [\"control\", \"patient\"]\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \"d\"\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n plt.title(\"Confusion Matrix\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n plt.show()\n\n # print metrics\n if metrics is not None:\n for metric in metrics:\n if isinstance(all_preds[0], list):\n print(\"{}: {}\".format(metric.__name__, np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])))\n else:\n print(\"{}: {}\".format(metric.__name__, metric(all_labels, all_preds)))\n\n\n self.model.train()", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )", "def run_evaluation(\n self,\n training_set,\n validation_set,\n test_set,\n progress_tracker: ProgressTracker,\n train_summary_writer,\n validation_summary_writer,\n test_summary_writer,\n model_hyperparameters_path,\n output_features,\n metrics_names,\n save_path,\n loss: torch.Tensor,\n all_losses: Dict[str, torch.Tensor],\n early_stopping_steps: int,\n checkpoint_manager: CheckpointManager,\n ) -> bool:\n start_time = time.time()\n self.callback(lambda c: c.on_eval_start(self, progress_tracker, save_path))\n\n progress_tracker.checkpoint_number += 1\n if self.is_coordinator():\n logger.info(f\"\\nRunning evaluation for step: {progress_tracker.steps}, epoch: {progress_tracker.epoch}\")\n\n # ================ Eval ================\n # eval metrics on train\n self.eval_batch_size = max(self.eval_batch_size, progress_tracker.batch_size)\n\n if self.evaluate_training_set:\n # Run a separate pass over the training data to compute metrics\n self.evaluation(\n training_set, \"train\", progress_tracker.train_metrics, self.eval_batch_size, progress_tracker\n )\n else:\n # Use metrics accumulated during training\n metrics = self.model.get_metrics()\n append_metrics(self.model, \"train\", metrics, progress_tracker.train_metrics, progress_tracker)\n self.model.reset_metrics()\n\n self.write_eval_summary(\n summary_writer=train_summary_writer,\n metrics=progress_tracker.train_metrics,\n 
step=progress_tracker.steps,\n )\n\n if validation_set is not None:\n self.callback(lambda c: c.on_validation_start(self, progress_tracker, save_path))\n\n # eval metrics on validation set\n self.evaluation(\n validation_set,\n VALIDATION,\n progress_tracker.validation_metrics,\n self.eval_batch_size,\n progress_tracker,\n )\n\n self.write_eval_summary(\n summary_writer=validation_summary_writer,\n metrics=progress_tracker.validation_metrics,\n step=progress_tracker.steps,\n )\n\n self.callback(lambda c: c.on_validation_end(self, progress_tracker, save_path))\n\n if test_set is not None:\n self.callback(lambda c: c.on_test_start(self, progress_tracker, save_path))\n\n # eval metrics on test set\n self.evaluation(test_set, TEST, progress_tracker.test_metrics, self.eval_batch_size, progress_tracker)\n\n self.write_eval_summary(\n summary_writer=test_summary_writer,\n metrics=progress_tracker.test_metrics,\n step=progress_tracker.steps,\n )\n\n self.callback(lambda c: c.on_test_end(self, progress_tracker, save_path))\n\n elapsed_time = (time.time() - start_time) * 1000.0\n\n if self.is_coordinator():\n logger.info(f\"Evaluation took {time_utils.strdelta(elapsed_time)}\\n\")\n print_metrics_table(\n output_features,\n progress_tracker.train_metrics,\n progress_tracker.validation_metrics,\n progress_tracker.test_metrics,\n )\n\n # ================ Validation Logic ================\n should_break = False\n if validation_set is not None and validation_set.size > 0:\n should_break = self.check_progress_on_validation(\n progress_tracker,\n self.validation_field,\n self.validation_metric,\n save_path,\n model_hyperparameters_path,\n self.increase_batch_size_on_plateau,\n self.increase_batch_size_on_plateau_patience,\n self.increase_batch_size_on_plateau_rate,\n self.max_batch_size,\n self.increase_batch_size_eval_metric,\n self.increase_batch_size_eval_split,\n early_stopping_steps,\n self.skip_save_model,\n checkpoint_manager,\n )\n else:\n # There's no validation, so we save the model.\n if not self.skip_save_model:\n logger.info(\"Saving model.\\n\")\n checkpoint_manager.save_best(progress_tracker.steps)\n self.callback(lambda c: c.on_save_best_checkpoint(self, progress_tracker, save_path))\n\n # Trigger eval end callback after any model weights save for complete checkpoint\n self.callback(lambda c: c.on_eval_end(self, progress_tracker, save_path))\n\n # Clear the CUDA cache to free up memory\n torch.cuda.empty_cache()\n\n return should_break", "def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in 
model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds" ]
[ "0.7588236", "0.712658", "0.7041165", "0.70291466", "0.68915606", "0.6771378", "0.67110956", "0.6702142", "0.66912436", "0.66912436", "0.66912436", "0.6676459", "0.6667484", "0.6665729", "0.6603032", "0.6589405", "0.6569131", "0.6567912", "0.655075", "0.6545247", "0.65360427", "0.6535638", "0.6528428", "0.65271115", "0.652358", "0.6506216", "0.64894754", "0.6473953", "0.6431577", "0.6427257", "0.64255595", "0.64139545", "0.6407953", "0.6397605", "0.63888747", "0.63758355", "0.6370478", "0.6361853", "0.6340368", "0.6335281", "0.63175154", "0.63112044", "0.6293661", "0.6293661", "0.6292675", "0.6290069", "0.628209", "0.6276835", "0.62746376", "0.6274327", "0.6266767", "0.62660676", "0.626459", "0.6244754", "0.62426794", "0.62280625", "0.622722", "0.62220037", "0.6220553", "0.62181574", "0.62043786", "0.6200892", "0.6199659", "0.6178504", "0.6165893", "0.61525095", "0.6144011", "0.6138462", "0.6129053", "0.6111269", "0.61092794", "0.61067516", "0.61024183", "0.6096044", "0.6093634", "0.6092644", "0.6091732", "0.6083506", "0.60716194", "0.60684836", "0.6056754", "0.605466", "0.6047323", "0.6044459", "0.6042175", "0.60336673", "0.6033418", "0.6033317", "0.60285634", "0.60269976", "0.6017069", "0.601265", "0.60064113", "0.59947246", "0.59857297", "0.5981606", "0.5981365", "0.59772897", "0.59767336", "0.5966191", "0.59636945" ]
0.0
-1
Tests the model on the test set, measuring accuracy. Returns a float: the total accuracy of the model on the test set.
def test(self):
    args = self.args
    model = self.model
    dataset = self.dataset
    dataset.set_split('test')
    batch_generator = generate_nmt_batches(dataset, batch_size=len(dataset),
                                           device=args.device)
    acc_sum = 0.0
    model.eval()
    for batch_index, batch_dict in enumerate(batch_generator):
        # step 1. compute the output
        if isinstance(model, NMTModelWithMLTM):
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_mltm_vector'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        else:
            y_pred = model(batch_dict['x_source'],
                           batch_dict['x_source_length'],
                           batch_dict['x_target'])
        acc_t = compute_accuracy(y_pred, batch_dict['y_target'], self.mask_index)
        acc_sum += acc_t
    return acc_sum / (batch_index + 1)
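The compute_accuracy helper called above is not included in this snippet. A minimal sketch of what such a helper might look like for sequence outputs, assuming (batch, seq_len, vocab) prediction scores and a padding index to ignore; the shapes and the percentage scaling are assumptions, not taken from the source:

import torch

def compute_accuracy(y_pred, y_true, mask_index):
    # Assumed shapes: y_pred is (batch, seq_len, vocab_size) scores,
    # y_true is (batch, seq_len) target token indices.
    _, y_pred_indices = y_pred.max(dim=2)
    correct = torch.eq(y_pred_indices, y_true).float()
    valid = torch.ne(y_true, mask_index).float()  # skip padded positions
    n_correct = (correct * valid).sum().item()
    n_valid = valid.sum().item()
    return n_correct / n_valid * 100  # percentage; a 0-1 fraction is equally plausible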
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_test_accuracy(self):\n print('\\n# Evaluate on test data')\n results = self.model.evaluate(self.data.test_dataset)\n print('\\ntest loss, test acc:', results)", "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_test)\n correct = 0\n for i in range(n):\n # Predict by running forward pass through the neural network\n pred = self.network.predict(self.network.x_test[i])\n # Sanity check of the prediction\n assert 0 <= pred <= 1, \"The prediction needs to be in [0, 1] range.\"\n # Check if right class is predicted\n correct += self.network.y_test[i] == round(float(pred))\n return round(correct / n, 3)", "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def testAccuracy(self):\n \n loader = torch.utils.data.DataLoader(dataset=self.test, \n shuffle=False)\n acc = accuracy(self.model, loader)\n self.assertEqual(acc, 1.0)\n print(acc)", "def test(self):\n self.load()\n bottleneck_features = np.load(self.feature_path)\n test = bottleneck_features['test']\n _, test_targets = load_dataset(self.image_path_test) \n predictions = [np.argmax(self.model.predict(np.expand_dims(feature, axis=0))) for feature in test]\n test_accuracy = 100*np.sum(np.array(predictions) == np.argmax(test_targets, axis=1))/len(predictions)\n print('{}, test accuracy: {:.4f}%'.format(self.name, test_accuracy))\n return test_accuracy", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def get_test_accuracy(model, X_test, y_test):\n # Make predictions - test accuracy\n test_pred = model.predict(X_test)\n score = accuracy_score(test_pred, y_test)\n print(\"Test Accuracy:\", score)\n\n return test_pred", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def test(model, X_test, y_test, config):\n loss, y_pred = model.forward_pass(X_test)\n\n y_maxVals = np.amax(y_pred, axis=1).reshape(-1, 1)\n y_1hot = np.where(y_maxVals == y_pred, 1, 0)\n correct = np.sum(y_test * y_1hot)\n\n accuracy = correct / len(X_test)\n return accuracy", "def test(self):\n self.eval()\n test_mask = self.data.test_mask\n labels = self.data.y\n output = self.forward(self.data)\n # output = self.output\n loss_test = F.nll_loss(output[test_mask], labels[test_mask])\n acc_test = utils.accuracy(output[test_mask], labels[test_mask])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf 
= joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def report_accuracy(\r\n predictions: pd.DataFrame, \r\n test_y: pd.DataFrame\r\n) -> None:\r\n # Calculate accuracy of predictions\r\n accuracy = (predictions == test_y).mean()\r\n \r\n # Log the accuracy of the model\r\n log = logging.getLogger(__name__)\r\n log.info(\"Model accuracy on test set: %0.2f%%\", accuracy * 100)\r\n\r\n mlflow.log_metric(\"accuracy\", accuracy)\r\n mlflow.set_tag(\"Model Version\", 1)", "def accuracy(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def test(model, x_test, y_test):\n loss = model.forward(x_test, y_test)\n predict = np.zeros_like(model.y)\n predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n accuracy = sum([1 if all(predict[i] == y_test[i]) else 0 for i in range(len(y_test))])/len(y_test)\n\n return loss, accuracy\n raise NotImplementedError(\"Test method not implemented\")", "def get_accuracy(model, task, batchmanager, test_set=False):\n\n model.eval()\n count, num = 0., 0\n batchmanager = batchmanager if isinstance(batchmanager, BatchManager) else batchmanager.batchmanagers[task]\n\n iter = batchmanager.test_iter if test_set else batchmanager.dev_iter\n\n with torch.no_grad():\n for batch in iter: \n data, targets = batch\n out = model(data, task)\n predicted = out.argmax(dim=1)\n count += (predicted == targets).sum().item()\n num += len(targets)\n\n model.train()\n return count / num", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def test_accuracy(self, _input_data, _labels, quiet=False):\n test_loss, test_accuracy = (self.merged_model).evaluate(\n _input_data, _labels, verbose=0\n )\n\n return test_accuracy", "def test(model, X_test, y_test, config):\r\n loss, predictions = model.forward_pass(X_test, y_test)\r\n return caclulate_accuracy_of_predictions(predictions, y_test)", 
"def reportAccuracy(self, testLabels=\"\"):\n assert len(self._predictions) > 0\n rawTestLabelDump = self._read_file(testLabels)\n formattedTestLabels = [line for line in rawTestLabelDump.split('\\n')]\n corrects = [1 for x in zip(self._predictions, formattedTestLabels) if x[0] == x[1]]\n return (len(corrects) / len(self._predictions)) * 100", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def accuracy(self):\n return (self.table[0, 0] + self.table[1, 1]) / self.N", "def accuracy(self):", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)", "def accuracy(y_test, y_pred):\n\treturn accuracy_score(y_test, y_pred)", "def test(model, test_loader, device):\n model.eval()\n test_loss = 0\n accuracy = 0\n with torch.no_grad():\n for inputs, labels in test_loader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n output = model.forward(inputs)\n\n # Calculate accuracy\n ps = torch.exp(output)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\n print(f\"Accuracy on test set is: {accuracy/len(test_loader):.3f}\")", "def evaluate_model(self, test_data, test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def test(xtest, ytest, neural_net):\n loss, accuracy = neural_net.evaluate(xtest, ytest, verbose=0)\n return accuracy", "def test(self, idx_test):\n self.eval()\n output = self.predict()\n # output = self.output\n loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])\n acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def evaluate_accuracy(model: nn.Module, loader: DataLoader) -> float:\n \n accuracy = 0.\n counter = 0\n \n model.eval()\n \n with torch.no_grad():\n for (input, target, _) in loader:\n output, _ = model(input)\n \n accuracy += (output >= 0.5) == target\n counter += target.size(0)\n \n return (accuracy.sum() / counter).float().item()", "def evaluate_model(model, X_test_input, y_test_input):\r\n pred_class = [model.classes_[i] for i in model.predict_proba(X_test_input).argmax(axis=-1)]\r\n pred_accuracy = np.sum(np.array(y_test_input)==np.array(pred_class))/len(pred_class)\r\n return pred_class, pred_accuracy", "def 
test_model(model, trainset, testset):\n model.eval()\n \n predictions = []\n actuals = []\n \n for data in testset:\n # data will have batch of features and labels\n X = data[0:4]\n y = data[4:]\n \n pred = np.round(model(X).detach().numpy())\n actual = y.detach().numpy()\n # print(f'pred: {pred}')\n # print(f'actual: {actual}')\n predictions.append(pred)\n actuals.append(actual)\n \n print(accuracy_score(y_true=actuals, y_pred=predictions))\n \n \n # Confusion Matrix\n \n confusion_matrix = np.zeros((3, 3))\n for i,j in zip(predictions, actuals):\n confusion_matrix[i, j] += 1\n print(\"Confusion matrix:\\n\", confusion_matrix)", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def evaluate_model(model, X_test, Y_test, category_names):\n\n y_pred = model.predict(X_test)\n Y_test_as_array = np.array(Y_test)\n for i in range(len(category_names)):\n print(\"{} accuracy {} precision {} recall {} f1 {}\".format(\n category_names[i],\n (y_pred[:, i] == Y_test_as_array[:, i]).mean(), # accuracy\n precision_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # precision\n recall_score(Y_test_as_array[:, i], y_pred[:, i], average=None), # recall\n f1_score(Y_test_as_array[:, i], y_pred[:, i], average=None) # f1\n ))\n print(\"mean accuracy {}\".format((y_pred == Y_test_as_array).mean().mean()))", "def test_net(self, test_loader):\n test_loss = 0\n correct = 0\n cr = self._criterion\n # Make sure we don't modify the weights\n # while testing\n with torch.no_grad():\n for data, target in test_loader:\n data = data.cuda()\n target = target.cuda()\n # Feed the data\n output = self(data).cuda()\n # Calculate the loss\n test_loss += cr(output, target)\n # Get the predicted output and test whether or not\n # it aligns 
with the correct answer\n pred = output.data.max(1, keepdim=True)[1]\n correct += pred.eq(target.data.view_as(pred)).sum()\n test_loss /= len(test_loader.dataset)\n # Output accuracy\n print('\\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n return 100. * float(correct) / len(test_loader.dataset)", "def accuracy(self, X_train, y_train):\n y_train_pred = self.predict(X_train)\n diffs = y_train_pred - y_train\n count = 0.\n for i in range(y_train.shape[0]):\n if diffs[i] != 0:\n count+=1\n return 100 - count*100/y_train.shape[0]", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_pred=model.predict(X_test)\n acc=[]\n for i,c in enumerate(Y_test.columns):\n print(c)\n print(classification_report(Y_test[c], Y_pred[:,i]))\n acc.append(accuracy_score(Y_test[c], Y_pred[:,i]))\n print('Accuracy :',np.mean(acc))\n\n pass", "def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'", "def eval_performance(weights, test_y, test_x):\n y_predicted = predict_labels(weights, test_x)\n accuracy = len(y_predicted[y_predicted == test_y]) / len(y_predicted)\n return accuracy", "def test_accuracy(y, tx, w):\n labels = predict_regression_labels(w, tx)\n \n return (labels==y).sum()/len(y)", "def accuracy(self):\n\t\treturn self.accuracy_", "def evaluate(self, test):\n self.logger.info(\"Testing model over test set\")\n metrics = self.run_evaluate(test)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n return metrics", "def test(self, X_test, y_test, **kwargs):\n y_pred = self.predict(X_test)\n self._accuracy = mape(y_test, y_pred)\n return self._accuracy", "def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics", "def evaluate_test(model, history, class_labels, train_X, test_X, train_y, test_y):\n train_loss, train_acc = model.evaluate(train_X, train_y, verbose=0)\n test_loss, test_acc = model.evaluate(test_X, test_y, verbose=0)\n print('Accuracy \\n Train: %.3f, Test: %.3f' % (train_acc, test_acc))\n print('Loss \\n Train: %.3f, Test: %.3f \\n' % (train_loss, test_loss))\n # plot loss during training\n plt.subplots_adjust(hspace = .5, wspace = 0.5)\n plt.subplot(211)\n plt.title('Loss', weight='bold')\n plt.plot(history.history['loss'], label='train')\n plt.plot(history.history['val_loss'], label='val')\n plt.legend()\n # plot accuracy during training\n 
plt.subplot(212)\n plt.title('Accuracy', weight='bold')\n plt.plot(history.history['acc'], label='train')\n plt.plot(history.history['val_acc'], label='val')\n plt.legend()\n plt.show()\n print('\\n')\n # predict probabilities for test set\n yhat_probs = model.predict(test_X, verbose=0)\n # predict classes for test set\n yhat_classes = model.predict_classes(test_X, verbose=0)\n # reduce to 1d array\n yhat_probs = yhat_probs[:, 0]\n yhat_classes = yhat_classes[:, 0]\n # calculate metrics\n report = metrics.classification_report(test_y, yhat_classes, target_names=class_labels)\n confusion_matrix = metrics.confusion_matrix(test_y, yhat_classes)\n plot_confusion_matrix(confusion_matrix, class_labels)\n print('\\n')\n return report", "def test(model, X_test, y_test):\n pred, loss = model(X_test, y_test)\n test_pred = np.argmax(pred, axis=1) \n acc = np.mean(np.argwhere(y_test==1)[:,1]==test_pred) \n\n print(\"Test acc is:\\n\", acc) \n return test\n raise NotImplementedError(\"Test method not implemented\")", "def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def test(self, model, dl_test, test_verbose=True, return_acc=True):\n\n loss_test = []\n acc_test = []\n for batch_idx, batch in enumerate(dl_test):\n model.eval()\n with torch.no_grad():\n loss, acc = model.test_step(batch, batch_idx)\n loss_test.append(loss.item())\n acc_test.append(acc)\n\n avg_loss_test = round(sum(loss_test) / len(loss_test), 2)\n avg_acc_test = round(sum(acc_test) / len(acc_test), 2)\n if test_verbose:\n print(f\"loss_test={avg_loss_test}\\t acc_test={avg_acc_test}\")\n if return_acc:\n return avg_acc_test", "def get_accuracy(test_sets, predictions, class_index):\n actual_classes = [test_set[class_index] for test_set in test_sets]\n\n num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))\n\n return float(num_correct) / len(test_sets)", "def test_eval(model, test_set):\n num_test_batch = len(test_set)\n test_loss = np.zeros((num_test_batch, 1), dtype=float)\n test_acc = np.zeros((num_test_batch, 1), dtype=float)\n for ibatch, batch in enumerate(test_set):\n result = model.test_on_batch({'input':batch[0]}, {'fp1':batch[1], 'fp2':batch[1], 'fp3':batch[1], 'ave':batch[1]})\n test_loss[ibatch] = result[0]\n test_acc[ibatch] = result[-1]\n return np.mean(test_loss), np.mean(test_acc)", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def test_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)", "def test(model, test_inputs, test_labels):\r\n m = test_inputs.shape[0]\r\n total_acc = 0\r\n total_labels = np.empty((0,6))\r\n total_preds = np.empty((0,6))\r\n\r\n num_batches = int(m / model.batch_size)\r\n\r\n for i in np.arange(0, m, model.batch_size):\r\n if(i % (model.batch_size * PRINT_EVERY) 
== 0):\r\n print(f\"Testing: {int(i / model.batch_size)} out of {num_batches} batches\")\r\n batch_inputs = test_inputs[i:i+model.batch_size]\r\n batch_labels = test_labels[i:i+model.batch_size]\r\n if(len(batch_labels) < model.batch_size):\r\n break\r\n\r\n preds = model(batch_inputs)\r\n total_acc += model.accuracy(preds, batch_labels)\r\n\r\n total_labels = np.append(total_labels, batch_labels, axis=0)\r\n total_preds = np.append(total_preds, preds, axis=0)\r\n\r\n roc_auc_acc = 0\r\n for i in range(6):\r\n class_labels = total_labels[:,i]\r\n class_preds = total_preds[:,i]\r\n if i == 1 or i == 3:\r\n class_labels = np.append(class_labels, [1], axis=0)\r\n class_preds = np.append(class_preds, [1], axis=0)\r\n class_score = roc_auc_score(class_labels, class_preds)\r\n print(f\"Label: {i} ROC AUC Score: {class_score}\")\r\n roc_auc_acc += class_score\r\n\r\n roc_auc_acc = float(roc_auc_acc / 6)\r\n\r\n accuracy = total_acc / num_batches\r\n return accuracy, roc_auc_acc", "def accuracy(model, dataset, filename, batchsize=2):\n total, correct = 0, 0\n model.eval()\n dataloader = DataLoader(dataset, batch_size = batchsize, drop_last = False)\n\n with torch.no_grad():\n for i_batch, batch in enumerate(dataloader):\n outputs = model(batch['audio'])\n _, predicted = torch.max(outputs.data, 1)\n total += batchsize\n correct += (predicted == batch['label'].to(DEVICE)).sum().item()\n\n with open(filename, 'a') as f:\n f.write(str(100 * correct / float(total))+'\\n')\n model.train()\n return(100*correct/float(total))", "def overall_accuracy(y_true, y_pred):\n pred_flat, true_flat = y_pred.flatten(), y_true.flatten()\n intersection = list(pred_flat == true_flat).count(True)\n sum_ = len(true_flat)\n accuracy = round(intersection/sum_, 4)\n return accuracy", "def test(self, test=False): \n if test == True:\n if os.path.exists(self.student_save_path):\n checkpoint = torch.load(self.student_save_path, map_location=self.device)\n else:\n raise ValueError('No file with the pretrained model selected')\n\n self.student_model.load_state_dict(checkpoint)\n self.student_model.eval()\n\n running_acc = 0\n with torch.no_grad():\n for data, label in self.testloader:\n data, label = data.to(self.device), label.to(self.device)\n\n student_logits, *student_activations = self.student_model(data)\n\n running_acc += utils.accuracy(student_logits.data, label)\n\n print(f\"Test accuracy: {running_acc / len(self.testloader)}\")\n return running_acc / len(self.testloader)", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n _, y_pred = predictions.max(dim=1)\n accuracy = (y_pred == targets).sum().item() / n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def calc_accuracy(self, X, y):\n accuracy = 0.0\n ###########################################################################\n # TODO: #\n # Implement this method. 
#\n ###########################################################################\n\n y_pred = self.predict(X)\n if len(y_pred) != len(y):\n raise Exception('Fatal Error in dim - please checkout your prediction code!')\n accuracy = np.sum(y_pred == y)/len(y)*100\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return accuracy", "def accuracy(self, X, y):\n pred_labels = self.predict(X)\n return np.sum(pred_labels == y) / pred_labels.shape[0]", "def getaccuracy(features: ndarray, target: ndarray, trained_model) -> float:\n predictions = trained_model.predict(features)\n\n accuracy = accuracy_score(target, predictions, normalize=True)\n\n return accuracy", "def test(model, data: torch_geometric.data.Data) -> torch.Tensor:\n model.eval()\n logits, accuracies = model(), []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n pred = logits[mask].max(1)[1]\n accuracy = pred.eq(data.y[mask]).sum().item() / mask.sum().item()\n accuracies.append(accuracy)\n return accuracies", "def accuracy(self):\n # Initialize key variables\n correct = {}\n prediction = 0\n cls_count = {}\n accuracy = {}\n\n # Analyze all the data\n for cls in self.pca_object.classes():\n # Get list of x values to test\n vectors = self.pca_object.xvalues(cls)\n\n # Process each vector\n for vector in vectors:\n # Get the prediction\n prediction = self.classifier(vector)\n\n # Only count definitive predictions\n if prediction is not None:\n # Count the number of correct predictions\n if prediction == cls:\n if cls in correct:\n correct[cls] += 1\n else:\n correct[cls] = 1\n\n # Increment the count\n if cls in cls_count:\n cls_count[cls] += 1\n else:\n cls_count[cls] = 1\n\n # Calculate per class accuracy\n correct[None] = 0\n cls_count[None] = 0\n for cls in cls_count.keys():\n if cls_count[cls] != 0:\n accuracy[cls] = correct[cls] / cls_count[cls]\n\n # Keep a tally for all successes\n correct[None] = correct[None] + correct[cls]\n cls_count[None] = cls_count[None] + cls_count[cls]\n\n # Calulate overall accuracy\n accuracy[None] = correct[None] / cls_count[None]\n\n # Return\n return accuracy", "def evaluate_model(model, X_test, Y_test, category_names):\n\n # Predict labels using model\n y_pred1 = model.predict(X_test)\n\n # Generate accuracy report\n accuracy = [[(y_pred1[:, i] == Y_test.values[:, i]).mean(),\n *precision_recall_fscore_support(\n Y_test.values[:, i], y_pred1[:, i], average='weighted')]\n for i in range(y_pred1.shape[1])]\n accuracy = np.array(accuracy)[:, :-1]\n accuracy = (accuracy * 10000).astype(int) / 100\n scores1= pd.DataFrame( data=accuracy, index=list(Y_test), columns=['Accuracy', 'Precision', 'Recall', 'F-score'])\n print(scores1)\n return scores1", "def findOveralAccuracy(trainData,testData):\r\n kNNClassifier = kNN(trainData)\r\n \r\n All_Predictions = kNNClassifier.classify(testData,k=5)\r\n \r\n reference_dictionary = testData.dataDict['Species']\r\n\r\n Overall_Accuracy = 100*sum(reference_dictionary== All_Predictions)/len(All_Predictions)\r\n \r\n return All_Predictions, Overall_Accuracy", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b 
for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n print('*'*20, '\\n')\n print('precision:', p, 'recall:', r, 'f1:', f1, '\\n')\n print('*'*20)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def _calculate_accuracy(self):\n same = 0\n dif = 0\n for x, y in zip(self.test_string[3:], self.prediction[3:]):\n if x == y:\n same += 1\n else:\n dif += 1\n\n accuracy = round((same / (same + dif)) * 100, 2)\n print(f'Computer guessed right {same} out of {same + dif} symbols ({accuracy} %)')\n self.capital += dif\n self.capital -= same\n\n return", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def accuracy(predictions, targets):\n\n compare = predictions == targets\n # compare = (predictions.argmax(dim=1)) == (targets)\n # compare = (predictions.argmax(dim=1)) == (targets.argmax(dim=1))\n # summed = compare.sum().item()\n summed = compare.sum()\n # print(summed, compare.size())\n # print(compare.size()[0])\n return summed/compare.size", "def accuracy1(y_test, predictions):\n accuracy = 0.0\n\n for i in range(y_test.shape[0]):\n intersection = 0.0\n union = 0.0\n for j in range(y_test.shape[1]):\n if int(y_test[i,j]) == 1 or int(predictions[i,j]) == 1:\n union += 1\n if int(y_test[i,j]) == 1 and int(predictions[i,j]) == 1:\n intersection += 1\n \n if union != 0:\n accuracy = accuracy + float(intersection/union)\n\n accuracy = float(accuracy/y_test.shape[0])\n\n return accuracy", "def evaluate_model(model, X_test, Y_test, category_names):\n Y_prediction = model.predict(X_test)\n Y_prediction_df = pd.DataFrame(Y_prediction, columns=category_names)\n \n for col in category_names:\n print(f\"category:{col}\")\n print(classification_report(Y_test[col], Y_prediction_df[col]))\n print('------------------------------------------------------')\n \n accuracy = np.mean(Y_prediction == Y_test.values)\n print(f\"Accuracy: {accuracy:.2%}\")", "def test(args, model, device, data, target):\n model.eval()\n test_loss = 0\n correct = 0\n data, target = data.to(device), target.to(device)\n output = model(data)\n # Final result will be average of averages of the same size\n test_loss += F.nll_loss(output, target, reduction='mean').item()\n ppe.reporting.report({'val/loss': test_loss})\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n ppe.reporting.report({'val/acc': correct / len(data)})", "def model_pipeline_test(model_name):\n dir_path = config.DataDirectory.DEV_DIR\n model, path = load_model(model_name)\n model.load_state_dict(torch.load(path))\n model.eval()\n model.init_hidden()\n\n test_dir = os.path.join(\n dir_path, model_name.LABEL, config.TrainingTestingSplitDirectory.TEST_DIR\n )\n\n assert os.path.exists(test_dir)\n test_accuracy = {}\n for label in os.listdir(test_dir):\n test_set = os.path.join(test_dir, label)\n accuracy = 0\n for value in os.listdir(test_set):\n data = np.load(os.path.join(test_set, value))\n pred_label, _ = generate_pred(\n mel=data, model=model, label=model_name.OUTPUT, model_name=model_name\n )\n if pred_label.lower() == 
label.lower():\n accuracy += 1\n test_accuracy[label] = accuracy / len(os.listdir(test_set))\n\n _logger.info(test_accuracy)\n return test_accuracy", "def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)", "def get_model_accuracy(self, features, labels):\n features_prediction = self._model.predict(features)\n accuracy = accuracy_score(features_prediction, labels)\n return accuracy", "def model_accuracy(predict, y):\n true_predict = (predict.argmax(1) == y.argmax(1)).float()\n acc = true_predict.sum() / len(true_predict)\n return acc", "def test_model(model: nn.Module, test_set: data.DataLoader, number_of_classes: int) -> Tuple[score.FloatScore, score.DictScore]:\n # model.eval is used for ImageNet models, batchnorm or dropout layers will work in eval mode.\n model.eval()\n\n def test_average() -> score.FloatScore:\n correct = 0\n total = 0\n\n with torch.set_grad_enabled(False):\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (average)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred.data, 1)\n\n total += yreal.size(0)\n correct += (predicted == yreal).sum().item()\n\n accuracy = 100 * correct / total\n log.info(\"Accuracy of the network on the {} test images (average): {}\".format(total, accuracy))\n with open('epoch_logs.txt', 'a+') as file:\n file.write('Test Acc: {}\\n'.format(accuracy))\n return score.FloatScore(accuracy)\n\n def test_per_class() -> score.DictScore:\n class_correct = list(0. for _ in range(number_of_classes))\n class_total = list(0. for _ in range(number_of_classes))\n total = 0\n\n with torch.no_grad():\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (per class)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n total += yreal.size(0)\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred, 1)\n c = (predicted == yreal).squeeze()\n for i in range(yreal.shape[0]):\n label = yreal[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n log.info(\"Accuracy of the network on the {} test images (per-class):\".format(total))\n\n per_class_accuracy = {}\n for i in range(number_of_classes):\n accuracy = 100 * class_correct[i] / (class_total[i] + 0.0001)\n per_class_accuracy[i] = accuracy\n print('Accuracy of %5s : %2d %%' % (\n i, accuracy))\n\n return score.DictScore(per_class_accuracy)\n\n return test_average(), test_per_class()", "def accuracy ( actuals, predictions ):\n return np.mean ( actuals == predictions )\n # End accuracy()", "def accuracy ( actuals, predictions ):\n return np.mean ( actuals == predictions )\n # End accuracy()", "def accuracy_compute(predictions, labels):\n with tf.name_scope('test_accuracy'):\n accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]\n tf.summary.scalar('test_accuracy', accu)\n return accu", "def get_accuracy(self, k=None):\n k = 1 if k is None else k\n n_correct = 0\n \n for query, answer in tqdm(zip(self.test_queries, self.results)):\n correct_set = self.correct_answers[query]\n is_correct = False\n for candidate in answer[:k]:\n if candidate in correct_set:\n is_correct = True\n break\n n_correct += int(is_correct)\n \n return n_correct / len(self.test_queries)", "def _accuracy(self, data_loader):\n self._net.train(False)\n num_correct = 0\n num_total = 0\n for X, y in data_loader:\n # Data.\n X = torch.autograd.Variable(X.cuda())\n y = 
torch.autograd.Variable(y.cuda(async=True)).long()\n\n # Prediction.\n score = self._net(X)\n _, prediction = torch.max(score.data, 1)\n num_total += y.size(0)\n num_correct += torch.sum(prediction == y.data).item()\n self._net.train(True) # Set the model to training phase\n return 100 * num_correct / num_total", "def get_accuracy(pos_test, neg_test, pos_train, neg_train):\n pos_file = open(pos_test, \"r\")\n neg_file = open(neg_test, \"r\")\n trained_pos = train_model(pos_train)\n trained_neg = train_model(neg_train)\n pos_count = 0\n #keeps track of how many positive reviews are accurately predicted\n total_pos_reviews = 0 \n neg_count = 0\n #keeps track of how many negative reviews are accurately predicted\n total_neg_reviews = 0\n for review in pos_file:\n classification = classify(review, trained_pos, trained_neg)\n total_pos_reviews += 1\n if classification == \"positive\":\n pos_count += 1 \n positive_accuracy = pos_count/total_pos_reviews \n for review in neg_file:\n classification = classify(review, trained_pos, trained_neg)\n total_neg_reviews += 1\n if classification == \"negative\":\n neg_count += 1 \n negative_accuracy = neg_count/total_neg_reviews \n total_accuracy = average(positive_accuracy, negative_accuracy)\n print(\"Positive accuracy: \" + str(positive_accuracy))\n print(\"Negative accuracy: \" + str(negative_accuracy))\n print(\"Total accuracy: \" + str(total_accuracy))", "def test(model, args, test_loader):\n with torch.no_grad():\n model.eval()\n test_loss = 0\n correct = 0\n # Data and target are a single pair of images and labels.\n for data, target in tqdm(test_loader, desc='Batching Test Data'):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n pred, tloss = make_prediction(data, target)\n test_loss += tloss\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n uf.box_print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))", "def compute_accuracy(model: torch.nn.Module, data_loader: torch.utils.data.DataLoader,\r\n model_type: str = \"Baseline\") -> float:\r\n tot_err = 0\r\n for input_data, target_data, _ in iter(data_loader):\r\n # Get output from the model\r\n if model_type == \"Baseline\":\r\n res = model(input_data)\r\n else:\r\n model.train(False)\r\n res, _ = model(input_data)\r\n # Count the number of errors\r\n for i, r in enumerate(res):\r\n pred = r.max(0)[1].item()\r\n if (target_data[i]) != pred:\r\n tot_err += 1\r\n # Return accuracy\r\n return 1 - tot_err / (len(data_loader.dataset))", "def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]", "def _accuracy(self, data_loader):\n self._net.eval()\n num_correct = 0.0\n num_total = 0.0\n batchindex = 0\n for X, y in tqdm(data_loader):\n # Data.\n batchindex = batchindex + 1\n X = X.to(self.device)\n y = y.to(self.device)\n # y = torch.tensor(y.to(device))\n\n # Prediction.\n score = self._net(X)\n _, prediction = torch.max(score.data, 1)\n num_total += y.size(0)\n num_correct += torch.sum(prediction == y.data)\n self._net.train() # Set the model to training phase\n return 100 * num_correct.float() / num_total", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n #Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def evaluate_classifier(self, clf):\n\n clf = clf.fit(self.training_data_train_x, self.training_data_train_y)\n predicted = clf.predict(self.training_data_opt_x)\n\n correct = 0\n for i in range(len(self.training_data_opt_y)):\n if predicted[i] == self.training_data_opt_y[i]:\n correct += 1\n\n accuracy = correct / len(self.training_data_opt_y)\n\n return clf, accuracy" ]
[ "0.78987217", "0.7703377", "0.76911086", "0.76736206", "0.7662402", "0.7599939", "0.75526947", "0.7543788", "0.7528232", "0.7517148", "0.74728966", "0.74700576", "0.74215174", "0.73989594", "0.73596543", "0.7353871", "0.7339963", "0.73214674", "0.73190075", "0.7318403", "0.7298355", "0.7268526", "0.7247585", "0.7175477", "0.7162517", "0.7155559", "0.7145557", "0.710485", "0.7098075", "0.70480704", "0.70043755", "0.6987557", "0.6955608", "0.6943958", "0.69426614", "0.69390917", "0.69386137", "0.6938079", "0.6915838", "0.6914199", "0.6897752", "0.6893998", "0.68886256", "0.68741155", "0.68715215", "0.6869314", "0.6866001", "0.6856939", "0.6851383", "0.68487954", "0.68417925", "0.68309903", "0.6810934", "0.6799617", "0.67948073", "0.6788523", "0.6785987", "0.6782784", "0.6772759", "0.67612886", "0.67601126", "0.6753444", "0.67480487", "0.6746931", "0.67258114", "0.6722003", "0.67178506", "0.6713774", "0.67111033", "0.6709846", "0.67014366", "0.6692259", "0.66847104", "0.6673539", "0.6656892", "0.665364", "0.6653595", "0.6647135", "0.66441184", "0.6643032", "0.6634502", "0.6630793", "0.66248983", "0.66201574", "0.6619135", "0.66169137", "0.661509", "0.6614348", "0.66094077", "0.66094077", "0.66088986", "0.6607302", "0.6603989", "0.6598565", "0.6596443", "0.6590496", "0.6588325", "0.65755725", "0.6573676", "0.6573427" ]
0.71598864
25
Runs a training procedure on a PyTorch module using the dataset and loss function.
def train_model(self, model):
    train_state = {'stop_early': False,
                   'early_stopping_step': 0,
                   'early_stopping_best_val': 1e8,
                   'learning_rate': self.lr,
                   'epoch_index': 0,
                   'train_loss': [],
                   'val_loss': [],
                   'best_model': model}
    dataset = self.dataset
    loss_fn = self.loss_fn
    dataset.set_split('train')
    print("Training module with " + str(len(dataset)) + " examples")
    data_loader = DataLoader(dataset, batch_size=self.batch_size, shuffle=True,
                             drop_last=True)
    optimizer = optim.Adam(model.parameters(), lr=self.lr)
    for epoch in range(self.epochs):
        train_state['epoch_index'] = epoch
        # First step in each epoch is to train over all batches
        model.train()
        dataset.set_split('train')
        train_loss = 0
        for b_i, batch_data in enumerate(data_loader):
            # Step 1: zero gradients
            optimizer.zero_grad()
            # Step 2: run forward
            X = batch_data['x']
            output = model(X)
            # Step 3: compute loss
            target = batch_data['y']
            loss = loss_fn(output, target)
            # Step 4: run backward
            loss.backward()
            # Step 5: update
            optimizer.step()
            # Record accumulated loss
            new_loss = loss.item()
            train_loss += new_loss
        train_loss /= b_i
        train_state['train_loss'].append(train_loss)
        # After training, compute loss on validation set and check for early stop
        model.eval()
        dataset.set_split('val')
        val_loss = 0
        for b_i, batch_data in enumerate(data_loader):
            # Step 1: run forward
            X = batch_data['x']
            output = model(X)
            # Step 2: compute loss
            target = batch_data['y']
            loss = loss_fn(output, target)
            # Record accumulated loss
            new_loss = loss.item()
            val_loss += new_loss
        val_loss /= b_i
        train_state['val_loss'].append(val_loss)
        print("Finished epoch " + str(epoch + 1) + ". Train loss=" +
              str(train_loss) + ", Val loss=" + str(val_loss))
        if val_loss < train_state['early_stopping_best_val']:
            # new best model, reset stopping counter, store model
            train_state['early_stopping_step'] = 0
            train_state['early_stopping_best_val'] = val_loss
            best_model = copy.deepcopy(model)
            best_model.load_state_dict(model.state_dict())
            train_state['best_model'] = best_model
        else:
            # val loss not improved; increase early stopping counter
            train_state['early_stopping_step'] += 1
            if train_state['early_stopping_step'] >= self.early_stopping_criteria:
                train_state['stop_early'] = True
                print("Val loss failed to improve. Stopping early.")
                break
    return train_state['best_model'], train_state
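The method above assumes module-level imports of copy, DataLoader (torch.utils.data) and optim (torch.optim), and an owner object that also carries dataset, loss_fn, lr, batch_size, epochs and early_stopping_criteria. A small illustrative sketch of the dataset contract it relies on (a set_split() switch plus dict batches with 'x' and 'y' keys); this scaffold is an assumption for illustration, not part of the original code:

import torch
from torch.utils.data import Dataset

class SplitDataset(Dataset):
    """Toy dataset exposing the set_split() interface used by train_model above."""
    def __init__(self):
        x = torch.randn(256, 8)
        y = x.sum(dim=1, keepdim=True)
        self._splits = {'train': (x[:200], y[:200]), 'val': (x[200:], y[200:])}
        self.set_split('train')

    def set_split(self, split):
        self._x, self._y = self._splits[split]

    def __len__(self):
        return len(self._x)

    def __getitem__(self, idx):
        return {'x': self._x[idx], 'y': self._y[idx]}

# With a trainer holding such a dataset plus loss_fn/lr/batch_size/epochs/
# early_stopping_criteria, the method would be driven roughly as:
#   best_model, state = trainer.train_model(torch.nn.Linear(8, 1))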
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], 
training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def train(train_loader : torch.utils.data.DataLoader, model : nn.Module, criterion : nn.Module, optimizer : torch.optim.Optimizer) -> logger.Result:", "def train(\n train_dataloader: torch.utils.data.dataloader,\n model: nn.Module,\n loss_function: nn.Module,\n optimizer: torch.optim.Optimizer,\n device: torch.device,\n scheduler=None,\n):\n model.train()\n\n total_loss = 0\n\n for step, batch in tqdm(enumerate(train_dataloader)):\n\n if step % 50 == 0 and not step == 0:\n print(\" Batch {:>5,} of {:>5,}.\".format(step, len(train_dataloader)))\n\n data = batch[\"data\"].to(device)\n label = batch[\"label\"].to(device)\n model.zero_grad()\n\n preds = model(data)\n\n loss = loss_function(preds, label)\n\n total_loss = total_loss + loss.item()\n\n loss.backward()\n\n optimizer.step()\n # scheduler.step()", "def train(self, dataset):\n print_interval = 100\n train_accuracy = 0\n train_loss = 0\n num_examples_trained = 0\n # Put model into training mode\n self.model.train()\n # Step through dataset using pytorch data loader, and a tqdm progress bar\n for num, batch in enumerate(tqdm(dataset.loader)):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_trained += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n loss = iloss\n # Zero out the gradient\n self.optimizer.zero_grad()\n # Perform backwards pass (backprop)\n loss.backward()\n # Step the optimizer\n self.optimizer.step()\n train_loss += iloss.cpu().data.numpy().item() * batch_size\n train_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n # Print results info\n if num % print_interval == 0:\n print(\"training loss: \" + str(iloss.cpu().data.numpy().item()))\n print(\"training acc: \" + str(iaccuracy.cpu().data.numpy().item()))\n train_accuracy = train_accuracy / num_examples_trained\n train_loss = train_loss / num_examples_trained\n # Increment current epoch number\n self.cur_epoch += 1\n # Return accuracy and loss\n return train_accuracy, train_loss", "def train(X_train, y_train, X_test, y_test, net):\n \n # convert X, y to tensors:\n X_train = torch.tensor(X_train, dtype=torch.float32)\n y_train = torch.tensor(y_train, dtype=torch.float32)\n \n X_test = torch.tensor(X_test, dtype=torch.float32)\n y_test = torch.tensor(y_test, dtype=torch.float32)\n\n # iterator:\n train_set = TensorDataset(X_train, y_train)\n train_loader = DataLoader(train_set, batch_size, shuffle=True)\n\n test_set = TensorDataset(X_test, y_test)\n test_loader = DataLoader(test_set, batch_size, shuffle=True)\n\n # optimizer:\n optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n loss = nn.MSELoss()\n\n # loss accumulator:\n time_line = []\n train_metric = []\n test_metric = []\n\n # loop:\n for epoch in range(epochs):\n # update parameters:\n for Xb, yb in train_loader:\n train_ls = loss(net(Xb), yb)\n optimizer.zero_grad()\n train_ls.backward()\n optimizer.step()\n # update train and test losses:\n with torch.no_grad():\n if not epoch % 50:\n time_line.append(epoch)\n metric = 0\n for Xb, yb in train_loader:\n metric += loss(net(Xb), yb) / batch_size\n train_metric.append(metric)\n metric = 0\n for Xb, yb in test_loader:\n metric += loss(net(Xb), yb) / batch_size\n test_metric.append(metric)\n # verbose:\n print('Epoch: ', epoch)\n\n # final report of the losses: \n print('Train loss.....{0:6.3f}'.format(train_metric[-1]))\n print('Test loss......{0:6.3f}'.format(test_metric[-1]))\n\n # plot losses 
with respect to epochs:\n plt.plot(time_line, train_metric, color='b')\n plt.plot(time_line, test_metric, color='r')\n plt.show()", "def train_func(self, data):\n self.net.train()\n\n outputs, losses = self.forward(data)\n\n self.update_network(losses)\n self.record_losses(losses, 'train')\n\n return outputs, losses", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n batch_size = 1\n while True:\n error = False\n for x, y in dataset.iterate_once(batch_size):\n y_pred = self.get_prediction(x)\n y = nn.as_scalar(y)\n if y != y_pred:\n error = True\n nn.Parameter.update(self.get_weights(),x,y)\n if error == False:\n break", "def get_train_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.DataLoader,\n torch.utils.data.DataLoader,\n argparse.Namespace,\n torch.nn.Module,\n torch.optim.Optimizer,\n torch.optim.Optimizer,\n Progress,\n TaskID,\n ],\n None,\n ]:\n pass", "def train(args, model, train_data_loader, dev_data_loader, device):\n\n\tmodel.train()\n\toptimizer = torch.optim.Adam(model.parameters())\n\tprint_loss_total = 0\n\tepoch_loss_total = 0\n\tstart = time.time()\n\n\t#### modify the following code to complete the training funtion\n\n\tbest_train_acc, best_dev_acc = 0.0, 0.0\n\n\tfor idx, batch in enumerate(train_data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t#### Your code here ----\n\n\t\t# zero out\n\t\toptimizer.zero_grad()\n\n\t\t# get output from model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# use loss_fn defined above to calculate loss\n\t\tloss = loss_fn(logits, labels)\n\n\t\t# use accuracy_fn defined above to calculate 'error' and number of examples ('num_examples') used to\n\t\t# calculate accuracy below.\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# backprop\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\t###Your code ends ---\n\t\taccuracy = 1 - error / num_examples\n\t\tclip_grad_norm_(model.parameters(), 5)\n\t\tprint_loss_total += loss.data.numpy()\n\t\tepoch_loss_total += loss.data.numpy()\n\n\t\tif (idx + 1) % args.checkpoint == 0 and idx > 0:\n\t\t\tprint_loss_avg = print_loss_total / args.checkpoint\n\n\t\t\tdev_acc = evaluate(dev_data_loader, model, device)\n\n\t\t\tprint('number of steps: %d, train loss: %.5f, train acc: %.3f, dev acc: %.3f, time: %.5f'\n\t\t\t % (idx + 1, print_loss_avg, accuracy, dev_acc, time.time() - start))\n\t\t\tprint_loss_total = 0\n\t\t\tif accuracy > best_train_acc:\n\t\t\t\tbest_train_acc = accuracy\n\t\t\tif dev_acc > best_dev_acc:\n\t\t\t\tbest_dev_acc = dev_acc\n\n\treturn best_train_acc, best_dev_acc", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n converged = False\n while not converged:\n failures = 0\n for item, classification in dataset.iterate_once(1):\n prediction = self.get_prediction(item)\n if prediction != nn.as_scalar(classification):\n failures += 1\n self.w.update(item, nn.as_scalar(classification))\n if failures == 0:\n converged = True", "def train_func(self, data, label):\r\n self.net.train()\r\n\r\n outputs, losses = self.forward(data, label)\r\n\r\n self.update_network(losses)\r\n\r\n return outputs, losses", "def train(model, optimizer, loss_fn, dataloader, epoch):\n\n # Set the model into train mode\n model.train()\n\n train_loss = 0\n correct = 0\n total = 0\n datacount = len(dataloader)\n\n for batch_idx, (train_batch, labels_batch) in enumerate(dataloader):\n\n # move the data onto the device\n train_batch, labels_batch = 
train_batch.to(device), labels_batch.to(device)\n\n # # convert to torch Variables\n # train_batch, labels_batch = Variable(train_batch), Variable(labels_batch)\n\n # clear the previous grad \n optimizer.zero_grad()\n\n # compute model outputs and loss\n outputs = model(train_batch)\n loss = loss_fn(outputs, labels_batch)\n loss.backward()\n\n # after computing gradients based on current batch loss,\n # apply them to parameters\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += labels_batch.size(0)\n correct += predicted.eq(labels_batch).sum().item()\n # get learning rate\n current_lr = get_lr(optimizer=optimizer)\n\n # write to tensorboard\n writer.add_scalar('train/loss', train_loss/(batch_idx+1), (datacount * (epoch+1)) + (batch_idx+1))\n writer.add_scalar('train/accuracy', 100.*correct/total, (datacount * (epoch+1)) + (batch_idx+1))\n writer.add_scalar('Learning rate', current_lr)\n\n progress_bar(batch_idx, len(dataloader), 'Train Loss: %.3f | Train Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))", "def train(loss_function='js', epoch=10, batch_size=512, phi=0.9, alpha=10):\n\n if loss_function =='js':\n model = MLPJSD()\n\n elif loss_function =='wd':\n model = MLPWD()\n\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n\n\n for epoch in range(epoch):\n\n optimizer.zero_grad()\n\n x = samplers.distribution1(0, batch_size)\n\n for input_x in x:\n input_x = Variable(torch.from_numpy(input_x)).float()\n break\n\n y = samplers.distribution1(phi, batch_size)\n\n for input_y in y:\n input_y = Variable(torch.from_numpy(input_y)).float()\n break\n\n if loss_function == 'js':\n\n loss = loss_js(model, input_x, input_y)\n\n elif loss_function == 'wd':\n\n loss = loss_wd(model, input_x, input_y, alpha)\n\n loss.backward()\n optimizer.step()\n\n loss_print = - loss\n\n if(epoch%50) == 0:\n print('epoch: {}, train loss: {:.6f}'.format(\n epoch, loss_print))\n\n return model, loss_print", "def train(self, train_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' %model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n pass\n self.model = torch.load(model_path)\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n \n # capture best model\n best_val_psnr = -1\n best_model_state = self.model.state_dict()\n\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _ = self._check_PSNR(train_dataset)\n self.hist_train_psnr.append(train_psnr)\n \n\n \n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)", "def train(self, dataset) -> None:\n raise NotImplementedError()", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. 
for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def train(model: nn.Module, train_loader: DataLoader, optimizer: optim.Optimizer,\n loss_function: nn.Module, current_epoch_number: int = 0,\n device: torch.device = None, batch_reports_interval: int = 10):\n model.train()\n loss_accum = 0\n\n for batch_idx, (data, target) in enumerate(train_loader):\n\n # throwing away the gradients\n optimizer.zero_grad()\n\n # predicting scores\n output = model(data.to(device))\n\n # computing the error\n loss = loss_function(output, target.unsqueeze(dim=-1).float().to(device))\n\n # saving loss for stats\n loss_accum += loss.item() / len(data)\n\n # computing gradients\n loss.backward()\n\n # updating the model's weights\n optimizer.step()\n\n if batch_idx % batch_reports_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tAveraged Epoch Loss: {:.6f}'.format(\n current_epoch_number,\n batch_idx * len(data),\n len(train_loader.dataset),\n 100. * batch_idx / len(train_loader),\n loss_accum / (batch_idx + 1)))", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def train(self, *args, **kwargs):\n # Handle overload of train() method\n if len(args) < 1 or (len(args) == 1 and type(args[0]) == bool):\n return nn.Sequential.train(self, *args, **kwargs)\n\n #\n # Parse training arguments\n #\n\n training_data = args[0]\n arguments = {\n \"validation_data\": None,\n \"batch_size\": 256,\n \"sigma_noise\": None,\n \"adversarial_training\": False,\n \"delta_at\": 0.01,\n \"initial_learning_rate\": 1e-2,\n \"momentum\": 0.0,\n \"convergence_epochs\": 5,\n \"learning_rate_decay\": 2.0,\n \"learning_rate_minimum\": 1e-6,\n \"maximum_epochs\": 1,\n \"training_split\": 0.9,\n \"gpu\": False,\n \"optimizer\": None,\n \"learning_rate_scheduler\": None\n }\n argument_names = arguments.keys()\n for a, n in zip(args[1:], argument_names):\n arguments[n] = a\n for k in kwargs:\n if k in arguments:\n arguments[k] = kwargs[k]\n else:\n raise ValueError(\"Unknown argument to {}.\".print(k))\n\n validation_data = arguments[\"validation_data\"]\n batch_size = arguments[\"batch_size\"]\n sigma_noise = arguments[\"sigma_noise\"]\n adversarial_training = arguments[\"adversarial_training\"]\n delta_at = arguments[\"delta_at\"]\n initial_learning_rate = arguments[\"initial_learning_rate\"]\n convergence_epochs = arguments[\"convergence_epochs\"]\n learning_rate_decay = arguments[\"learning_rate_decay\"]\n learning_rate_minimum = arguments[\"learning_rate_minimum\"]\n maximum_epochs = arguments[\"maximum_epochs\"]\n training_split = arguments[\"training_split\"]\n gpu = arguments[\"gpu\"]\n momentum = arguments[\"momentum\"]\n optimizer = arguments[\"optimizer\"]\n learning_rate_scheduler = arguments[\"learning_rate_scheduler\"]\n\n #\n # Determine device to use\n #\n if torch.cuda.is_available() and gpu:\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n self.to(device)\n\n #\n # Handle input data\n #\n try:\n x, y = handle_input(training_data, device)\n training_data = BatchedDataset((x, y), batch_size)\n except:\n pass\n\n self.train()\n if not optimizer:\n self.optimizer = optim.SGD(\n 
self.parameters(), lr=initial_learning_rate, momentum=momentum\n )\n else:\n self.optimizer = optimizer\n self.criterion.to(device)\n\n if not optimizer and not learning_rate_scheduler:\n scheduler = ReduceLROnPlateau(\n self.optimizer,\n factor=1.0 / learning_rate_decay,\n patience=convergence_epochs,\n min_lr=learning_rate_minimum,\n )\n else:\n scheduler = learning_rate_scheduler\n\n training_errors = []\n validation_errors = []\n\n #\n # Training loop\n #\n\n for i in range(maximum_epochs):\n err = 0.0\n n = 0\n for j, (x, y) in enumerate(training_data):\n\n x = x.to(device)\n y = y.to(device)\n\n shape = x.size()\n shape = (shape[0], 1) + shape[2:]\n y = y.reshape(shape)\n\n self.optimizer.zero_grad()\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n c.backward()\n self.optimizer.step()\n\n err += c.item() * x.size()[0]\n n += x.size()[0]\n\n if adversarial_training:\n self.optimizer.zero_grad()\n x_adv = self._make_adversarial_samples(x, y, delta_at)\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n c.backward()\n self.optimizer.step()\n\n if j % 100:\n print(\n \"Epoch {} / {}: Batch {} / {}, Training error: {:.3f}\".format(\n i, maximum_epochs, j, len(training_data), err / n\n ),\n end=\"\\r\",\n )\n\n # Save training error\n training_errors.append(err / n)\n\n lr = [group[\"lr\"] for group in self.optimizer.param_groups][0]\n\n val_err = 0.0\n if not validation_data is None:\n n = 0\n for x, y in validation_data:\n x = x.to(device).detach()\n y = y.to(device).detach()\n\n shape = x.size()\n shape = (shape[0], 1) + shape[2:]\n y = y.reshape(shape)\n\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n\n val_err += c.item() * x.size()[0]\n n += x.size()[0]\n validation_errors.append(val_err / n)\n\n print(\n \"Epoch {} / {}: Training error: {:.3f}, Validation error: {:.3f}, Learning rate: {:.5f}\".format(\n i,\n maximum_epochs,\n training_errors[-1],\n validation_errors[-1],\n lr,\n )\n )\n if scheduler:\n scheduler.step()\n else:\n scheduler.step()\n print(\n \"Epoch {} / {}: Training error: {:.3f}, Learning rate: {:.5f}\".format(\n i, maximum_epochs, training_errors[-1], lr\n )\n )\n\n self.training_errors += training_errors\n self.validation_errors += validation_errors\n self.eval()\n return {\n \"training_errors\": self.training_errors,\n \"validation_errors\": self.validation_errors,\n }", "def train(args, model, data_loader, optimizer):\n # Keep track of average accuracy and loss\n avg_loss = utils.AverageMeter()\n\n # Set training mode\n model.train()\n\n # for inputs, target in tqdm(data_loader):\n for inputs, targets in tqdm(data_loader):\n # Prep\n inputs = utils.wrap_variables(inputs, cuda=args.cuda)\n targets = utils.wrap_variables(targets, cuda=args.cuda)\n\n # Run forward\n predictions, log_likelihood = model(**inputs)\n\n # Loss = -NLL\n loss = -log_likelihood\n avg_loss.update(loss.data[0], len(inputs['x_tags']))\n\n # Run backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n return {'loss': avg_loss}", "def train(model, optimizer, loss_fn, dataloader, metrics, params):\n\n # set model to training mode\n model.train()\n\n # summary for current training loop and a running average object for loss\n summ = []\n loss_avg = utils.RunningAverage()\n # crit = nn.CrossEntropyLoss(size_average=True, reduce=True)\n\n # Use tqdm for progress bar\n # with tqdm(total=len(dataloader)) as t:\n for i, (train_batch, labels_batch) in enumerate(dataloader):\n\n # move to GPU if available\n if params.cuda:\n train_batch, labels_batch = 
train_batch.cuda(async=True), labels_batch.cuda(async=True)\n # convert to torch Variables\n train_batch, labels_batch = Variable(train_batch), Variable(labels_batch)\n labels_batch = labels_batch.view(labels_batch.size(0))\n\n # compute model output and loss\n output_batch = model(train_batch)\n loss = loss_fn(output_batch, labels_batch)\n\n # clear previous gradients, compute gradients of all variables wrt loss\n optimizer.zero_grad()\n loss.backward()\n\n # performs updates using calculated gradients\n optimizer.step()\n\n # Evaluate summaries only once in a while\n if i % params.save_summary_steps == 0:\n # extract data from torch Variable, move to cpu, convert to numpy arrays\n output_batch = output_batch.data.cpu().numpy()\n labels_batch = labels_batch.data.cpu().numpy()\n\n # compute all metrics on this batch\n summary_batch = {metric:metrics[metric](output_batch, labels_batch)\n for metric in metrics}\n summary_batch['loss'] = loss.item()\n summ.append(summary_batch)\n\n # if i % 100 == 0:\n # print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n # i, i*len(labels_batch), len(dataloader.dataset),\n # # i, i * len(labels_batch)), len(data_loader.dataset),\n # 100. * i / len(dataloader), loss.item()))\n\n # print('loss={:05.3f}' ''.format(loss_avg()))\n\n # update the average loss\n loss_avg.update(loss.item())\n\n # print('loss={:05.3f}'\n # ''.format(loss_avg()))\n # t.set_postfix(loss='{:05.3f}'.format(loss_avg()))\n # t.update()\n\n # compute mean of all metrics in summary\n metrics_mean = {metric:np.mean([x[metric] for x in summ]) for metric in summ[0]}\n metrics_string = \" ; \".join(\"{}: {:05.3f}\".format(k, v) for k, v in metrics_mean.items())\n logging.info(\"- Train metrics: \" + metrics_string)", "def learn(\n train_dataloader: torch.utils.data.dataloader,\n val_dataloader: torch.utils.data.dataloader,\n model: nn.Module,\n loss_function: nn.Module,\n optimizer: torch.optim.Optimizer,\n device: torch.device,\n epochs: int,\n scheduler=None,\n):\n for epoch in tqdm(range(epochs)):\n start = time.time()\n\n print(\"\\n Epoch {:} / {:}\".format(epoch + 1, epochs))\n\n # train model\n train(train_dataloader, model, loss_function, optimizer, device)\n\n train_loss, train_metric = evaluate(\n train_dataloader, model, loss_function, optimizer, device\n )\n\n # evaluate model\n valid_loss, val_metric = evaluate(\n val_dataloader, model, loss_function, optimizer, device\n )\n\n # scheduler.step()\n\n print(f\"\\nTraining Loss: {train_loss:.3f} , training metric : {train_metric}\")\n print(f\"Validation Loss: {valid_loss:.3f}, val metric : {val_metric}\")\n\n now = time.time()\n print(f\"Time for epoch {epoch} is {(now - start)/60} min\")", "def train(args: dict, lr_index: int, dataloaders: Tuple[DataLoader]):\n train_dataloader, val_dataloader = dataloaders\n device = torch.device('cuda') if args.gpu else torch.device('cpu')\n model = get_model(args, device)\n if args.label_weights is None:\n label_weights = None\n else:\n label_weights = args.label_weights.to(device)\n loss_criterion = nn.CrossEntropyLoss(weight=label_weights)\n optimizer = torch.optim.AdamW(\n model.parameters(), lr=args.learning_rates[lr_index]\n )\n train_losses = []\n val_losses = []\n stats = {\"true\": [], \"pred\": []}\n for epoch in range(args.epochs):\n start = time.time()\n train_loss = train_epoch(\n model, loss_criterion, optimizer, train_dataloader, device\n )\n val_loss, epoch_stats = val_epoch(\n model, loss_criterion, val_dataloader, device\n )\n train_losses.append(train_loss)\n 
val_losses.append(val_loss)\n stats[\"true\"] += epoch_stats[\"true\"]\n stats[\"pred\"] += epoch_stats[\"pred\"]\n duration = (time.time() - start) / 60 # epoch duration in minutes\n print_training_update(\n epoch, duration, lr_index, (train_loss, val_loss), epoch_stats\n )\n recent_10_percent = int(0.9*args.epochs)\n return (\n model,\n np.mean(train_losses[recent_10_percent:]),\n np.mean(val_losses[recent_10_percent:]),\n training_metric(stats)\n )", "def train(self, epochs=1, loader=None, reset_optimizer=False):\n if reset_optimizer:\n self.optimizer = self.optimizer_fn(self.tr_model.parameters()) \n \n # start training the worker using local dataset\n self.tr_model.train() \n running_loss, samples = 0.0, 0\n \n # check if a dataloader was provided for training\n loader = self.tr_loader if not loader else loader\n end_training = False\n itr = 0\n for ep in range(epochs):\n # train next epoch\n for i, x, y in loader: \n itr += 1 \n x, y = x.to(device), y.to(device)\n \n self.optimizer.zero_grad()\n \n loss = nn.CrossEntropyLoss()(self.tr_model(x), y)\n \n running_loss += loss.item()*y.shape[0]\n samples += y.shape[0]\n \n loss.backward()\n self.optimizer.step() \n\n # check for early stopping criteria\n if (self.early_stop != -1) and (itr % 5 == 0):\n print(\"Checking early stop criteria...\")\n accuracy = self.evaluate()[\"accuracy\"]\n if accuracy >= self.early_stop:\n print(\"Stopping criteria reached for worker {}...\".format(self.id))\n end_training = True\n break\n # check if early stop criteria was reached\n if end_training:\n break\n train_stats = {\"loss\" : running_loss / samples}\n \n # return training statistics\n return train_stats", "def train(self, train_data, train_labels, train_input_fn, n_epochs):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train(model, trainloader, device, optimizer, loss_function, epoch):\n global train_losses\n model.train()\n train_iter = 0\n loss_meter = AverageMeter(\"train-avg\")\n for x, _ in trainloader:\n x = x.to(device)\n z, logdet, _, logp = model(preprocess(x))\n loss = loss_function(logp, logdet, x.size())\n\n # code for rosalinty model\n # log_p_sum, logdet, z_outs = model(preprocess(x))\n # loss = loss_function(log_p_sum, logdet, x.size())\n\n if(train_iter % 10 == 0):\n print(f\"iteration: {train_iter}, loss: {loss.item()}\", end=\"\\r\")\n \n model.zero_grad()\n loss_meter.update(loss.item())\n loss.backward()\n optimizer.step()\n train_iter += 1\n print(f\"epoch complete, mean loss: {loss_meter.avg}\")\n train_losses.append({\"epoch\": epoch, \"avg_loss\": loss_meter.avg})", "def run_epoch(self, dataloader, phase: str, epoch: int):\n if self.model is None:\n raise RuntimeError(\"You must compile the trainer first!\")\n\n if torch.has_cuda:\n torch.cuda.empty_cache()\n for callback in self.callbacks:\n callback.on_epoch_begin(dataloader, phase, epoch)\n\n # Loop over the dataset_class and update weights.\n for step, (network_inputs, targets) in enumerate(dataloader):\n for callback in self.callbacks:\n callback.on_iter_begin(step, network_inputs, targets)\n\n # Forward pass, computing gradients and applying them\n self.optimizer.zero_grad()\n network_output = self.model(*network_inputs)\n if isinstance(network_output, Tensor):\n if network_output.isnan().any():\n print()\n error(\"NaN 
NetworkOutput: {}\".format(network_output))\n raise ValueError(\"NetworkOutput got nan.\")\n else:\n for name, p in network_output._asdict().items():\n if p.isnan().any():\n print()\n error(\"NaN NetworkOutput {}: {}\".format(name, p))\n raise ValueError(\"NetworkOutput {} got nan.\".format(name))\n loss_result = self.loss(y_true=targets, y_pred=network_output)\n tensorboard.log_scalar(\"loss/total\", loss_result)\n\n if loss_result == 0:\n print()\n warn(\"Loss is exactly 0, is this a bug?\")\n else:\n if loss_result.isnan().any():\n error(\"NaN Loss\")\n raise ValueError(\"Loss got nan.\")\n\n if phase == \"train\":\n loss_result.backward()\n self.optimizer.step()\n\n for callback in self.callbacks:\n callback.on_iter_end(network_output, loss_result)\n\n for callback in self.callbacks:\n callback.on_epoch_end()", "def train(model, optimizer, loss_function, loader, device, log_every_n=10):\n\n model.train() # Run model in training mode\n\n loss_history = []\n running_loss = 0.\n running_loss_history = []\n\n for i, batch in tqdm(enumerate(loader)):\n\n optimizer.zero_grad() # Always set gradient to 0 before computing it\n\n logits = model(batch[0].to(device), batch[1]).squeeze()\n\n loss = loss_function(logits, batch[2].to(device))\n\n loss_history.append(loss.item())\n running_loss += (loss_history[-1] - running_loss) / (i + 1) # Compute rolling average\n\n loss.backward() # Perform backprop, which will compute dL/dw\n\n if log_every_n and i % log_every_n == 0:\n print(\"Running loss: \", running_loss)\n\n running_loss_history.append(running_loss)\n\n nn.utils.clip_grad_norm_(model.parameters(), 3.0) # We clip gradient's norm to 3\n\n optimizer.step() # Update step: w = w - eta * dL / dW : eta = 1e-2 (0.01), gradient = 5e30; update value of 5e28\n\n print(\"Epoch completed!\")\n print(\"Epoch Loss: \", running_loss)\n print(\"Epoch Perplexity: \", math.exp(running_loss))\n\n # The history information can allow us to draw a loss plot\n return loss_history, running_loss_history", "def _train(self, dataset):\n net = buildNetwork(\n dataset.params_length,\n self._default_hidden_layers,\n 1 # a binary classifier only requires one output layer\n )\n ds = SupervisedDataSet(dataset)\n trainer = BackpropTrainer(net, ds)\n trainer.trainUntilConvergence()\n net.activate(params.as_serialized)", "def train(model, device, train_dl, loss_func, opt_func, epoch_idx):\n running_loss = 0.0\n batches_processed = 0\n for batch_idx, (x, y) in enumerate(train_dl, 1):\n x, y = x.to(device), y.to(device) # Push data to GPU\n\n opt_func.zero_grad() # Reset gradients\n # Forward pass\n output = model(x)\n loss = loss_func(output, y)\n # Backward pass\n loss.backward()\n # Optimizer step\n opt_func.step()\n\n # print statistics\n running_loss += loss.item()\n batches_processed += 1\n print(f'Train loss [Epoch {epoch_idx}]: {running_loss/batches_processed : .2f})')", "def train(model, loss_function, optimizer, data):\n loss_sum = 0\n\n # Set the model in train mode.\n model.train()\n\n # Create progress bar.\n progress_bar = tqdm.tqdm(total=len(data),\n unit='batch',\n desc='[train] batch loss: 0.000',\n leave=False)\n\n # Loop through training batches.\n for inputs, targets in data:\n\n # Reset gradients.\n optimizer.zero_grad()\n\n # Send data to GPU if CUDA is enabled.\n if next(model.parameters()).is_cuda:\n inputs = inputs.cuda()\n targets = targets.cuda()\n\n # Feed forward.\n with torch.set_grad_enabled(True):\n outputs = model(inputs)\n\n # Compute loss.\n loss = loss_function(outputs, targets)\n\n # Compute 
gradients.\n loss.backward()\n\n # Update parameters.\n optimizer.step()\n\n # Update progress bar.\n progress_bar.update(1)\n progress_bar.set_description(\n '[train] batch loss: {loss:.3f}'.format(loss=loss.item()))\n\n # Accumulate loss sum.\n loss_sum += loss.item()\n\n # Close progress bar.\n progress_bar.close()\n\n return loss_sum / len(data)", "def train(data_loader, model, optimizer, device):\n\t# put the model in train mode\n\tmodel.train()\n\t# go over every batch of data in data loader\n\tfor data in data_loader:\n\t\t# remember we have image and targets in our dataset class\n\t\tinputs = data['image']\n\t\ttargets = data['targets']\n\t\t# remove inputs/targets to cuda/cpu device\n\t\tinputs = inputs.to(device,dtype = torch.float)\n\t\ttargets = targets.to(device,dtype = torch.float)\n\t\t# zero grad the optimizer\n\toptimizer.zero_grad()\n\t# do the forward step of model\n\toutputs = model(inputs)\n\t# calculate loss\n\tloss = nn.BCEWithLogitsLoss()(outputs, targets.view(-1, 1)) \n\t# backward step the loss\n\tloss.backward()\n\t# step optimizer\n\toptimizer.step()\n\t# if you have a scheduler, you either need to\n\t# step it here or you have to step it after\n\t# the epoch. here, we are not using any learning # rate scheduler", "def train_model(model, loss_fn, optimizer, train_generator, dev_generator):\r\n ########## YOUR CODE HERE ##########\r\n # TODO: Given a model, data, and loss function, you should do the following:\r\n EPOCHS = 20\r\n dev_losses = []\r\n for iepoch in range(EPOCHS): \r\n # TODO: 1) Loop through the whole train dataset performing batch optimization with torch.optim.Adam\r\n for train_batch, train_label in train_generator:\r\n # Zero the gradients\r\n model.zero_grad()\r\n # Compute the loss\r\n loss = loss_fn(model(train_batch),train_label)\r\n # perform a backward pass (backpropagation)\r\n loss.backward()\r\n # Update the parameters\r\n optimizer.step()\r\n\r\n # TODO: 2) Each time you reach the end of the train dataset (one \"epoch\"), calculate the loss on the whole dev set;\r\n dev_loss = 0\r\n for ibatch, ilabel in dev_generator:\r\n dev_loss += loss_fn(model(ibatch), ilabel)\r\n\r\n # TODO: Make sure to print the dev set loss each epoch to stdout.\r\n print(\"Epoch:\", iepoch+1, \", dev loss:\", dev_loss)\r\n dev_losses.append(dev_loss)\r\n\r\n # TODO and 3) stop training and return the model once the development loss stops improving (called early stopping).\r\n if iepoch > 1 and dev_losses[-2]-dev_loss < 0.01:\r\n break\r\n return model", "def train(epochs, batch_size, lr, verbose):\n # autograd globally off\n torch.set_grad_enabled(False)\n # generate training and testing datasets\n train_data, train_label = generate_data()\n test_data, test_label = generate_data()\n # normalize data be centered at 0\n train_data, test_data = normalize(train_data, test_data)\n\n if verbose:\n print(\"--- Dataset ---\")\n print(\"Train X: \", train_data.size(), \" | Train y: \", train_label.size())\n print(\" Test X: \", test_data.size(), \" | Test y: \", test_label.size())\n\n layers =[]\n # input layer (2 input units)\n linear1 = Linear(2, 25, bias= True, weight_init=xavier_uniform)\n\n # 3 hidden layers (each 25 units)\n linear2 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear3 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear4 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n\n # output layer (2 output units)\n linear5 = Linear(25, 2, bias= True, weight_init=xavier_uniform)\n\n\n layers.append(linear1)\n 
layers.append(Relu())\n layers.append(linear2)\n layers.append(Relu())\n layers.append(linear3)\n layers.append(Relu())\n layers.append(linear4)\n layers.append(Tanh())\n layers.append(linear5)\n\n model = Sequential(layers)\n if verbose: print(\"Number of model parameters: {}\".format(sum([len(p) for p in model.param()])))\n\n criterion = MSE()\n optimizer = SGD(model, lr=lr)\n\n train_losses, test_losses = [], []\n train_accuracies, test_accuracies = [], []\n train_errors, test_errors = [], []\n\n if verbose: print(\"--- Training ---\")\n for epoch in range(1, epochs+1):\n if verbose:print(\"Epoch: {}\".format(epoch))\n\n # TRAINING\n for batch_idx in range(0, train_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(train_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, train_label.narrow(0, batch_idx, batch_size))\n train_losses.append(loss)\n if verbose: print(\"Train Loss: {:.2f}\".format(loss.item()))\n\n # put to zero weights and bias\n optimizer.zero_grad()\n\n ## Backpropagation\n # Calculate grad of loss\n loss_grad = criterion.backward()\n\n # Grad of the model\n model.backward(loss_grad)\n\n # Update parameters\n optimizer.step()\n\n train_prediction = model.forward(train_data)\n acc = accuracy(train_prediction, train_label)\n train_accuracies.append(acc)\n train_errors.append(1-acc)\n if verbose: print(\"Train Accuracy: {:.2f}\".format(acc.item()))\n\n # EVALUATION\n for batch_idx in range(0, test_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(test_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, test_label.narrow(0, batch_idx, batch_size))\n test_losses.append(loss)\n if verbose: print(\"Test Loss: {:.2f}\".format(loss.item()))\n\n test_prediction = model.forward(test_data)\n acc = accuracy(test_prediction, test_label)\n test_accuracies.append(acc) \n test_errors.append(1-acc)\n if verbose: print(\"Test Accuracy: {:.2f}\".format(acc.item()))\n\n return train_losses, test_losses, train_accuracies, test_accuracies, train_errors, test_errors", "def train(model, optimizer, criterion, trainloader, architecture, attacker=None, num_epochs=25, freq=10, early_stopping=True):\n for epoch in range(num_epochs):\n running_loss = 0.0\n total, correct, correct_adv, total_adv = 0.0, 0.0, 0.0, 1.0\n early_stop_param = 0.01\n for i, data in enumerate(trainloader):\n inputs, labels = data\n inputs = Variable(\n (inputs.cuda() if use_cuda else inputs), requires_grad=True)\n labels = Variable(\n (labels.cuda() if use_cuda else labels), requires_grad=False)\n\n y_hat = model(inputs)\n loss = criterion(y_hat, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n _, predicted = torch.max(y_hat.data, 1)\n total += labels.size(0)\n correct += predicted.eq(labels.data).sum()\n\n # print statistics\n running_loss = loss.data[0]\n\n if attacker:\n # only perturb inputs on the last epoch, to save time\n # if (i+1) % freq == 0: # and (epoch == num_epochs - 1):\n adv_inputs, adv_labels, num_unperturbed = attacker.attack(\n inputs, labels, model, optimizer)\n correct_adv += num_unperturbed\n total_adv += labels.size(0)\n\n if (i+1) % freq == 0:\n print('[%s: %d, %5d] loss: %.4f' % (architecture, epoch + 1, i + 1, running_loss / 2),\n correct/total, correct_adv/total_adv)\n if early_stopping:\n if running_loss < early_stop_param:\n print(\"Early Stopping !!!!!!!!!!\")\n 
break\n running_loss = 0.0\n\n return correct/total, correct_adv/total_adv", "def train(model, data_loader, optimizer, epoch, train_mloss, train_rloss, train_acc, learning_rate, lr_wr, output_tensor):\r\n print('===> Training mode')\r\n\r\n num_batches = len(data_loader) # iteration per epoch. e.g: 469\r\n total_step = args.epochs * num_batches\r\n epoch_tot_acc = 0\r\n\r\n # Switch to train mode\r\n model.train()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n start_time = timer()\r\n\r\n for batch_idx, (data, target) in enumerate(tqdm(data_loader, unit='batch')):\r\n batch_size = data.size(0)\r\n global_step = batch_idx + (epoch * num_batches) - num_batches\r\n\r\n labels = target\r\n target_one_hot = utils.one_hot_encode(target, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n data, target = Variable(data), Variable(target_one_hot)\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n labels = labels.to(args.device)\r\n\r\n # Train step - forward, backward and optimize\r\n optimizer.zero_grad()\r\n #utils.exponential_decay_LRR(optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n # learning rate policies\r\n if args.find_lr:\r\n utils.find_lr(optimizer, global_step)\r\n\r\n elif args.exp_decay_lr:\r\n utils.exponential_decay_LRR(\r\n optimizer, args.lr, global_step, args.decay_steps, args.decay_rate, args.staircase)\r\n\r\n elif args.one_cycle_policy:\r\n utils.one_cycle_policy(optimizer, args.lr, global_step, total_step)\r\n\r\n elif args.warm_restarts:\r\n # lr_wr.update_lr(optimizer, num_batches)\r\n lr_wr.update_lr(optimizer)\r\n\r\n output, reconstruction = model(data, labels, True)\r\n # utils.write_tensor(output, output_tensor)\r\n loss, margin_loss, recon_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n for param_group in optimizer.param_groups:\r\n lr_temp = param_group['lr']\r\n learning_rate.write('%.10f \\n' % lr_temp)\r\n\r\n # Calculate accuracy for each step and average accuracy for each epoch\r\n acc = utils.accuracy(output, labels, args.cuda)\r\n epoch_tot_acc += acc\r\n epoch_avg_acc = epoch_tot_acc / (batch_idx + 1)\r\n\r\n train_mloss.write('%.6f \\n' % margin_loss)\r\n train_rloss.write('%.6f \\n' % recon_loss)\r\n train_acc.write('%.6f \\n' % acc)\r\n\r\n # Print losses\r\n if batch_idx % args.log_interval == 0:\r\n template = 'Epoch {}/{}, ' \\\r\n 'Step {}/{}: ' \\\r\n '[Total loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f},' \\\r\n '\\tBatch accuracy: {:.6f},' \\\r\n '\\tAccuracy: {:.6f}]'\r\n tqdm.write(template.format(\r\n epoch,\r\n args.epochs,\r\n global_step,\r\n total_step,\r\n loss.data.item(),\r\n margin_loss.data.item(),\r\n recon_loss.data.item() if args.use_reconstruction_loss else 0,\r\n acc,\r\n epoch_avg_acc))\r\n\r\n # Print time elapsed for an epoch\r\n end_time = timer()\r\n\r\n global avg_training_time_per_epoch\r\n\r\n avg_training_time_per_epoch = (avg_training_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n print('Time elapsed for epoch {}: {:.0f}s.'.format(epoch, end_time - start_time))", "def train(self, training_data, cfg, **kwargs):\n pass", "def train():\n pass", "def train(self, train_loader):\n\n self.model.train()\n with torch.enable_grad():\n return 
self._iteration(train_loader)", "def train(enc, dec, clsfr, dataset, device):\n\n\t# hyperparameters\n\tepochs = 25\n\tbatch_size = 50\n\tlamda = 1.0\n\n\t# Loss Functions\n\tClsfrLoss = nn.CrossEntropyLoss().to(device)\n\tRecreationLoss = nn.MSELoss().to(device)\n\n\t# Optimizers\n\topt = optim.Adam(list(enc.parameters()) + list(dec.parameters()) + list(clsfr.parameters()), lr=0.0002, betas=(0.5, 0.999))\n\n\t# iterate for epochs\n\tfor epoch in range(1, epochs):\n\n\t\t# set the flags to train\n\t\tenc.train()\n\t\tdec.train()\n\t\tclsfr.train()\n\n\t\t# get the data loader\n\t\tdataloader = torch.utils.data.DataLoader(dataset[\"train\"], batch_size=batch_size, shuffle=True)\n\n\t\t# initialize loss values\n\t\tloss_clsfr = 0.\n\t\tloss_rec = 0.\n\t\tcorrect_epoch = 0\n\n\t\t# iterate over mini batches\n\t\tfor data, target in dataloader:\n\n\t\t\t# put data and target to device\n\t\t\tdata = data.to(device)\n\t\t\ttarget = target.to(device)\n\n\t\t\t# data.shape = [n,3,l,b]\n\t\t\t# target.shape = [n]\n\n\t\t\t# TRAIN\n\n\t\t\t# set gradients to zero\n\t\t\topt.zero_grad()\n\t\t\tenc.zero_grad()\n\t\t\tdec.zero_grad()\n\t\t\tclsfr.zero_grad()\n\n\t\t\t# get hidden\n\t\t\thidden = enc(data)\n\t\t\t\n\t\t\t# add reconstruction error to loss\n\t\t\tdata_ = dec(hidden)\n\t\t\tloss1 = RecreationLoss(data, data_)\n\t\t\tloss_rec += len(data)*loss1\n\n\t\t\t# get output of classifier and update it\n\t\t\tout = clsfr(hidden.view(hidden.shape[0], -1))\n\n\t\t\t# calculate Clasfier Loss\n\t\t\tloss2 = ClsfrLoss(out, target)\n\t\t\tloss_clsfr += len(data)*loss2\n\n\t\t\t# add losses and update params\n\t\t\tloss = loss1 + lamda*loss2\n\t\t\tloss.backward()\n\t\t\topt.step()\n\n\t\t\t# get accuracy and loss_epoch\n\t\t\tcorrect = torch.sum(target == torch.argmax(out, 1))\n\t\t\tcorrect_epoch += correct\n\n\n\t\tloss_clsfr = loss_clsfr/len(dataset[\"train\"])\n\t\tloss_rec = loss_rec/len(dataset[\"train\"])\n\t\t\n\t\t# Pretty Printing\n\t\tprint(\"[Clsfr] Epoch %04d/%04d : Loss : %06f \\t Accuracy : %04d/%04d (%06f)\"%\\\n\t\t\t(epoch, epochs, loss_clsfr, correct_epoch, len(dataset[\"train\"]), correct_epoch*100.0/float(len(dataset[\"train\"]))))\n\t\tprint(\"[Dec] Epoch %04d/%04d : Loss : %06f\"%\\\n\t\t\t(epoch, epochs, loss_rec))\n\t\tprint()\n\n\t\t# eval_model(enc, disc, dataset, device)\n\t\t# print()", "def test_model(net, data_loader):\n net.eval()\n running_loss = 0.0\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n y_d = data['y_descreen']\n outputs = net(X)\n loss = criterion(outputs, y_d)\n running_loss += loss\n return running_loss", "def train(self, dataset):\n \"*** YOUR CODE HERE question 1 ***\"\n while True:\n trainingComplete = True\n data = dataset.iterate_once(1)\n\n for feature, label in data:\n\n if nn.as_scalar(label) != self.get_prediction(feature):\n self.w.update(feature, nn.as_scalar(label))\n trainingComplete = False\n\n if trainingComplete:\n break", "def train(self, training_data):\n pass", "def train_epoch(data_loader, model, criterion, optimizer):\n # for i, (X, y) in enumerate(data_loader):\n for i, batch in enumerate(data_loader):\n print(\"trainning... 
batch number\", i)\n optimizer.zero_grad()\n X = torch.Tensor(batch[\"image\"])\n y = torch.Tensor(batch[\"depth\"])\n outputs = model(X)\n # calculate loss\n loss = criterion(outputs, y)\n # gradient_loss = gradient_criterion(outputs, y, device=\"cuda\")\n loss.backward()\n optimizer.step()", "def train(args, model, device, train_loader, optimizer, epoch, writer):\n model.train()\n running_loss = 0\n for batch_idx, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = F.nll_loss(output, target)\n running_loss += loss\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n writer.add_scalar('training loss', loss / args.log_interval, epoch * len(train_loader) + batch_idx)\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n running_loss = 0", "def train(\n dataset: torch.utils.data.Dataset,\n autoencoder: torch.nn.Module,\n epochs: int,\n batch_size: int,\n optimizer: torch.optim.Optimizer,\n scheduler: Any = None,\n validation: Optional[torch.utils.data.Dataset] = None,\n corruption: Optional[float] = None,\n cuda: bool = False,\n sampler: Optional[torch.utils.data.sampler.Sampler] = None,\n silent: bool = False,\n update_freq: Optional[int] = 1,\n update_callback: Optional[Callable[[float, float], None]] = None,\n epoch_callback: Optional[Callable[[int, torch.nn.Module], None]] = None,\n num_workers: int = 0,\n) -> None:\n dataloader = DataLoader(\n dataset,\n batch_size=batch_size,\n pin_memory=False,\n sampler=sampler,\n shuffle=True if sampler is None else False,\n num_workers=num_workers,\n )\n if validation is not None:\n validation_loader = DataLoader(\n validation,\n batch_size=batch_size,\n pin_memory=False,\n sampler=None,\n shuffle=False,\n num_workers=num_workers,\n )\n else:\n validation_loader = None\n autoencoder.train()\n perplexity_value = -1\n loss_value = 0\n for epoch in range(epochs):\n if scheduler is not None:\n scheduler.step()\n data_iterator = tqdm(\n dataloader,\n leave=True,\n unit=\"batch\",\n postfix={\"epo\": epoch, \"lss\": \"%.6f\" % 0.0, \"ppx\": \"%.6f\" % -1,},\n disable=silent,\n )\n losses = []\n for index, batch in enumerate(data_iterator):\n batch = batch[0]\n if cuda:\n batch = batch.cuda(non_blocking=True)\n # run the batch through the autoencoder and obtain the output\n if corruption is not None:\n recon, mean, logvar = autoencoder(F.dropout(batch, corruption))\n else:\n recon, mean, logvar = autoencoder(batch)\n # calculate the loss and backprop\n loss = autoencoder.loss(batch, recon, mean, logvar).mean()\n loss_value = float(loss.mean().item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step(closure=None)\n # log losses\n losses.append(loss_value)\n data_iterator.set_postfix(\n epo=epoch, lss=\"%.6f\" % loss_value, ppx=\"%.6f\" % perplexity_value,\n )\n if update_freq is not None and epoch % update_freq == 0:\n average_loss = (sum(losses) / len(losses)) if len(losses) > 0 else -1\n if validation_loader is not None:\n autoencoder.eval()\n perplexity_value = perplexity(\n validation_loader, autoencoder, cuda, silent\n )\n data_iterator.set_postfix(\n epo=epoch, lss=\"%.6f\" % average_loss, ppx=\"%.6f\" % perplexity_value,\n )\n autoencoder.train()\n else:\n perplexity_value = -1\n data_iterator.set_postfix(\n epo=epoch, lss=\"%.6f\" % average_loss, ppx=\"%.6f\" % -1,\n )\n if update_callback 
is not None:\n update_callback(\n autoencoder,\n epoch,\n optimizer.param_groups[0][\"lr\"],\n average_loss,\n perplexity_value,\n )\n if epoch_callback is not None:\n autoencoder.eval()\n epoch_callback(epoch, autoencoder)\n autoencoder.train()", "def trainer(model, train_dataloader, val_dataloader, num_epochs, path_to_save='/home/atharva',\n checkpoint_path='/home/atharva',\n checkpoint=100, train_batch=1, test_batch=1, device='cuda:0'): # 2 Marks.\n #torch.backends.cudnn.benchmark = True #Comment this if you are not using a GPU...\n # set the network to training mode.\n model.cuda() # if gpu available otherwise comment this line. \n # your code goes here. \n def accuracy(y1,y2):\n aa = list((y1==y2).astype('int'))\n acc = sum(aa) / len(aa)\n del aa\n return acc\n training_acc = []\n training_loss = []\n val_acc = []\n val_loss = []\n\n #Train the model on the train_dataloader.\n from torch.nn import CrossEntropyLoss\n criterion = CrossEntropyLoss()\n for epoch in range(num_epochs): # loop over the dataset multiple times\n preds = []\n labels = []\n \n for i in range(len(train_dataloader)):\n # get the inputs; data is a list of [inputs, labels]\n data_dict = train_dataloader[i]\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n output = model(data_dict['statement'], data_dict['justification'], data_dict['credit_history'])\n loss = criterion(outputs, data_dict['label'])\n loss.backward()\n optimizer.step()\n preds.append(output)\n labels.append(data_dict['label'])\n\n #Calculate the metrics, that is the loss and accuracy for the training phase per epoch and store them in a list.\n training_acc.append(accuracy(preds.numpy(), labels.numpy()))\n training_loss.append(criterion(preds, label))\n\n #Validating\n preds = []\n labels = []\n for i in range(len(val_dataloader)):\n # get the inputs; data is a list of [inputs, labels]\n data_dict = val_dataloader[i]\n\n # forward + backward + optimize\n outputs = model(data_dict['statement'], data_dict['justification'], data_dict['credit_history'])\n loss = criterion(outputs, labels)\n preds.append(output)\n labels.append(data_dict['label'])\n\n val_acc.append(accuracy(preds.numpy(), labels.numpy()))\n val_loss.append(criterion(preds, label))\n\n #Save your model at the maximum validation accuracy obtained till the latest epoch.\n if val_acc[-1] > max(val_acc[:-1]):\n #Save model\n torch.save(model.state_dict(), save_path)\n\n #Checkpoint at the 100th epoch\n if epoch%100 == 0:\n #make a checkpoint\n torch.save(model.state_dict(), save_path)\n\n\n\n \n\n\n plt.plot(training_acc)\n plt.plot(val_acc)\n plt.plot(training_loss)\n plt.plot(val_loss)\n plt.show()", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n 
\r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def get_adv_train_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.Dataset,\n argparse.Namespace,\n torch.nn.Module,\n str,\n Progress,\n TaskID,\n ],\n Tuple[Dict[str, float], pd.DataFrame],\n ]:\n pass", "def training(model, trainloader, in_args_epochs, in_args_gpu, \n in_args_learning_rate):\n if in_args_gpu == True:\n model.to('cuda')\n epochs = in_args_epochs\n criterion = nn.NLLLoss()\n optimizer = optim.Adam(model.classifier.parameters(), \n lr=in_args_learning_rate)\n print_every = 100\n steps = 0\n for e in range(epochs):\n running_loss = 0\n for ii, (inputs, labels) in enumerate(trainloader):\n steps += 1\n if in_args_gpu == True:\n inputs, labels = inputs.to('cuda'), labels.to('cuda')\n optimizer.zero_grad()\n # Forward and backward passes\n outputs = model.forward(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n if steps % print_every == 0:\n print(\"Epoch: {}/{}... \".format(e+1, epochs),\n \"Training Loss: {:.4f}\".format(running_loss/print_every))\n running_loss = 0", "def train(model: torch.nn.Module,\n trainingset_dataloader: torch.utils.data.DataLoader, trainingset_eval_dataloader: torch.utils.data.DataLoader,\n validationset_eval_dataloader: torch.utils.data.DataLoader,\n results_directory: str = \"results\", n_updates: int = int(1e5), show_progress: bool = True,\n load_file: str = None, device: torch.device = torch.device('cuda:0'), rnd_seed: int = 0,\n num_torch_threads: int = 3, learning_rate: float = 1e-4, l1_weight_decay: float = 0,\n l2_weight_decay: float = 0, log_training_stats_at: int = int(1e2), evaluate_at: int = int(5e3)):\n # Append current timestamp to results directory\n results_directory = os.path.join(results_directory, datetime.datetime.now().strftime('%Y_%m_%d_%H_%M_%S'))\n os.makedirs(results_directory, exist_ok=True)\n \n # Read config file and set up results folder\n logfile = os.path.join(results_directory, 'log.txt')\n checkpointdir = os.path.join(results_directory, 'checkpoint')\n os.makedirs(checkpointdir, exist_ok=True)\n tensorboarddir = os.path.join(results_directory, 'tensorboard')\n os.makedirs(tensorboarddir, exist_ok=True)\n \n # Prepare tensorboard writer\n writer = SummaryWriter(log_dir=tensorboarddir)\n \n # Print all outputs to logfile and terminal\n tee_print = TeePrint(logfile)\n tprint = tee_print.tee_print\n \n # Set up PyTorch and numpy random seeds\n try:\n torch.set_num_threads(num_torch_threads)\n torch.manual_seed(rnd_seed)\n np.random.seed(rnd_seed)\n \n # Send model to device\n model.to(device)\n \n # Define loss function\n mean_cross_entropy = torch.nn.BCEWithLogitsLoss().to(device)\n \n # Get optimizer (eps needs to be at about 1e-4 to be numerically stable with 16 bit float)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=l2_weight_decay, eps=1e-4)\n \n # Create a checkpoint dictionary with objects we want to have saved and loaded if needed\n 
state = dict(model=model, optimizer=optimizer, update=0, best_validation_loss=np.inf)\n \n # Setup the SaverLoader class to save/load our checkpoint dictionary to files or to RAM objects\n saver_loader = SaverLoader(save_dict=state, device=device, save_dir=checkpointdir,\n n_savefiles=1, # keep only the latest checkpoint\n n_inmem=1 # save checkpoint only in RAM\n )\n \n # Load previous checkpoint dictionary, if load_file is specified\n if load_file is not None:\n state.update(saver_loader.load_from_file(loadname=load_file, verbose=True))\n tprint(f\"Loaded checkpoint from file {load_file}\")\n update, best_validation_loss = state['update'], state['best_validation_loss']\n \n # Save checkpoint dictionary to RAM object\n saver_loader.save_to_ram(savename=str(update))\n \n #\n # Start training\n #\n try:\n tprint(\"Training model...\")\n update_progess_bar = tqdm(total=n_updates, disable=not show_progress, position=0,\n desc=f\"loss={np.nan:6.4f}\")\n while update < n_updates:\n for data in trainingset_dataloader:\n # Get samples as lists\n labels, inputs, sequence_lengths, counts_per_sequence, sample_ids = data\n \n # Apply attention-based sequence reduction and create minibatch\n with torch.no_grad():\n labels, inputs, sequence_lengths, n_sequences = model.reduce_and_stack_minibatch(\n labels, inputs, sequence_lengths, counts_per_sequence)\n \n # Reset gradients\n optimizer.zero_grad()\n \n # Calculate predictions from reduced sequences\n logit_outputs = model(inputs, n_sequences)\n \n # Calculate losses\n pred_loss = mean_cross_entropy(logit_outputs, labels[..., -1])\n l1reg_loss = (torch.mean(torch.stack([p.abs().float().mean() for p in model.parameters()])))\n loss = pred_loss + l1reg_loss * l1_weight_decay\n \n # Perform update\n loss.backward()\n optimizer.step()\n \n update += 1\n update_progess_bar.update()\n update_progess_bar.set_description(desc=f\"loss={loss.item():6.4f}\", refresh=True)\n \n # Add to tensorboard\n if update % log_training_stats_at == 0:\n tb_group = 'training/'\n writer.add_scalar(tag=tb_group+'pred_loss', scalar_value=pred_loss, global_step=update)\n writer.add_scalar(tag=tb_group+'l1reg_loss', scalar_value=l1reg_loss, global_step=update)\n writer.add_scalar(tag=tb_group+'loss', scalar_value=loss, global_step=update)\n writer.add_histogram(tag=tb_group+'logit_outputs', values=logit_outputs, global_step=update)\n \n # Calculate scores and loss on training set and validation set\n if update % evaluate_at == 0 or update == n_updates or update == 1:\n print(\" Calculating training score...\")\n roc_auc, bacc, f1, scoring_loss = evaluate(model=model, dataloader=trainingset_eval_dataloader)\n print(f\" ...done!\")\n tprint(f\"[training_inference] u: {update:07d}; roc_auc: {roc_auc:6.4f}; bacc: {bacc:6.4f}; \"\n f\"f1: {f1:6.4f}; scoring_loss: {scoring_loss:6.4f}\")\n \n tb_group = 'training_inference/'\n writer.add_scalar(tag=tb_group+'roc_auc', scalar_value=roc_auc, global_step=update)\n writer.add_scalar(tag=tb_group+'bacc', scalar_value=bacc, global_step=update)\n writer.add_scalar(tag=tb_group+'f1', scalar_value=f1, global_step=update)\n writer.add_scalar(tag=tb_group+'scoring_loss', scalar_value=scoring_loss, global_step=update)\n \n print(\" Calculating validation score...\")\n roc_auc, bacc, f1, scoring_loss = evaluate(model=model, dataloader=validationset_eval_dataloader)\n print(f\" ...done!\")\n tprint(f\"[validation] u: {update:07d}; roc_auc: {roc_auc:6.4f}; bacc: {bacc:6.4f}; \"\n f\"f1: {f1:6.4f}; scoring_loss: {scoring_loss:6.4f}\")\n \n tb_group = 
'validation/'\n writer.add_scalar(tag=tb_group+'roc_auc', scalar_value=roc_auc, global_step=update)\n writer.add_scalar(tag=tb_group+'bacc', scalar_value=bacc, global_step=update)\n writer.add_scalar(tag=tb_group+'f1', scalar_value=f1, global_step=update)\n writer.add_scalar(tag=tb_group+'scoring_loss', scalar_value=scoring_loss, global_step=update)\n writer.add_histogram(tag=tb_group+'weights',\n values=model.sequence_embedding_16bit.conv_aas.weight.cpu().detach(),\n global_step=update)\n writer.add_histogram(tag=tb_group+'biases',\n values=model.sequence_embedding_16bit.conv_aas.bias.cpu().detach(),\n global_step=update)\n \n # If we have a new best loss on the validation set, we save the model as new best model\n if best_validation_loss > scoring_loss:\n best_validation_loss = scoring_loss\n tprint(f\" New best validation loss: {scoring_loss}\")\n # Save current state as RAM object\n state['update'] = update\n state['best_validation_loss'] = scoring_loss\n # Save checkpoint dictionary with currently best model to RAM\n saver_loader.save_to_ram(savename=str(update))\n # This would save to disk every time a new best model is found, which can be slow\n # saver_loader.save_to_file(filename=f'best_so_far_u{update}.tar.gzip')\n \n if update >= n_updates:\n break\n update_progess_bar.close()\n finally:\n # In any case, save the current model and best model to a file\n saver_loader.save_to_file(filename=f'lastsave_failed_u{update}.tar.gzip')\n state.update(saver_loader.load_from_ram()) # load best model so far\n saver_loader.save_to_file(filename=f'best_failed_u{update}.tar.gzip')\n print('Finished Training!')\n except Exception as e:\n with open(logfile, 'a') as lf:\n print(f\"Exception: {e}\", file=lf)\n raise e\n finally:\n close_all() # Clean up", "def main():\n # Fix random seed.\n torch.manual_seed(0)\n\n # Create checkpoint directory.\n try:\n os.mkdir('checkpoints')\n except FileExistsError:\n pass\n\n # Make preparations.\n args = get_args()\n logger = get_logger()\n data_train, data_val, data_test = get_data(args.batch_size,\n args.num_workers)\n model = get_model()\n\n # Log command arguments.\n logger.info(' '.join(sys.argv))\n logger.info(vars(args))\n\n # Send the model to the GPU, if enabled and available.\n if args.cuda:\n model = model.cuda()\n\n # Create the loss function and optimizer.\n loss_function = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(),\n lr=args.learning_rate,\n momentum=args.momentum)\n\n # Load checkpoint, if given.\n if args.checkpoint:\n load_checkpoint(args.checkpoint, model, optimizer)\n\n # Loop epochs.\n for epoch in range(args.num_epochs):\n logger.info(f'Epoch {epoch}:')\n\n mean_loss = train(model, loss_function, optimizer, data_train)\n logger.info(f' - [training] mean loss: {mean_loss:.3f}')\n\n accuracy = evaluate(model, data_val)\n logger.info(f' - [validation] accuracy: {accuracy:.3f}')\n\n torch.save([model.state_dict(), optimizer.state_dict()],\n os.path.join('checkpoints', f'{epoch}.pth'))\n\n # Run final evaluation on the test data.\n logger.info('Test:')\n accuracy = evaluate(model, data_test)\n logger.info(f' - [test] accuracy: {accuracy:.3f}')", "def train(Dataset, model, criterion, epoch, optimizer, writer, device, args):\n\n # Create instances to accumulate losses etc.\n losses = AverageMeter()\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n\n # train\n for i, (inp, target) in 
enumerate(Dataset.train_loader):\n inp = inp.to(device)\n target = target.to(device)\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # compute model forward\n output = model(inp)\n\n # calculate loss\n loss = criterion(output, target)\n\n # record precision/accuracy and losses\n prec1 = accuracy(output, target)[0]\n top1.update(prec1.item(), inp.size(0))\n losses.update(loss.item(), inp.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # print progress\n if i % args.print_freq == 0:\n print('Training: [{0}][{1}/{2}]\\t' \n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch+1, i, len(Dataset.train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1))\n\n # TensorBoard summary logging\n writer.add_scalar('training/train_precision@1', top1.avg, epoch)\n writer.add_scalar('training/train_class_loss', losses.avg, epoch)\n writer.add_scalar('training/train_average_loss', losses.avg, epoch)\n\n print(' * Train: Loss {loss.avg:.5f} Prec@1 {top1.avg:.3f}'.format(loss=losses, top1=top1))", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def __call__(self):\n #Losses and optimizers\n for epoch in range(self.nb_epochs): # loop over the dataset multiple times\n self.train_loss = 0.0\n 
self.gan_loss = 0.0\n self.loss_discrim = 0.0\n val_loss = 0.0\n nb_data = 0.\n nb_data_val = 0.\n for i, data in enumerate(self.trainloader, 0):\n # get the batch; data is a list of [inputs, labels]\n inputs, real = data\n inputs, real = inputs.to(device), real.to(device)\n if i%self.discrimTrainPeriod==0:\n self.trainDiscrim(inputs, real)\n else:\n self.trainGen(inputs, real)\n nb_data += 1.\n #occasionnally save an example target/generated\n if i%self.displayPeriod==0:\n self.gen.eval()\n real = self.unNormalize(real[0,:,:,:].detach().cpu())\n self.transformToImage(real).save(self.targetFile)\n fake = self.gen(inputs)\n fake = self.unNormalize(fake[0,:,:,:].detach().cpu())\n self.transformToImage(fake).save(self.generatedFile)\n\n self.gen.eval()\n for i, data in enumerate(self.valloader, 0):\n with torch.no_grad():\n # get the inputs; data is a list of [inputs, labels]\n inputs, real = data\n inputs, real = inputs.to(device), real.to(device)\n #compute L1 loss\n fake = self.gen(inputs)\n lossGenL1 = self.criterionL1(fake, real)\n #statistics\n val_loss += lossGenL1.item()\n nb_data_val += 1.\n self.gan_loss = self.gan_loss / nb_data\n self.train_loss = self.train_loss / nb_data\n self.loss_discrim = self.loss_discrim / nb_data\n val_loss = val_loss / nb_data_val\n self.gan_loss_list.append(self.gan_loss)\n self.train_loss_list.append(self.train_loss)\n self.val_loss_list.append(val_loss)\n print(\"Epoch \", epoch, \"; train loss = \", self.train_loss,\n \"; val loss = \", val_loss, \"; gan loss = \", self.gan_loss,\n \"; loss discrim = \", self.loss_discrim)\n\n plt.plot(range(len(self.train_loss_list)), self.train_loss_list,\n self.val_loss_list, self.gan_loss_list)\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Generator Loss\")\n plt.savefig(self.graphFile)\n #save the weights\n torch.save(self.gen.state_dict(), self.savefileGen)\n torch.save(self.discrim.state_dict(), self.savefileDiscrim)", "def train(self, dataset, model_dir):\n raise NotImplementedError", "def train_epoch(model, train_dataloader, optimizer, loss_fn):\n model.train()\n total_training_loss = 0\n for batch_index, batch in enumerate(train_dataloader):\n batch = batch[0].view(-1,1,28,28).float()\n output_batch = model(batch)\n loss = loss_fn(batch, output_batch, model.prev_means, model.prev_vars)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n total_training_loss += loss", "def train(self, train_dataset, val_dataset):\n\n # check fine_tuning option\n model_path = os.path.join(self.check_point, 'model.pt')\n if self.fine_tune and not os.path.exists(model_path):\n raise Exception('Cannot find %s.' % model_path)\n elif self.fine_tune and os.path.exists(model_path):\n if self.verbose:\n print('Loading %s for finetuning.' % model_path)\n self.model = torch.load(model_path)\n '''\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n model_dict = self.model.state_dict()\n net_dict = net.state_dict()\n # 1. filter out unnecessary keys\n pretrained_dict = {k: v for k, v in net_dict.items() if k in model_dict}\n # 2. overwrite entries in the existing state dict\n model_dict.update(pretrained_dict)\n # 3. 
load the new state dict\n self.model.load_state_dict(model_dict)\n '''\n self.optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)\n\n # capture best model\n best_val_psnr = -1\n best_psnr = -1\n best_model_state = self.model.state_dict()\n\n with open(os.path.join(self.check_point, 'PSNR' + '.txt'), 'w') as f:\n # Train the model\n for epoch in range(self.num_epochs):\n self._epoch_step(train_dataset, epoch)\n self.scheduler.step()\n\n if epoch % 10 == 0:\n if self.verbose:\n print('Computing PSNR...')\n\n # capture running PSNR on train and val dataset\n train_psnr, train_ssim, _, _, _ = self._check_PSNR(val_dataset)\n self.hist_train_psnr.append(train_psnr)\n\n f.write('epoch%d:\\t%.3f\\n' % (epoch, train_psnr))\n\n if self.verbose:\n print('Average train PSNR:%.3fdB average ssim: %.3f' % (train_psnr, train_ssim))\n print('')\n if best_psnr < train_psnr:\n best_psnr = train_psnr\n # write the model to hard-disk for testing\n if not os.path.exists(self.check_point):\n os.makedirs(self.check_point)\n model_path = os.path.join(self.check_point, 'model.pt')\n torch.save(self.model, model_path)\n print(' Best average psnr: %.3f' % (best_psnr))\n print('')", "def train(self):\n start_time = time()\n self.model.train()\n\n for step, sample in enumerate(self.train_loader):\n self.optimizer.zero_grad()\n\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n loss.backward()\n self.train_losses.append(loss.item())\n\n self.optimizer.step(None)\n\n # print an incredible progress bar\n print(f'\\r{self.progress_bar} │ Loss: {np.mean(self.train_losses):.6f}', end='')\n self.progress_bar.inc()\n\n # log average loss of this epoch\n mean_epoch_loss = np.mean(self.train_losses)\n self.sw.add_scalar(tag='train_loss', scalar_value=mean_epoch_loss, global_step=self.epoch)\n self.train_losses = []\n\n # log epoch duration\n print(f' │ T: {time() - start_time:.2f} s')", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def train(\n self, training_data: TrainingData, cfg: DazuConfig, **kwargs: Any\n ) -> None:", "def training_network(self, session, epochs, batch_size, get_batches_fn,\\\n\t\t\t\t\t\t train_op, cross_entropy_loss, 
image_input, correct_label,\\\n\t\t\t\t\t\t keep_prob, learning_rate, saver):\n\n\t #\n\t\tfor epoch in range(epochs):\n\t\t\t#\n\t\t\ts_time = time.time()\n\t\t\t#\n\t\t\tfor image, targets in get_batches_fn(batch_size):\n\t\t\t\t#\n\t\t\t\t_, loss = session.run( [train_op, cross_entropy_loss],feed_dict = \\\n\t\t\t\t\t\t\t\t\t {image_input: image, correct_label: targets,\\\n\t\t\t\t\t\t\t\t\t keep_prob: 0.8 , learning_rate: 0.0001 })\n\t\t\t# Print data on the learning process\n\t\t\tprint(\"Epoch: {}\".format(epoch + 1), \"/ {}\".format(epochs), \" Loss: {:.3f}\".format(loss))", "def main(opt):\n ##################################################################################################################\n # Setup\n ##################################################################################################################\n # Device handling (CPU, GPU, multi GPU)\n if opt.device is None:\n device = torch.device('cpu')\n opt.n_gpu = 0\n else:\n opt.n_gpu = len(opt.device)\n os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.device[opt.local_rank])\n device = torch.device('cuda:0')\n torch.cuda.set_device(0)\n # In the case of multi GPU: sets up distributed training\n if opt.n_gpu > 1 or opt.local_rank > 0:\n torch.distributed.init_process_group(backend='nccl')\n # Since we are in distributed mode, divide batch size by the number of GPUs\n assert opt.batch_size % opt.n_gpu == 0\n opt.batch_size = opt.batch_size // opt.n_gpu\n # Seed\n if opt.seed is None:\n opt.seed = random.randint(1, 10000)\n else:\n assert isinstance(opt.seed, int) and opt.seed > 0\n print(f'Learning on {opt.n_gpu} GPU(s) (seed: {opt.seed})')\n random.seed(opt.seed)\n np.random.seed(opt.seed + opt.local_rank)\n torch.manual_seed(opt.seed)\n # cuDNN\n if opt.n_gpu > 1 or opt.local_rank > 0:\n assert torch.backends.cudnn.enabled\n cudnn.deterministic = True\n # Mixed-precision training\n if opt.torch_amp and not torch_amp_imported:\n raise ImportError('Mixed-precision not supported by this PyTorch version, upgrade PyTorch or use Apex')\n if opt.apex_amp and not apex_amp_imported:\n raise ImportError('Apex not installed (https://github.com/NVIDIA/apex)')\n\n ##################################################################################################################\n # Data\n ##################################################################################################################\n print('Loading data...')\n # Load data\n dataset = data.load_dataset(opt, True)\n trainset = dataset.get_fold('train')\n valset = dataset.get_fold('val')\n # Change validation sequence length, if specified\n if opt.seq_len_test is not None:\n valset.change_seq_len(opt.seq_len_test)\n\n # Handle random seed for dataloader workers\n def worker_init_fn(worker_id):\n np.random.seed((opt.seed + itr + opt.local_rank * opt.n_workers + worker_id) % (2**32 - 1))\n # Dataloader\n sampler = None\n shuffle = True\n if opt.n_gpu > 1:\n # Let the distributed sampler shuffle for the distributed case\n sampler = torch.utils.data.distributed.DistributedSampler(trainset)\n shuffle = False\n train_loader = DataLoader(trainset, batch_size=opt.batch_size, collate_fn=data.collate_fn, sampler=sampler,\n num_workers=opt.n_workers, shuffle=shuffle, drop_last=True, pin_memory=True,\n worker_init_fn=worker_init_fn)\n val_loader = DataLoader(valset, batch_size=opt.batch_size_test, collate_fn=data.collate_fn,\n num_workers=opt.n_workers, shuffle=True, drop_last=True, pin_memory=True,\n worker_init_fn=worker_init_fn) if opt.local_rank == 0 else None\n\n 
##################################################################################################################\n # Model\n ##################################################################################################################\n # Build model\n print('Building model...')\n model = srvp.StochasticLatentResidualVideoPredictor(opt.nx, opt.nc, opt.nf, opt.nhx, opt.ny, opt.nz, opt.skipco,\n opt.nt_inf, opt.nh_inf, opt.nlayers_inf, opt.nh_res,\n opt.nlayers_res, opt.archi)\n model.init(res_gain=opt.res_gain)\n # Make the batch norms in the model synchronized in the distributed case\n if opt.n_gpu > 1:\n if opt.apex_amp:\n from apex.parallel import convert_syncbn_model\n model = convert_syncbn_model(model)\n else:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model.to(device)\n\n ##################################################################################################################\n # Optimizer\n ##################################################################################################################\n optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)\n opt.n_iter = opt.lr_scheduling_burnin + opt.lr_scheduling_n_iter\n lr_sch_n_iter = opt.lr_scheduling_n_iter\n lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,\n lr_lambda=lambda i: max(0, (lr_sch_n_iter - i) / lr_sch_n_iter))\n\n ##################################################################################################################\n # Automatic Mixed Precision\n ##################################################################################################################\n scaler = None\n if opt.torch_amp:\n scaler = torch_amp.GradScaler()\n if opt.apex_amp:\n model, optimizer = apex_amp.initialize(model, optimizer, opt_level=opt.amp_opt_lvl,\n keep_batchnorm_fp32=opt.keep_batchnorm_fp32,\n verbosity=opt.apex_verbose)\n\n ##################################################################################################################\n # Multi GPU\n ##################################################################################################################\n if opt.n_gpu > 1:\n if opt.apex_amp:\n from apex.parallel import DistributedDataParallel\n forward_fn = DistributedDataParallel(model)\n else:\n forward_fn = torch.nn.parallel.DistributedDataParallel(model)\n else:\n forward_fn = model\n\n ##################################################################################################################\n # Training\n ##################################################################################################################\n cudnn.benchmark = True # Activate benchmarks to select the fastest algorithms\n assert opt.n_iter > 0\n itr = 0\n finished = False\n # Progress bar\n if opt.local_rank == 0:\n pb = tqdm(total=opt.n_iter, ncols=0)\n # Current and best model evaluation metric (lower is better)\n val_metric = None\n best_val_metric = None\n try:\n while not finished:\n if sampler is not None:\n sampler.set_epoch(opt.seed + itr)\n # -------- TRAIN --------\n for batch in train_loader:\n # Stop when the given number of optimization steps have been done\n if itr >= opt.n_iter:\n finished = True\n status_code = 0\n break\n\n itr += 1\n model.train()\n # Optimization step on batch\n # Allow PyTorch's mixed-precision computations if required while ensuring retrocompatibility\n with (torch_amp.autocast() if opt.torch_amp else nullcontext()):\n loss, nll, kl_y_0, kl_z = train(forward_fn, optimizer, scaler, batch, device, opt)\n\n # Learning rate 
scheduling\n if itr >= opt.lr_scheduling_burnin:\n lr_scheduler.step()\n\n # Evaluation and model saving are performed on the process with local rank zero\n if opt.local_rank == 0:\n # Evaluation\n if itr % opt.val_interval == 0:\n model.eval()\n val_metric = evaluate(forward_fn, val_loader, device, opt)\n if best_val_metric is None or best_val_metric > val_metric:\n best_val_metric = val_metric\n torch.save(model.state_dict(), os.path.join(opt.save_path, 'model_best.pt'))\n\n # Checkpointing\n if opt.chkpt_interval is not None and itr % opt.chkpt_interval == 0:\n torch.save(model.state_dict(), os.path.join(opt.save_path, f'model_{itr}.pt'))\n\n # Progress bar\n if opt.local_rank == 0:\n pb.set_postfix({'loss': loss, 'nll': nll, 'kl_y_0': kl_y_0, 'kl_z': kl_z, 'val_metric': val_metric,\n 'best_val_metric': best_val_metric}, refresh=False)\n pb.update()\n\n except KeyboardInterrupt:\n status_code = 130\n\n if opt.local_rank == 0:\n pb.close()\n # Save model\n print('Saving...')\n if opt.local_rank == 0:\n torch.save(model.state_dict(), os.path.join(opt.save_path, 'model.pt'))\n print('Done')\n return status_code", "def train(train_features, train_labels, val_features, val_labels, network, optimizer, loss, config, log_date, log_timestamp):\n\n # prints the number of learnable parameters in the network\n count_parameters(network)\n\n # init network using weight initialization of choice\n network = init_weights(network)\n # send network to GPU\n network.to(config['gpu'])\n network.train()\n\n # if weighted loss chosen, calculate weights based on training dataset; else each class is weighted equally\n if config['use_weights']:\n class_weights = class_weight.compute_class_weight('balanced', classes=np.unique(train_labels + 1), y=train_labels + 1)\n if config['loss'] == 'cross_entropy':\n loss.weights = class_weights\n print('Applied weighted class weights: ')\n print(class_weights)\n else:\n class_weights = class_weight.compute_class_weight(None, classes=np.unique(train_labels + 1), y=train_labels + 1)\n if config['loss'] == 'cross_entropy':\n loss.weights = class_weights\n\n\n # initialize optimizer and loss\n opt, criterion = optimizer, loss\n\n if config['loss'] == 'maxup':\n maxup = Maxup(myNoiseAdditionAugmenter, ntrials=4)\n\n # initialize training and validation dataset, define DataLoaders\n dataset = torch.utils.data.TensorDataset(torch.from_numpy(train_features), torch.from_numpy(train_labels))\n trainloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=True)\n dataset = torch.utils.data.TensorDataset(torch.from_numpy(val_features).float(), torch.from_numpy(val_labels))\n valloader = DataLoader(dataset, batch_size=config['batch_size'], shuffle=False)\n\n # counters and objects used for early stopping and learning rate adjustment\n best_loss = np.inf\n best_network = None\n best_val_losses = None\n best_train_losses = None\n best_val_preds = None\n best_train_preds = None\n early_stop = False\n lr_pt_counter = 0\n es_pt_counter = 0\n\n # training loop; iterates through epochs\n for e in range(config['epochs']):\n \"\"\"\n TRAINING\n \"\"\"\n # helper objects\n train_preds = []\n train_gt = []\n train_losses = []\n start_time = time.time()\n batch_num = 1\n\n # iterate over train dataset\n for i, (x, y) in enumerate(trainloader):\n # send x and y to GPU\n inputs, targets = x.to(config['gpu']), y.to(config['gpu'])\n # zero accumulated gradients\n opt.zero_grad()\n\n if config['loss'] == 'maxup':\n # Increase the inputs via data augmentation\n inputs, targets = 
maxup(inputs, targets)\n\n # send inputs through network to get predictions, calculate loss and backpropagate\n train_output = network(inputs)\n\n if config['loss'] == 'maxup':\n # calculates loss\n train_loss = maxup.maxup_loss(train_output, targets.long())[0]\n else:\n train_loss = criterion(train_output, targets.long())\n\n train_loss.backward()\n opt.step()\n # append train loss to list\n train_losses.append(train_loss.item())\n\n # create predictions and append them to final list\n y_preds = np.argmax(train_output.cpu().detach().numpy(), axis=-1)\n y_true = targets.cpu().numpy().flatten()\n train_preds = np.concatenate((np.array(train_preds, int), np.array(y_preds, int)))\n train_gt = np.concatenate((np.array(train_gt, int), np.array(y_true, int)))\n\n # if verbose print out batch wise results (batch number, loss and time)\n if config['verbose']:\n if batch_num % config['print_freq'] == 0 and batch_num > 0:\n cur_loss = np.mean(train_losses)\n elapsed = time.time() - start_time\n print('| epoch {:3d} | {:5d} batches | ms/batch {:5.2f} | '\n 'train loss {:5.2f}'.format(e, batch_num, elapsed * 1000 / config['batch_size'], cur_loss))\n start_time = time.time()\n batch_num += 1\n\n # plot gradient flow if wanted\n if config['save_gradient_plot']:\n plot_grad_flow(network)\n\n \"\"\"\n VALIDATION\n \"\"\"\n\n # helper objects\n val_preds = []\n val_gt = []\n val_losses = []\n\n # set network to eval mode\n network.eval()\n with torch.no_grad():\n # iterate over validation dataset\n for i, (x, y) in enumerate(valloader):\n # send x and y to GPU\n inputs, targets = x.to(config['gpu']), y.to(config['gpu'])\n\n if config['loss'] == 'maxup':\n # Increase the inputs via data augmentation\n inputs, targets = maxup(inputs, targets)\n\n # send inputs through network to get predictions, loss and calculate softmax probabilities\n val_output = network(inputs)\n if config['loss'] == 'maxup':\n # calculates loss\n val_loss = maxup.maxup_loss(val_output, targets.long())[0]\n else:\n val_loss = criterion(val_output, targets.long())\n\n val_output = torch.nn.functional.softmax(val_output, dim=1)\n\n # append validation loss to list\n val_losses.append(val_loss.item())\n\n # create predictions and append them to final list\n y_preds = np.argmax(val_output.cpu().numpy(), axis=-1)\n y_true = targets.cpu().numpy().flatten()\n val_preds = np.concatenate((np.array(val_preds, int), np.array(y_preds, int)))\n val_gt = np.concatenate((np.array(val_gt, int), np.array(y_true, int)))\n\n # print epoch evaluation results for train and validation dataset\n print(\"EPOCH: {}/{}\".format(e + 1, config['epochs']),\n \"Train Loss: {:.4f}\".format(np.mean(train_losses)),\n \"Train Acc: {:.4f}\".format(jaccard_score(train_gt, train_preds, average='macro')),\n \"Train Prec: {:.4f}\".format(precision_score(train_gt, train_preds, average='macro')),\n \"Train Rcll: {:.4f}\".format(recall_score(train_gt, train_preds, average='macro')),\n \"Train F1: {:.4f}\".format(f1_score(train_gt, train_preds, average='macro')),\n \"Val Loss: {:.4f}\".format(np.mean(val_losses)),\n \"Val Acc: {:.4f}\".format(jaccard_score(val_gt, val_preds, average='macro')),\n \"Val Prec: {:.4f}\".format(precision_score(val_gt, val_preds, average='macro')),\n \"Val Rcll: {:.4f}\".format(recall_score(val_gt, val_preds, average='macro')),\n \"Val F1: {:.4f}\".format(f1_score(val_gt, val_preds, average='macro')))\n\n # if chosen, print the value counts of the predicted labels for train and validation dataset\n if config['print_counts']:\n y_train = 
np.bincount(train_preds)\n ii_train = np.nonzero(y_train)[0]\n y_val = np.bincount(val_preds)\n ii_val = np.nonzero(y_val)[0]\n print('Predicted Train Labels: ')\n print(np.vstack((ii_train, y_train[ii_train])).T)\n print('Predicted Val Labels: ')\n print(np.vstack((ii_val, y_val[ii_val])).T)\n\n # if adjust learning rate is enabled\n if config['adj_lr'] or config['early_stopping']:\n if best_loss < np.mean(val_losses):\n lr_pt_counter += 1\n es_pt_counter += 1\n\n # adjust learning rate check\n if lr_pt_counter >= config['adj_lr_patience'] and config['adj_lr']:\n config['lr'] *= 0.1\n for param_group in opt.param_groups:\n param_group['lr'] = param_group['lr'] * 0.1\n print('Changing learning rate to {} since no loss improvement over {} epochs.'\n .format(config['lr'], str(lr_pt_counter)))\n\n # early stopping check\n if es_pt_counter >= config['es_patience'] and config['early_stopping']:\n print('Stopping training early since no loss improvement over {} epochs.'\n .format(str(es_pt_counter)))\n early_stop = True\n # print results of best epoch\n print('Final (best) results: ')\n print(\"Train Loss: {:.4f}\".format(np.mean(best_train_losses)),\n \"Train Acc: {:.4f}\".format(jaccard_score(train_gt, best_train_preds, average='macro')),\n \"Train Prec: {:.4f}\".format(precision_score(train_gt, best_train_preds, average='macro')),\n \"Train Rcll: {:.4f}\".format(recall_score(train_gt, best_train_preds, average='macro')),\n \"Train F1: {:.4f}\".format(f1_score(train_gt, best_train_preds, average='macro')),\n \"Val Loss: {:.4f}\".format(np.mean(best_val_losses)),\n \"Val Acc: {:.4f}\".format(jaccard_score(val_gt, best_val_preds, average='macro')),\n \"Val Prec: {:.4f}\".format(precision_score(val_gt, best_val_preds, average='macro')),\n \"Val Rcll: {:.4f}\".format(recall_score(val_gt, best_val_preds, average='macro')),\n \"Val F1: {:.4f}\".format(f1_score(val_gt, best_val_preds, average='macro')))\n\n else:\n lr_pt_counter = 0\n es_pt_counter = 0\n best_network = network\n best_loss = np.mean(val_losses)\n best_train_losses = train_losses\n best_train_preds = train_preds\n best_val_losses = val_losses\n best_val_preds = val_preds\n else:\n best_network = network\n best_train_losses = train_losses\n best_train_preds = train_preds\n best_val_losses = val_losses\n best_val_preds = val_preds\n\n # set network to train mode again\n network.train()\n\n if early_stop:\n break\n\n # if plot_gradient gradient plot is shown at end of training\n if config['save_gradient_plot']:\n mkdir_if_missing(os.path.join('logs', log_date, log_timestamp))\n plt.savefig(os.path.join('logs', log_date, log_timestamp, 'grad_flow.png'))\n\n # return validation, train and test predictions as numpy array with ground truth\n return best_network, np.vstack((best_val_preds, val_gt)).T, np.vstack((best_train_preds, train_gt)).T", "def _doTraining(self, train_dl: torch.utils.data.DataLoader) -> float:\n\n # Initialize the variable for tracking the training loss\n train_loss = 0.0\n # Set the model to training mode (enables gradient computation and dropout)\n self.train()\n\n # Iterate over the training data loader\n for x_batch, y_batch in train_dl:\n # Clear the gradients of the optimizer\n self.optimizer.zero_grad()\n # Forward pass to obtain model predictions\n y_pred = self.forward(x_batch)\n\n # Compute the loss between the predictions and the ground truth\n loss = self.criterion(y_pred, y_batch)\n # Backpropagation: compute gradients and update model parameters\n loss.backward()\n self.optimizer.step()\n # Accumulate the 
training loss\n train_loss += loss.item()\n\n # Compute the average training loss\n train_loss /= len(train_dl)\n # Return the training loss and None values for additional metrics\n return train_loss, None, None, None", "def _doTraining(self, train_dl: torch.utils.data.DataLoader) -> float:\n\n # Initialize variables for tracking loss, correct predictions, total samples, and labels\n train_loss = 0.0\n correct = 0\n total = 0\n true_labels = []\n pred_labels = []\n\n # Set the model to training mode (enables gradient computation and dropout)\n self.train()\n\n # Iterate over the training data loader\n for x_batch, y_batch in train_dl:\n # Clear the gradients of the optimizer\n self.optimizer.zero_grad()\n # Forward pass to obtain model predictions\n y_pred = self.forward(x_batch)\n\n # Compute the loss between the predictions and the ground truth\n loss = self.criterion(y_pred, y_batch)\n # Backpropagation: compute gradients and update model parameters\n loss.backward()\n self.optimizer.step()\n # Accumulate the training loss\n train_loss += loss.item()\n\n # Get the predicted labels by selecting the maximum value along the second dimension\n _, predicted = torch.max(y_pred.data, 1)\n # Update the count of total samples and correct predictions\n total += y_batch.size(0)\n correct += (predicted == y_batch).sum().item()\n\n # Extend the true and predicted labels lists\n true_labels.extend(y_batch.tolist())\n pred_labels.extend(predicted.tolist())\n\n # Compute the average training loss\n train_loss /= len(train_dl)\n # Calculate the weighted F1 score for the true and predicted labels\n train_f1 = f1_score(true_labels, pred_labels, average='weighted') * 100\n\n # Return the training loss, F1 score, true labels, and predicted labels\n return train_loss, train_f1, true_labels, pred_labels", "def train(model, optimizer: torch.optim, data: torch_geometric.data.Data):\n model.train()\n optimizer.zero_grad()\n F.nll_loss(model()[data.train_mask], data.y[data.train_mask]).backward()\n optimizer.step()\n\n model.eval()", "def train(\n model: nn.Module,\n train_set: IterableDataset,\n dev_set: Optional[IterableDataset],\n total_loss: Callable[[nn.Module, IterableDataset], TT],\n accuracy: Callable[[nn.Module, IterableDataset], float],\n batch_size=32,\n learning_rate=1e-3,\n report_rate=10,\n epoch_num=50\n):\n # Choose Adam for optimization\n optimizer = torch.optim.Adam(\n model.parameters(), lr=learning_rate)\n\n # Create batched loader\n batches = batch_loader(\n # Use no shuffling, it doesn't work with iterable datasets\n train_set, batch_size=batch_size, shuffle=False)\n\n # Perform SGD in a loop\n for t in range(epoch_num):\n\n # We use a PyTorch DataLoader to provide a stream of\n # dataset element batches\n for batch in batches:\n loss = total_loss(model, batch)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # Reporting (every `report_rate` epochs)\n if (t+1) % report_rate == 0:\n with torch.no_grad():\n train_loss = total_loss(model, train_set).item()\n train_acc = accuracy(model, train_set)\n if dev_set:\n dev_acc = accuracy(model, dev_set)\n else:\n dev_acc = 0.0\n msg = (\"@{k}: \"\n \"loss(train)={tl}, acc(train)={ta}, \"\n \"acc(dev)={da}\")\n print(msg.format(\n k=t+1,\n tl=round(train_loss, 3),\n ta=round(train_acc, 3),\n da=round(dev_acc, 3))\n )", "def train(ds, **kwargs):\n# {{{\n\n t_REPS = [ds.grand['representations'][tr] for tr in ds.data['trainers']]\n t_VALS = [ds.grand['values'][tr] for tr in ds.data['trainers']]\n\n # For convenience, set the mean of the 
training values to 0\n t_AVG = np.mean(t_VALS)\n t_VALS = np.subtract(t_VALS,t_AVG)\n\n # model determination (`s` and `l` hypers, then `a` coefficients)\n # {{{\n # train the hypers\n if ds.data['hypers']:\n print(\"Loading hyperparameters from Dataset.\")\n s = ds.data['s']\n l = ds.data['l']\n else:\n if 'k' in kwargs:\n k = kwargs['k']\n else:\n k = ds.setup['M']\n s, l = find_hypers(t_VALS,t_REPS,k)\n ds.data['hypers'] = True\n ds.data['s'] = s\n ds.data['l'] = l\n\n # train for alpha\n if ds.data['a']:\n print(\"Loading coefficients from Dataset.\") \n alpha = np.asarray(ds.data['a'])\n else:\n print(\"Model training using s = {} and l = {} . . .\".format(s,l))\n alpha = train_a(t_REPS,t_VALS,s,l)\n ds.data['a'] = alpha.tolist()\n # }}}\n\n return ds, t_AVG", "def train(forward_fn, optimizer, scaler, batch, device, opt):\n # Zero gradients\n optimizer.zero_grad()\n\n # Data\n x = batch.to(device)\n nt, n = x.shape[0], x.shape[1]\n\n # Forward (inference)\n x_, y, z, _, q_y_0_params, q_z_params, p_z_params, res = forward_fn(x, nt, dt=1 / opt.n_euler_steps)\n\n # Loss\n # NLL\n nll = utils.neg_logprob(x_, x, scale=opt.obs_scale).sum()\n # y_0 KL\n q_y_0 = utils.make_normal_from_raw_params(q_y_0_params)\n kl_y_0 = distrib.kl_divergence(q_y_0, distrib.Normal(0, 1)).sum()\n # z KL\n q_z, p_z = utils.make_normal_from_raw_params(q_z_params), utils.make_normal_from_raw_params(p_z_params)\n kl_z = distrib.kl_divergence(q_z, p_z).sum()\n # ELBO\n loss = nll + opt.beta_y * kl_y_0 + opt.beta_z * kl_z\n # L2 regularization of residuals\n if opt.l2_res > 0:\n l2_res = torch.norm(res, p=2, dim=2).sum()\n loss += opt.l2_res * l2_res\n # Batch average\n loss /= n\n\n # Backward and weight update\n if opt.torch_amp:\n with torch_amp.autocast(enabled=False):\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n else:\n if opt.apex_amp:\n with apex_amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n optimizer.step()\n\n # Logs\n with torch.no_grad():\n loss = loss.item()\n nll = nll.sum().item() / n\n kl_y_0 = kl_y_0.item() / n\n kl_z = kl_z.item() / n\n\n return loss, nll, kl_y_0, kl_z", "def train():\n\t# 1、make dataloader\n\ttrain_loader, val_loader, num_query, num_class = make_data_loader(cfg)\n\t#print(\"num_query:{},num_class:{}\".format(num_query,num_class))\n\n\t# 2、make model\n\tmodel = build_model(cfg, num_class)\n\n\t# model.eval()\n\t# x = model(img_tensor)\n\t# print(x.shape)\n\t# 3、 make optimizer\n\toptimizer = make_optimizer(cfg, model)\n\n\t# 4、 make lr_scheduler\n\tscheduler = make_lr_scheduler(cfg, optimizer)\n\n\t# 5、 make loss_func\n\tif cfg.MODEL.PCB_NECK:\n\t\t# make loss specifically for pcb \n\t\tloss_func = get_softmax_triplet_loss_fn(cfg, num_class)\n\telse:\n\t\tloss_func = make_loss(cfg, num_class)\n\n\t# get parameters\n\tlog_period = cfg.OUTPUT.LOG_PERIOD \n\tckpt_period = cfg.OUTPUT.CHECKPOINT_PERIOD\n\teval_period = cfg.OUTPUT.EVAL_PERIOD\n\toutput_dir = cfg.OUTPUT.ROOT_DIR\n\tdevice = cfg.MODEL.DEVICE\n\tepochs = cfg.SOLVER.MAX_EPOCHS\n\tuse_gpu = device == \"cuda\"\n\tuse_neck = cfg.MODEL.NECK or cfg.MODEL.LEARN_REGION \n\t# how many batch for each log\n\tbatch_size = cfg.SOLVER.IMGS_PER_BATCH\n\tbatch_num = len(train_loader) \n\t\n\tlog_iters = batch_num // log_period\n\tpretrained = cfg.MODEL.PRETRAIN_PATH != ''\n\tparallel = cfg.MODEL.PARALLEL \t\n\tgrad_clip = cfg.DARTS.GRAD_CLIP \n\n\tfeat_norm = cfg.TEST.FEAT_NORM \n\tckpt_save_path = cfg.OUTPUT.ROOT_DIR + cfg.OUTPUT.CKPT_DIR\n\tif not 
os.path.exists(ckpt_save_path):\n\t\tos.makedirs(ckpt_save_path)\n\n\n\t# create *_result.xlsx\n\t# save the result for analyze\n\tname = (cfg.OUTPUT.LOG_NAME).split(\".\")[0] + \".xlsx\"\n\tresult_path = cfg.OUTPUT.ROOT_DIR + name\n\n\twb = xl.Workbook()\n\tsheet = wb.worksheets[0]\n\ttitles = ['size/M','speed/ms','final_planes', 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss',\n\t\t\t 'acc', 'mAP', 'r1', 'r5', 'r10', 'loss','acc', 'mAP', 'r1', 'r5', 'r10', 'loss']\n\tsheet.append(titles)\n\tcheck_epochs = [40, 80, 120, 160, 200, 240, 280, 320, 360, epochs]\n\tvalues = []\n\n\tlogger = logging.getLogger('MobileNetReID.train')\n\t\n\t# count parameter\n\tsize = count_parameters(model)\n\tlogger.info(\"the param number of the model is {:.2f} M\".format(size))\n\t\n\tvalues.append(format(size, '.2f'))\n\tvalues.append(model.final_planes)\n\n\tlogger.info(\"Start training\")\n\t\n\t#count = 183, x, y = batch -> 11712 for train\n\tif pretrained:\n\t\tstart_epoch = model.start_epoch\n\n\tif parallel:\n\t\tmodel = nn.DataParallel(model)\n\n\tif use_gpu:\n\t\t# model = nn.DataParallel(model)\n\t\tmodel.to(device)\n\t\n\t# save the best model\n\tbest_mAP, best_r1 = 0., 0.\n\tis_best = False\n\t# batch : img, pid, camid, img_path\n\tavg_loss, avg_acc = RunningAverageMeter(), RunningAverageMeter()\n\tavg_time, global_avg_time = AverageMeter(), AverageMeter()\n\tglobal_avg_time.reset()\n\tfor epoch in range(epochs):\n\t\tscheduler.step()\n\n\t\tif pretrained and epoch < start_epoch - 1:\n\t\t\tcontinue\n\t\n\t\tmodel.train()\n\t\t# sum_loss, sum_acc = 0., 0.\n\t\tavg_loss.reset()\n\t\tavg_acc.reset()\n\t\tavg_time.reset()\n\t\tfor i, batch in enumerate(train_loader):\n\n\t\t\tt0 = time.time()\n\t\t\timgs,labels = batch\n\n\t\t\tif use_gpu:\n\t\t\t\timgs = imgs.to(device)\n\t\t\t\tlabels = labels.to(device)\n\n\t\t\tres = model(imgs)\n\t\t\t# score, feat = model(imgs)\n\t\t\t# loss = loss_func(score, feat, labels)\n\t\t\tloss, acc = compute_loss_acc(use_neck, res, labels, loss_func)\n\t\t\t\n\t\t\tloss.backward()\n\t\t\tif grad_clip != 0:\n\t\t\t\tnn.utils.clip_grad_norm(model.parameters(), grad_clip)\n\n\t\t\toptimizer.step()\n\n\t\t\toptimizer.zero_grad()\n\n\t\t\t# acc = (score.max(1)[1] == labels).float().mean()\n\n\t\t\t# sum_loss += loss\n\t\t\t# sum_acc += acc \n\t\t\tt1 = time.time()\n\t\t\tavg_time.update((t1 - t0) / batch_size)\n\t\t\tavg_loss.update(loss)\n\t\t\tavg_acc.update(acc)\n\n\t\t\t#log the info \n\t\t\tif (i+1) % log_iters == 0:\n\n\t\t\t\tlogger.info(\"epoch {}: {}/{} with loss is {:.5f} and acc is {:.3f}\".format(\n\t\t\t\t\t epoch+1, i+1, batch_num, avg_loss.avg, avg_acc.avg))\n\n\t\tlr = optimizer.state_dict()['param_groups'][0]['lr']\n\t\tlogger.info(\"end epochs {}/{} with lr: {:.5f} and avg_time is {:.3f} ms\".format(epoch+1, epochs, lr, avg_time.avg * 1000))\n\t\tglobal_avg_time.update(avg_time.avg)\n\t\t# change the lr \n\n\t\t# eval the model \n\t\tif (epoch+1) % eval_period == 0 or (epoch + 1) == epochs :\n\t\t\t\n\t\t\tmodel.eval()\n\t\t\tmetrics = R1_mAP(num_query, use_gpu = use_gpu, feat_norm = feat_norm)\n\n\t\t\twith torch.no_grad():\n\n\t\t\t\tfor vi, batch in enumerate(val_loader):\n\t\t\t\t\t\n\t\t\t\t\timgs, labels, camids = batch\n\n\t\t\t\t\tif use_gpu:\n\t\t\t\t\t\timgs = imgs.to(device)\n\n\t\t\t\t\tfeats = model(imgs)\n\t\t\t\t\tmetrics.update((feats,labels, camids))\n\n\t\t\t\t#compute cmc and mAP\n\t\t\t\tcmc, mAP = metrics.compute()\n\t\t\t\tlogger.info(\"validation results at epoch:{}\".format(epoch + 
1))\n\t\t\t\tlogger.info(\"mAP:{:.2%}\".format(mAP))\n\t\t\t\tfor r in [1,5,10]:\n\t\t\t\t\tlogger.info(\"CMC curve, Rank-{:<3}:{:.2%}\".format(r,cmc[r-1]))\t\n\n\t\t\t\t# determine whether cur model is the best \n\t\t\t\tif mAP > best_mAP:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_mAP = mAP\n\t\t\t\t\tlogger.info(\"Get a new best mAP\")\n\t\t\t\tif cmc[0] > best_r1:\n\t\t\t\t\tis_best = True\n\t\t\t\t\tbest_r1 = cmc[0]\n\t\t\t\t\tlogger.info(\"Get a new best r1\")\n\n\t\t\t\t# add the result to sheet\n\t\t\t\tif (epoch + 1) in check_epochs:\n\t\t\t\t\tval = [avg_acc.avg, mAP, cmc[0], cmc[4], cmc[9]]\n\t\t\t\t\tchange = [format(v * 100, '.2f') for v in val]\n\t\t\t\t\tchange.append(format(avg_loss.avg, '.3f'))\n\t\t\t\t\tvalues.extend(change)\n\n\n\t\t# we hope that eval_period == ckpt_period or eval_period == k* ckpt_period where k is int\t\t\t\n\t\t# whether to save the model\n\t\tif (epoch+1) % ckpt_period == 0 or is_best:\n\n\t\t\tif parallel:\n\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\t\t\telse:\n\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"checkpoint_{}.pth\".format(epoch + 1 ))\n\n\t\t\tlogger.info(\"checkpoint {} saved !\".format(epoch + 1))\n\n\t\t\tif is_best:\n\t\t\t\tif parallel:\n\t\t\t\t\ttorch.save(model.module.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\telse:\n\t\t\t\t\ttorch.save(model.state_dict(), ckpt_save_path + \"best_ckpt.pth\")\n\t\t\t\tlogger.info(\"best checkpoint was saved\")\n\t\t\t\tis_best = False\n\t\n\tvalues.insert(1, format(global_avg_time.avg * 1000, '.2f'))\n\tsheet.append(values)\n\twb.save(result_path)\n\n\tlogger.info(\"training has ended, time per img is {} ms\".format(global_avg_time.avg * 1000))", "def train(epoch, model, loss_fn, optimizer, dataloader, pair_generation_tnf):\n model.train()\n train_loss = 0\n for batch_idx, batch in enumerate(tqdm(dataloader, desc='Epoch {}'.format(epoch))):\n optimizer.zero_grad()\n #batch = pair_generation_tnf(batch)\n \n theta = model(batch)\n print()\n loss = loss_fn(theta, batch['theta'])\n \n \n \n loss.backward()\n optimizer.step()\n train_loss += loss.data.cpu().numpy().item()\n train_loss /= len(dataloader)\n print('Train set: Average loss: {:.4f}'.format(train_loss))\n return train_loss", "def train(self):\n train_dataloader = self.get_train_dataloader()\n\n if self.args.max_steps > 0:\n t_total = self.args.max_steps\n num_train_epochs = (\n self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps) + 1\n )\n else:\n t_total = int(len(train_dataloader) // self.args.gradient_accumulation_steps * self.args.num_train_epochs)\n num_train_epochs = self.args.num_train_epochs\n\n lr_scheduler = orttrainer.optim.LinearWarmupLRScheduler(t_total, self.args.warmup_steps / float(t_total))\n\n loss_scaler = amp.DynamicLossScaler() if self.args.fp16 else None\n device = self.args.device.type\n\n device = f\"{device}:{self.args.device.index}\" if self.args.device.index else f\"{device}:0\"\n options = orttrainer.ORTTrainerOptions(\n {\n \"batch\": {\"gradient_accumulation_steps\": self.args.gradient_accumulation_steps},\n \"device\": {\"id\": device},\n \"mixed_precision\": {\"enabled\": self.args.fp16, \"loss_scaler\": loss_scaler},\n \"debug\": {\n \"deterministic_compute\": True,\n },\n \"utils\": {\"grad_norm_clip\": False},\n \"distributed\": {\n # we are running single node multi gpu test. 
thus world_rank = local_rank\n # and world_size = self.args.n_gpu\n \"world_rank\": max(0, self.args.local_rank),\n \"world_size\": int(self.world_size),\n \"local_rank\": max(0, self.args.local_rank),\n \"allreduce_post_accumulation\": True,\n },\n \"lr_scheduler\": lr_scheduler,\n }\n )\n\n param_optimizer = list(self.model.named_parameters())\n params = [\n {\n \"params\": [n for n, p in param_optimizer if \"bias\" in n or \"LayerNorm.weight\" in n],\n \"weight_decay_mode\": 1,\n },\n {\n \"params\": [n for n, p in param_optimizer if not (\"bias\" in n or \"LayerNorm.weight\" in n)],\n \"weight_decay_mode\": 1,\n },\n ]\n\n optim_config = optim.AdamConfig(params=params, lr=2e-5, do_bias_correction=True)\n self.model = orttrainer.ORTTrainer(self.model, self.model_desc, optim_config, options=options)\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(\" Num examples = %d\", len(train_dataloader.dataset))\n logger.info(\" Num Epochs = %d\", num_train_epochs)\n logger.info(\" Instantaneous batch size per GPU = %d\", self.args.per_gpu_train_batch_size)\n logger.info(\n \" Total train batch size (w. parallel, distributed & accumulation) = %d\",\n self.args.train_batch_size\n * self.args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1),\n )\n logger.info(\" Gradient Accumulation steps = %d\", self.args.gradient_accumulation_steps)\n logger.info(\" Total optimization steps = %d\", t_total)\n\n global_step = 0\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n tr_loss = 0.0\n logging_loss = 0.0\n train_iterator = trange(\n epochs_trained,\n int(num_train_epochs),\n desc=\"Epoch\",\n disable=self.args.local_rank not in [-1, 0],\n )\n\n for _epoch in train_iterator:\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=self.args.local_rank not in [-1, 0])\n for step, inputs in enumerate(epoch_iterator):\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n tr_loss += self._training_step(self.model, inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n len(epoch_iterator) <= self.args.gradient_accumulation_steps and (step + 1) == len(epoch_iterator)\n ):\n global_step += 1\n\n if self.args.local_rank in [-1, 0]:\n if (self.args.logging_steps > 0 and global_step % self.args.logging_steps == 0) or (\n global_step == 1 and self.args.logging_first_step\n ):\n logs = {}\n if self.args.evaluate_during_training:\n results = self.evaluate()\n for key, value in results.items():\n eval_key = f\"eval_{key}\"\n logs[eval_key] = value\n\n loss_scalar = (tr_loss - logging_loss) / self.args.logging_steps\n\n logs[\"loss\"] = loss_scalar\n logging_loss = tr_loss\n\n epoch_iterator.write(json.dumps({**logs, **{\"step\": global_step}}))\n\n if self.args.max_steps > 0 and global_step > self.args.max_steps:\n epoch_iterator.close()\n break\n if self.args.max_steps > 0 and global_step > self.args.max_steps:\n train_iterator.close()\n break\n\n logger.info(\"\\n\\nTraining completed. 
\\n\\n\")\n return TrainOutput(global_step, tr_loss / global_step)", "def _epoch_step(self, dataset, epoch):\n dataloader = DataLoader(dataset, batch_size=self.batch_size,\n shuffle=True, num_workers=64)\n\n num_batchs = len(dataset) // self.batch_size\n\n # observe the training progress\n if self.verbose:\n bar = progressbar.ProgressBar(max_value=num_batchs)\n\n running_loss = 0\n for i, sample in enumerate(dataloader):\n input_batch, label_batch = sample['lr'], sample['hr']\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n # zero the grad\n self.optimizer.zero_grad()\n\n # Forward\n\n if self.model_name in ['TDAN']:\n output_batch, lrs = self.model(input_batch)\n num = input_batch.size(1)\n center = num // 2\n x = input_batch[:, center, :, :, :].unsqueeze(1).repeat(1, num, 1, 1, 1)\n loss = self.loss_fn(output_batch, label_batch) + 0.25 * self.loss_fn(lrs, x)\n else:\n output_batch = self.model(input_batch)\n loss = self.loss_fn(output_batch, label_batch)\n\n running_loss += loss.data[0]\n\n # Backward + update\n loss.backward()\n #nn.utils.clip_grad_norm(self.model.parameters(), 0.4)\n self.optimizer.step()\n\n if self.verbose:\n bar.update(i, force=True)\n\n average_loss = running_loss / num_batchs\n self.hist_loss.append(average_loss)\n if self.verbose:\n print('Epoch %5d, loss %.5f' \\\n % (epoch, average_loss))", "def train_epoch(data_loader, model, optimizer, criterion, device, fold, epoch):\n\tmodel.train()\n\tfor inputs, input_lens, labels in tqdm.tqdm(data_loader, ncols=100, desc=f\"train-- F: {fold} -- E: {epoch}\"):\n\t\tinputs = inputs.to(device)\n\t\tlabels = labels.to(device)\n\t\t#input_lens = input_lens.to(device)\n\n\t\toptimizer.zero_grad()\n\t\tpreds = model(inputs, input_lens)\n\t\t\n\t\tloss = criterion(preds, labels.unsqueeze(1))\n\t\tloss.backward()\n\t\toptimizer.step()", "def train(self, trainData):\n pass", "def train(model, optimizer, epochs=1):\n model = model.to(device=device) # move the model parameters to CPU/GPU\n for e in range(epochs):\n for t, (x, y) in enumerate(dataloader_train):\n model.train() # put model to training mode\n\n x = x.to(device=device, dtype=dtype) # move to device, e.g. 
GPU\n y = y.to(device=device, dtype=torch.long)\n\n scores = model(x)\n \n loss = F.multi_margin_loss(scores, y) \n if t == 0:\n losses.append(loss.item())\n\n\n optimizer.zero_grad()\n loss.backward()\n\n optimizer.step()\n\n if t % print_every == 0:\n print('Iteration %d, loss = %.4f' % (t, loss.item()))\n check_accuracy(dataloader_val, model)\n print()\n # losses.append(loss.item())", "def train_model(model, criterion, optimizer, scheduler, \n dataloaders,dataset_sizes,device, num_epochs=5, start_epoch = 0):\n start = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_loss = np.inf\n best_epoch = 0\n for epoch in range(start_epoch,num_epochs):\n print('Epoch {}/{}'.format(epoch+1, num_epochs))\n print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n if False: #disable for Adam\n scheduler.step()\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n running_loss = 0.0\n\n # Iterate over data.\n count = 0\n for X, Y in dataloaders[phase]:\n X = X.to(device)\n Y = Y.to(device)\n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n Y_pred = model(X)\n \n\n if type(criterion) == Point_Loss:\n pts_2d = X[:,:16].view(-1,2,8).to(device)\n P = (X[:,-12:].view(-1,3,4))[:,:,:3] * 1000\n Pinv = torch.inverse(P)\n Pinv.requires_grad = False\n \n # extract and invert each P matrix dumbly\n P2 = np.array([[721.5377,0,609.5593],[0,721.5377,172.8540],[0,0,1]])\n Pinv_test = torch.from_numpy(np.linalg.inv(P2)).float().to(device)\n \n loss = criterion(Y_pred,Y,Pinv,pts_2d,device)\n \n else:\n loss = criterion(Y_pred,Y)\n \n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n \n # statistics\n running_loss += loss.item()* X.size(0)\n \n # copy data to cpu and numpy arrays for scoring\n Y_pred = Y_pred.data.cpu().numpy()\n Y = Y.data.cpu().numpy()\n \n # TODO - need some intuitive accuracy function here\n\n \n # verbose update\n count += 1\n if False and count % 20 == 0:\n print(\"loss: {}\".format(loss.item()))\n \n epoch_loss = running_loss / dataset_sizes[phase]\n \n\n print('{} Loss: {:.5f}'.format(\n phase, epoch_loss))\n\n # deep copy the model\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_epoch = epoch\n del best_model_wts\n best_model_wts = copy.deepcopy(model.state_dict())\n\n print()\n \n if epoch % 10 == 0:\n # save checkpoint\n PATH = \"checkpoints/label_convert_{}.pt\".format(epoch)\n torch.save({\n 'epoch': epoch,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': epoch_loss\n }, PATH)\n\n time_elapsed = time.time() - start\n print('Training complete in {:.0f}m {:.0f}s'.format(\n time_elapsed // 60, time_elapsed % 60))\n print('Best val loss: {:4f}, epoch {}'.format(best_loss,best_epoch))\n\n # load best model weights\n model.load_state_dict(best_model_wts)\n return model", "def train(self, data_dict, label_dict):\n loaders = self.init_loaders(data_dict, label_dict)\n best_performance = 1e18\n loss_dict = self.init_loss_dict()\n performance_dict = self.init_performance_dict()\n\n for epoch in range(self.config_dict[\"num_epochs\"]):\n print(\"Epoch {}/{}\".format(epoch, self.config_dict[\"num_epochs\"] - 1))\n print(\"-\" * 10)\n\n if self.scheduler is not None:\n self.scheduler.step()\n\n for phase in 
[\"train\", \"val\"]:\n self.model.train(phase == \"train\")\n running_loss_dict = self.init_running_loss_dict(\n list(loss_dict[phase].keys())\n )\n output_dict = self.init_output_dict()\n i = 0\n for the_data in loaders[phase]:\n i += 1\n batch_loss_dict = {}\n inputs, labels = self.transform_batch(the_data)\n\n # zero parameter gradients\n self.optimizer.zero_grad()\n\n # forward\n outputs = self.model(inputs)\n\n output_dict = self.update_output_dict(output_dict, outputs, labels)\n\n batch_loss_dict[\"loss\"] = self.criterion(outputs, labels)\n if phase == \"train\":\n batch_loss_dict[\"loss\"].backward()\n self.optimizer.step()\n\n for key in batch_loss_dict.keys():\n running_loss_dict[key] += batch_loss_dict[key].item()\n\n # Compute epoch losses and update loss dict\n epoch_loss_dict = {\n key: running_loss_dict[key] / i for key in running_loss_dict.keys()\n }\n loss_dict[phase] = self.update_metric_dict(\n loss_dict[phase], epoch_loss_dict\n )\n\n # Compute epoch performance and update performance dict\n epoch_statistics = self.compute_epoch_statistics(output_dict)\n performance_dict[phase] = self.update_metric_dict(\n performance_dict[phase], epoch_statistics\n )\n\n print(\"Phase: {}:\".format(phase))\n self.print_metric_dict(epoch_loss_dict)\n self.print_metric_dict(epoch_statistics)\n\n if phase == \"val\":\n best_model_condition = epoch_loss_dict[\"loss\"] < best_performance\n if best_model_condition:\n print(\"Best model updated\")\n best_performance = epoch_loss_dict[\"loss\"]\n best_model_wts = copy.deepcopy(self.model.state_dict())\n\n print(\"Best val performance: {:4f}\".format(best_performance))\n self.model.load_state_dict(best_model_wts)\n result_dict = {\n phase: {**performance_dict[phase], **loss_dict[phase]}\n for phase in performance_dict.keys()\n }\n return result_dict", "def train_dp(trainloader, model, optimizer, epoch):\n model.train()\n running_loss = 0.0\n for i, data in tqdm(enumerate(trainloader, 0), leave=True):\n inputs, labels = data\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n running_loss += torch.mean(loss).item()\n\n losses = torch.mean(loss.reshape(num_microbatches, -1), dim=1)\n saved_var = dict()\n for tensor_name, tensor in model.named_parameters():\n saved_var[tensor_name] = torch.zeros_like(tensor)\n\n for j in losses:\n j.backward(retain_graph=True)\n torch.nn.utils.clip_grad_norm_(model.parameters(), S)\n for tensor_name, tensor in model.named_parameters():\n new_grad = tensor.grad\n saved_var[tensor_name].add_(new_grad)\n model.zero_grad()\n\n for tensor_name, tensor in model.named_parameters():\n if device.type =='cuda':\n noise = torch.cuda.FloatTensor(tensor.grad.shape).normal_(0, sigma)\n else:\n noise = torch.FloatTensor(tensor.grad.shape).normal_(0, sigma)\n saved_var[tensor_name].add_(noise)\n tensor.grad = saved_var[tensor_name] / num_microbatches\n optimizer.step()\n\n if i > 0 and i % 20 == 0:\n # logger.info('[%d, %5d] loss: %.3f' %\n # (epoch + 1, i + 1, running_loss / 2000))\n plot(epoch * len(trainloader) + i, running_loss, 'Train Loss')\n running_loss = 0.0", "def train(self, training_data, training_labels, validation_data, validation_labels):\n abstract", "def train(self, batch):\n pass", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the 
string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def load_torch_data(load_data_func):\n\n def torch_loader(dataset, data_path, batch_size, shuffle=True, cuda_device=None, num_workers=1):\n (train_data, val_data), (train_labels, val_labels), label_names = load_data_func(dataset, data_path)\n\n kwargs = {'num_workers': num_workers, 'pin_memory': True} if cuda_device is not None else {}\n kwargs['drop_last'] = True\n\n if type(train_data) == numpy.ndarray:\n train_dataset = TensorDataset(torch.from_numpy(train_data), torch.from_numpy(train_labels))\n val_dataset = TensorDataset(torch.from_numpy(val_data), torch.from_numpy(val_labels))\n elif type(train_data) == scipy.sparse.csr.csr_matrix:\n from sklearn.feature_extraction.text import TfidfTransformer\n tfidf_trans = TfidfTransformer(norm=None)\n tfidf_trans.fit(train_data)\n train_dataset = SparseDataset(train_data, tfidf_trans.idf_)\n val_dataset = SparseDataset(val_data, tfidf_trans.idf_)\n else:\n train_dataset = torchvision.datasets.ImageFolder(train_data)\n val_dataset = torchvision.datasets.ImageFolder(val_data)\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=shuffle, **kwargs)\n val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\n return train_loader, val_loader, label_names\n\n return torch_loader", "def train(train_ds):\n rng = random.PRNGKey(0)\n\n # initialise the model\n py, params = GPModel.init(rng, train_ds['index_points'])\n model = nn.Model(GPModel, 
params)\n\n # utility functions for packing and unpacking param dicts\n par_from_array, array_from_par = build_par_pack_and_unpack(model)\n\n @jax.jit\n def loss_fun(model: GPModel, params: dict) -> float:\n \"\"\" This is clumsier than the usual FLAX loss_fn. \"\"\"\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])\n\n # wrap loss fun for scipy.optimize\n def wrapped_loss_fun(arr):\n params = par_from_array(arr)\n return loss_fun(model, params)\n\n @jax.jit\n def loss_and_grads(x):\n return jax.value_and_grad(wrapped_loss_fun)(x)\n\n res = oscipy.optimize.minimize(\n loss_and_grads,\n x0=array_from_par(params),\n jac=True,\n method='BFGS')\n\n logging.info('Optimisation message: {}'.format(res.message))\n\n trained_model = model.replace(params=par_from_array(res.x))\n return trained_model", "def loss(self, dataset=None, loss=None, training=None):\n # Recover the defaults, if missing\n dataset, loss = self._resolve_defaults(trainset=dataset, loss=loss)\n # Sample the train batch\n inputs, targets = dataset.sample(self._config)\n # Guess whether computation is for training, if necessary\n if training is None:\n training = torch.is_grad_enabled()\n # Forward pass\n return loss(self.run(inputs), targets, self._params)", "def train(train_loader, model, criterion, optimizer, lr_schedule, epoch,Lambda,layerID):\r\n global total_steps, exp_flops, exp_l0, args, writer\r\n losses = AverageMeter()\r\n top1 = AverageMeter()\r\n model.eval()\r\n lr_schedule.step(epoch=epoch)\r\n for i, (input_, target) in enumerate(train_loader):\r\n total_steps += 1\r\n if torch.cuda.is_available():\r\n target = target.cuda()\r\n input_ = input_.cuda()\r\n input_var = torch.autograd.Variable(input_)\r\n target_var = torch.autograd.Variable(target)\r\n # compute output\r\n output = model(input_var)\r\n totalloss,loss, reg = criterion(model,output, target_var,layerID,Lambda)\r\n prec1 = accuracy(output.data, target, topk=(1,))[0]\r\n losses.update(totalloss.data, input_.size(0))\r\n top1.update(100 - prec1, input_.size(0))\r\n ## Adjust LR\r\n oldloss = totalloss\r\n if oldloss-totalloss > 1.0:\r\n optimizer.defaults['lr'] = optimizer.defaults['lr']*1\r\n # compute gradient and do SGD step\r\n optimizer.zero_grad()\r\n totalloss.backward()\r\n optimizer.step()\r\n # clamp the parameters\r\n layers = model.layers if not args.multi_gpu else model.module.layers\r\n for k, layer in enumerate(layers):\r\n if not isinstance(layer,nn.Linear):\r\n layer.constrain_parameters()\r\n TotalDataScale = len(train_loader.dataset)\r\n # input()\r\n IMPORTANCE = model.layers[layerID].qz_loga/(1+model.layers[layerID].qz_loga)\r\n MAX = torch.max(IMPORTANCE)\r\n MIN = torch.min(IMPORTANCE)\r\n if i == 0:\r\n Log = ('\\nEpoch:[{0}][{1}/{2}], '\r\n 'Loss:{loss:.4f}, '\r\n 'Reg:{reg:.4f}, '\r\n 'Max Importance:{max:.4f}, ''Min Importance:{min:.4f}, '\r\n 'Lr:{lr:.4f}'.format(\r\n epoch, i, TotalDataScale,reg=reg, loss=loss, top1=top1,max=MAX,min=MIN,lr=optimizer.defaults['lr']))\r\n else:\r\n Log = ('\\rEpoch:[{0}][{1}/{2}], '\r\n 'Loss:{loss:.4f}, '\r\n 'Reg:{reg:.4f}, '\r\n 'Max Importance:{max:.4f}, ''Min Importance:{min:.4f}, '\r\n 'Lr:{lr:.4f}'.format(\r\n epoch, i, len(train_loader), reg=reg, loss=loss, top1=top1, max=MAX, min=MIN,\r\n lr=optimizer.defaults['lr']))\r\n sys.stdout.write(Log)\r\n\r\n return top1.avg", "def train(self, train_fn, dev_fn):\n X_train, Y_train = self.load_dataset(train_fn)\n X_dev, Y_dev = self.load_dataset(dev_fn)\n logging.debug(\"Classes: 
{}\".format((self.num_of_classes(), self.classes_())))\n # Set model params, called here after labels have been identified in load dataset\n self.model_fn()\n\n # Create a callback to print a sample after each epoch\n logging.debug(\"Training model on {}\".format(train_fn))\n self.model.fit(X_train, Y_train,\n batch_size = self.batch_size,\n epochs = self.epochs,\n validation_data = (X_dev, Y_dev),\n callbacks = self.get_callbacks(X_train))", "def _training(self, data_loader: torch.utils.data.DataLoader,\n data_size: int):\n\n self.model.train()\n total_loss = torch.Tensor([0])\n with tqdm(total=data_size//self.batch_size) as pbar:\n for _, ((row, col), val) in enumerate(data_loader):\n self.optimizer.zero_grad()\n\n row = row.long()\n if isinstance(col, list):\n col = tuple(c.long() for c in col)\n else:\n col = col.long()\n\n preds = self.model(row, col)\n loss = self.loss_function(preds)\n loss.backward()\n\n self.optimizer.step()\n\n total_loss += loss.item()\n batch_loss = loss.item() / row.size()[0]\n\n pbar.update(1)\n\n total_loss /= data_size\n return total_loss", "def training_step(self, x):\n self.train() # Sets network to train mode\n rec_error, feat, y = self.forward(x)\n # Reconstruction Loss\n rec_loss = torch.mean(rec_error)\n loss = rec_loss\n\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n self.eval() # Sets network to evaluation mode\n print('Rec Loss: {}'.format(rec_loss.cpu().data))\n print()\n return loss, feat, y", "def train_on_batch(network, optimizer, loss_fn, metrics_fn, X, targets,\n config):\n optimizer.zero_grad()\n\n # Extract the per primitive features\n F = network.compute_features(X)\n predictions = compute_predictions_from_features(\n F, network, targets, config\n )\n\n # Do the forward pass to predict the primitive_parameters\n batch_loss = loss_fn(predictions, targets, config[\"loss\"])\n metrics_fn(predictions, targets)\n # Do the backpropagation\n batch_loss.backward()\n nn.utils.clip_grad_norm_(network.parameters(), 1)\n # Do the update\n optimizer.step()\n\n return batch_loss.item()", "def train(data_train, data_test):\n data = data_train\n # # xxx = [item for xx in data for item in xx]\n # xxx = []\n # for xx in data:\n # xxx.extend(xx.flatten())\n\n checkpoint_and_write_save_dir = logdir()\n\n os.system(\"mkdir -p checkpoints\")\n os.system(\"mkdir -p checkpoints/{}\".format(checkpoint_and_write_save_dir))\n\n writer = SummaryWriter(os.path.join(\"runs\", checkpoint_and_write_save_dir), comment=\"FreqWarp\")\n\n logging.info(\"Building architecture...\")\n\n if use_cuda:\n net = Net(20, 20, 20, nb_lstm_layers, batch_size).cuda()\n else:\n net = Net(20, 20, 20, nb_lstm_layers, batch_size)\n net.train()\n\n # optimizer = optim.SGD(net.parameters(), lr=0.001)\n # optimizer = optim.Adam(net.parameters(), lr=0.005, weight_decay=0.0001)\n optimizer = optim.RMSprop(net.parameters(), lr=0.005, weight_decay=0.0001)\n\n # criterion = nn.MSELoss()\n criterion = nn.L1Loss(size_average=False)\n\n logging.info(\"Reading data ...\")\n\n best_avg_loss = 1000000\n best_avg_loss_at_epoch = 0\n\n logging.info(\"START TRAINING ... 
MAX EPOCH: \" + str(nb_epoch))\n for epoch in range(nb_epoch):\n print(\"====================================================================\")\n count = 0\n loss_sum = 0\n\n for i in range(len(data)):\n if use_cuda:\n temp_x = torch.tensor(data[i][0]).cuda()\n temp_y = torch.tensor(data[i][1]).cuda()\n else:\n temp_x = torch.tensor(data[i][0])\n temp_y = torch.tensor(data[i][1])\n \n # exit()\n # for ii in range(0, data[i][0].shape[0] - nb_frame_in_batch*2 + 1):\n optimizer.zero_grad()\n\n h_state = net.hidden_init(temp_x) # New added Dec 07: They say hidden state need to be clear before each step\n\n # prediction, h_state = net(batch_x.float(), h_state)\n prediction, h_state = net(temp_x.float(), h_state)\n # prediction = net(batch_x.unsqueeze(0).float(), None)\n\n loss = criterion(prediction.float(), temp_y.float().view(len(temp_y), batch_size, -1))\n\n # h_state = (h_state[0].detach(), h_state[1].detach())\n\n loss.backward()\n optimizer.step()\n\n loss_sum += loss\n count += 1\n\n else:\n with torch.no_grad():\n losses = []\n for i in range(len(data_test)):\n if use_cuda:\n temp_x = torch.tensor(data_test[i][0]).cuda()\n temp_y = torch.tensor(data_test[i][1]).cuda()\n else:\n temp_x = torch.tensor(data_test[i][0])\n temp_y = torch.tensor(data_test[i][1])\n\n h_state = net.hidden_init(temp_x)\n prediction, h_state = net(temp_x.float(), h_state)\n loss = criterion(prediction.float(), temp_y.float().view(len(temp_y), batch_size, -1))\n\n losses.append(loss.data.item())\n logging.info(describe(losses))\n\n writer.add_scalar(\"loss/minibatch\", loss_sum / count, global_step=epoch)\n # writer.add_graph(net, (temp_x.float(), h_state), verbose=True)\n\n # for m_index, m in enumerate(net.parameters()):\n # print(m_index)\n # print(net_modules[m_index])\n # writer.add_histogram('histogram/', m.data, global_step=epoch)\n for name, param in net.named_parameters():\n writer.add_histogram('histogram/' + name, param.data, global_step=epoch)\n\n avg_loss = loss_sum / count\n if avg_loss < best_avg_loss:\n state = {\n 'epoch': epoch,\n 'state_dict': net,\n 'optimizer': optimizer\n }\n\n save_checkpoint(checkpoint_and_write_save_dir + \"/\" + MODEL_PTH_NAME + \"_epoch\" + str(epoch) + \"_\" + str(round(float(avg_loss), 3)), model=net, state=state)\n\n logging.info(\"Epoch {}: average loss = {:.3f}, improve {:.3f} from {:.3f}. Model saved at checkpoints/{}/{}.pth\"\n .format(epoch, avg_loss, best_avg_loss - avg_loss, best_avg_loss, checkpoint_and_write_save_dir, MODEL_PTH_NAME + \"_epoch\" + str(epoch) + \"_\" + str(round(float(avg_loss), 3))))\n\n best_avg_loss = avg_loss\n best_avg_loss_at_epoch = epoch\n\n elif epoch - best_avg_loss_at_epoch > patience:\n logging.info(\"Model hasn't improved since epoch {}. Stop training ...\".format(best_avg_loss_at_epoch))\n break\n else:\n logging.info(\"Epoch {}: average loss = {:.3f}. 
No improvement since epoch {}\".format(epoch, avg_loss, best_avg_loss_at_epoch))\n\n writer.close()\n\n return net", "def train(args, data_loader, model, global_stats):\n # Initialize meters + timers\n train_loss = AverageMeter()\n epoch_time = Timer()\n \n for batch_idx, (input_idxs, target_idxs, input_tokens, target_tokens) in enumerate(data_loader):\n # input_idxs and target_idxs have dim (batch_size x max_len)\n # they are NOT sorted by length\n\n lengths = (input_idxs != 0).long().sum(dim=1)\n sorted_lengths, order = torch.sort(lengths, descending=True)\n\n input_variable = Variable(input_idxs[order, :][:, :max(lengths)])\n target_variable = Variable(target_idxs[order, :])\n \n model.optimizer.zero_grad()\n output_log_probs, output_ses = model(input_variable,\n list(sorted_lengths),\n targets=target_variable)\n \n batch_size = input_variable.shape[0]\n flattened_outputs = output_log_probs.view(batch_size * model.max_length, -1)\n \n batch_loss = model.citerion(flattened_outputs, target_variable.contiguous().view(-1))\n batch_loss.backward()\n model.optimizer.step()\n \n model.updates += 1\n \n train_loss.update(batch_loss[0], batch_size)\n \n if batch_idx % args.display_iter == 0:\n logger.info('train: Epoch = %d | iter = %d/%d | ' %\n (global_stats['epoch'], batch_idx, len(data_loader)) +\n 'loss = %.2f | elapsed time = %.2f (s)' %\n (train_loss.avg, global_stats['timer'].time()))\n train_loss.reset()\n \n logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %\n (global_stats['epoch'], epoch_time.time()))\n \n # Checkpoint\n if args.checkpoint:\n model.checkpoint(args.model_file + '.checkpoint',\n global_stats['epoch'] + 1)", "def train(params):\n p, x = load_simulated_data()\n\n # calculate means\n p_mean = p.mean(axis=(0, 1))\n p_std = p.std(axis=(0, 1))\n\n x_mean = x.mean(axis=(0, 1))\n x_std = x.std(axis=(0, 1))\n\n # TODO - does this make sense?\n # delta = x[:,2::2] - x[:,:-2:2]\n # the number to look ahead\n delta = x[:, 1:] - x[:, :-1]\n delta_mean = delta.mean(axis=(0, 1))\n delta_std = delta.std(axis=(0, 1))\n\n # send to torch tensors\n p_mean, p_std = torch.Tensor(p_mean).to(device), torch.Tensor(p_std).to(device)\n x_mean, x_std = torch.Tensor(x_mean).to(device), torch.Tensor(x_std).to(device)\n delta_mean, delta_std = (\n torch.Tensor(delta_mean).to(device),\n torch.Tensor(delta_std).to(device),\n )\n\n # parameters\n buffer_size = int(params[\"buffer size\"])\n activation = params[\"activation\"]\n\n # train val split\n training_split = 0.5\n n = len(p)\n k = int(n * training_split)\n train_p, val_p = p[:k], p[k:]\n train_x, val_x = x[:k], x[k:]\n\n n_ahead = 1\n train_dataset = LookaheadDataset(states=train_x, actions=train_p, n_ahead=n_ahead)\n val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)\n\n action_size = len(train_dataset[0][0][0])\n state_size = len(train_dataset[0][1])\n output_size = len(train_dataset[0][2][0])\n\n model_path = params.get(\"model path\", None)\n dropout = params[\"dropout\"]\n hidden_layers = int(params[\"hidden layers\"])\n hidden_size = int(params[\"hidden size\"])\n\n model = Network(\n action_size=action_size,\n state_size=state_size,\n output_size=output_size,\n hidden_layers=hidden_layers,\n hidden_size=hidden_size,\n dropout=dropout,\n activation=activation,\n action_mean=p_mean,\n action_std=p_std,\n state_mean=x_mean,\n state_std=x_std,\n output_mean=delta_mean,\n output_std=delta_std,\n )\n\n model.to(device)\n if params.get(\"load\", False):\n model.load_state_dict(torch.load(model_path))\n\n 
learning_rate = params[\"learning rate\"]\n batch_size = int(params[\"batch size\"])\n\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)\n\n train_losses = []\n val_losses = []\n\n best_loss = np.inf\n print_info = params.get(\"print\", False)\n\n epochs = int(params[\"epochs\"])\n max_batches = np.inf\n if print_info:\n loop = tqdm(total=min(len(train_dataloader), max_batches) * epochs)\n\n def step(state, deltas):\n s = state + deltas\n return s\n\n for epoch in range(epochs):\n model.train()\n # new_n_ahead = min((epoch + 1) * 5, 100)\n new_n_ahead = 10\n if new_n_ahead != n_ahead:\n n_ahead = new_n_ahead\n if print_info:\n print(n_ahead)\n train_dataset = LookaheadDataset(\n states=train_x, actions=train_p, n_ahead=n_ahead\n )\n val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)\n train_dataloader = DataLoader(\n train_dataset, batch_size=batch_size, shuffle=True\n )\n val_dataloader = DataLoader(\n val_dataset, batch_size=batch_size, shuffle=True\n )\n for b, (a, s, d) in enumerate(train_dataloader):\n s = s.float().to(device)\n a = a.float().to(device)\n d = d.float().to(device)\n\n d_est = torch.zeros(d.shape).to(device)\n\n for i in range(n_ahead):\n d_hat = model(a[:, i], s)\n if i == 0:\n # d_est[:,i] = d_est[:,i] + d_hat\n d_est[:, i] = d_hat\n else:\n d_est[:, i] = d_est[:, i - 1] + d_hat\n s = s + d_hat\n\n # normalize d\n d = (d - delta_mean) / delta_std\n d_est = (d_est - delta_mean) / delta_std\n\n loss = loss_function(d, d_est)\n if print_info:\n if not val_losses:\n loop.set_description(\"loss: {:.3f}\".format(loss.item()))\n else:\n loop.set_description(\n \"loss: {:.4f}, val loss: {:.4f}\".format(\n loss.item(), val_losses[-1]\n )\n )\n train_losses.append(loss.item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if print_info:\n loop.update(1)\n if b > max_batches:\n break\n with torch.no_grad():\n model.eval()\n epoch_losses = []\n for b, (a, s, d) in enumerate(val_dataloader):\n s = s.float().to(device)\n a = a.float().to(device)\n d = d.float().to(device)\n\n d_est = torch.zeros(d.shape).to(device)\n\n for i in range(n_ahead):\n d_hat = model(a[:, i], s)\n if i == 0:\n # d_est[:,i] = d_est[:,i] + d_hat\n d_est[:, i] = d_hat\n else:\n d_est[:, i] = d_est[:, i - 1] + d_hat\n s = s + d_hat\n\n # normalize d\n d = (d - delta_mean) / delta_std\n d_est = (d_est - delta_mean) / delta_std\n\n loss = loss_function(d, d_est)\n\n epoch_losses.append(loss.item())\n if b > max_batches:\n break\n val_losses.append(np.mean(epoch_losses))\n\n if np.mean(epoch_losses) < best_loss:\n best_loss = np.mean(epoch_losses)\n if model_path:\n torch.save(model.state_dict(), model_path)\n if print_info:\n print(\"Best Val Loss: {:.4}\".format(best_loss))\n n_ahead = 100\n val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)\n val_dataloader = DataLoader(val_dataset, batch_size=100, shuffle=True)\n\n # calculate HZ\n start = time()\n with torch.no_grad():\n model.eval()\n for b, (a, s, d) in enumerate(val_dataloader):\n s = s.float().to(device)\n a = a.float().to(device)\n d = d.float().to(device)\n\n d_est = torch.zeros(d.shape).to(device)\n\n for i in range(n_ahead):\n d_hat = model(a[:, i], s)\n if i == 0:\n # d_est[:,i] = d_est[:,i] + d_hat\n d_est[:, i] = d_hat\n else:\n d_est[:, i] = d_est[:, i - 1] + d_hat\n s = s + d_hat\n elapsed = time() - 
start\n speed = elapsed / len(val_dataloader)\n return val_losses[-1].item(), speed", "def train_with_loader(self, data, validating_data=None, scheduler=None, epochs=1):\n print('Training...')\n for epoch in range(epochs):\n self.train()\n for train_in, train_out in data:\n self.compute_loss(train_in, train_out, is_guess=False, training=True)\n self.eval()\n if validating_data:\n with torch.no_grad():\n valid_loss = self.compute_loss_loader(validating_data).item()\n print('Average validation error at step ',epoch+1,': ', valid_loss)\n if scheduler and valid_loss:\n scheduler.step()", "def train(self, epochs=10):\n self.model.set_mode('train')\n self.dataloader.reset()\n \n for epoch in range(1, epochs+1):\n running_loss = 0.0\n with tqdm(range(1, len(self.dataloader)+1),\n desc=\"Epoch %d/%d\" % (epoch, epochs),\n unit=\"batches\") as t:\n for i in t:\n self.model.detach_hidden(zero=self.zero_hidden)\n self.optimizer.zero_grad()\n sequence, target = self.dataloader()\n outputs = self.model(sequence)\n loss = self.criterion(torch.chunk(outputs, self.dataloader.time_steps, 1)[-1].squeeze(1), target)\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n t.set_postfix(loss=running_loss/i)\n self.dataloader.reset()\n if self.save_file is not None:\n print(\"Saving progress...\")\n self.save(self.save_file)\n print(\"\\nDone! Final loss: %f\" % (running_loss / len(self.dataloader)))", "def wrapper_train(tree_depth, demos, validation_demos, pred_data=[None,None], verbose=True):\n return train(program_gen_step_size = 1000, \n num_programs = NUM_PROGRAMS, \n num_dts = 5, \n max_num_particles = 25, \n input_demos = demos, \n further_demos = validation_demos, \n tree_depth = tree_depth, \n return_prior=True,\n pred_data=pred_data,\n verbose=verbose)", "def train(train_loader, model, criterion, optimizer, epoch, noise_decay_rate,\n noise_param_list, noise_decay, random_noise=False, sparsity=0):\n global total_steps, exp_flops, exp_l0, args, writer\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n loss_part = []\n acc_part = []\n print(\"Before\", [param.abs().mean().item() for name, param in\n noise_param_list])\n for i, (input_, target) in enumerate(train_loader):\n noise_decay *= noise_decay_rate\n data_time.update(time.time() - end)\n total_steps += 1\n if torch.cuda.is_available():\n target = target.cuda(async=True)\n input_ = input_.cuda()\n #input_var = torch.autograd.Variable(input_)\n #target_var = torch.autograd.Variable(target)\n\n # compute output\n output = model(input_)\n preds = output.max(dim=1)[1]\n loss = criterion(output, target, model)\n\n # measure accuracy and record loss\n #prec1 = accuracy(output.data, target, topk=(1,))[0]\n prec1 = (preds == target).sum().item() / preds.size(0)\n losses.update(loss.item(), input_.size(0))\n top1.update(100 - prec1*100, input_.size(0))\n loss_part.append(loss.item())\n acc_part.append(prec1)\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # clamp the parameters\n layers = model.layers if not args.multi_gpu else model.module.layers\n for k, layer in enumerate(layers):\n layer.constrain_parameters()\n\n e_fl, e_l0 = model.get_exp_flops_l0() if not args.multi_gpu else \\\n model.module.get_exp_flops_l0()\n exp_flops.append(e_fl)\n exp_l0.append(e_l0)\n if writer is not None:\n writer.add_scalar('stats_comp/exp_flops', e_fl, total_steps)\n 
writer.add_scalar('stats_comp/exp_l0', e_l0, total_steps)\n\n if not args.multi_gpu:\n if model.beta_ema > 0.:\n model.update_ema()\n else:\n if model.module.beta_ema > 0.:\n model.module.update_ema()\n # decay noise\n if random_noise and sparsity == 0:\n [init.kaiming_normal_(param) if 'bias' not in name else\n init.uniform_(param, - math.sqrt(1 / param.shape[0]),\n math.sqrt(1 /param.shape[0])) \\\n for name, param in noise_param_list]\n if noise_decay_rate < 1:\n [param.data.mul_(noise_decay) for name, param in noise_param_list]\n else:\n if noise_decay_rate < 1:\n [param.data.mul_(noise_decay_rate) for name, param in noise_param_list]\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # input()\n if (i + 1) % args.print_freq == 0 and args.verbose:\n print(' Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Err@1 {top1.val:.3f} ({top1.avg:.3f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1))\n\n print(\"After\", [param.abs().mean().item() for name, param in noise_param_list])\n\n # log to TensorBoard\n if writer is not None:\n writer.add_scalar('train/loss', losses.avg, epoch)\n writer.add_scalar('train/acc', np.mean(acc_part), epoch)\n writer.add_scalar('train/err', top1.avg, epoch)\n print(noise_decay)\n return np.mean(loss_part), np.mean(acc_part), noise_decay" ]
[ "0.7443008", "0.7346822", "0.7322582", "0.7247038", "0.6962793", "0.6957798", "0.6951179", "0.68994415", "0.6894855", "0.68578106", "0.684405", "0.6805708", "0.6804601", "0.67843026", "0.6783091", "0.67713857", "0.6762238", "0.67368186", "0.6734932", "0.6691646", "0.6680826", "0.6670736", "0.6658369", "0.66562605", "0.6654794", "0.66463584", "0.6601257", "0.66010475", "0.6589077", "0.658754", "0.6578497", "0.65743893", "0.65657824", "0.6557378", "0.6556789", "0.65457827", "0.65400094", "0.65384316", "0.65374476", "0.6534014", "0.65336484", "0.6519751", "0.6516352", "0.64953136", "0.64934784", "0.64809394", "0.647419", "0.6456496", "0.64527255", "0.645158", "0.64460754", "0.6445804", "0.6439191", "0.64386284", "0.64360917", "0.643506", "0.6427764", "0.6422154", "0.64211893", "0.64195883", "0.6413171", "0.64050436", "0.640095", "0.6397427", "0.63875043", "0.6381544", "0.63772666", "0.6372931", "0.63607585", "0.63589156", "0.6356081", "0.6355326", "0.6345771", "0.63431865", "0.6331627", "0.6327691", "0.6319753", "0.6319325", "0.63189054", "0.63109833", "0.63095134", "0.63064486", "0.63019085", "0.62998414", "0.6298728", "0.6296018", "0.629432", "0.62911576", "0.6280993", "0.627193", "0.62719", "0.62675023", "0.6264798", "0.62636036", "0.626265", "0.62573576", "0.625292", "0.6249561", "0.6246237", "0.62444645", "0.62435967" ]
0.0
-1
Generates predictions and attentions for a batch.
def apply_to_batch(self, batch_dict): self._last_batch = batch_dict if isinstance(self.model,NMTModelWithMLTM): y_pred = self.model(x_source=batch_dict['x_source'], x_mltm=batch_dict['x_source_mltm_vector'], x_source_lengths=batch_dict['x_source_length'], target_sequence=batch_dict['x_target']) else: y_pred = self.model(x_source=batch_dict['x_source'], x_source_lengths=batch_dict['x_source_length'], target_sequence=batch_dict['x_target']) self._last_batch['y_pred'] = y_pred attention_batched = np.stack(self.model.decoder._cached_p_attn).transpose(1, 0, 2) self._last_batch['attention'] = attention_batched
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):\n raise NotImplementedError", "def predict_on_batch(engine, batch):\n\t\tengine.model.eval()\n\t\tengine.model.rpn.nms_thresh = 0.3\n\t\twith torch.no_grad():\n\t\t\timgs, target = prepare_batch(batch, device=get_device(engine.model))\n\t\t\ty_pred = engine.model(imgs)\n\t\treturn y_pred, target", "def __predict_batch(self, model: AutoModel, batch: Tuple):\n input_ids_batch = batch[0]\n token_type_ids_batch = batch[1]\n attention_mask_batch = batch[2]\n\n output = model(\n input_ids=input_ids_batch,\n token_type_ids=token_type_ids_batch,\n attention_mask=attention_mask_batch,\n )\n\n logits = output.logits\n preds_batch = np.argmax(torch.softmax(logits, dim=1).detach().numpy(), axis=1)\n preds_batch_list = list(preds_batch)\n\n return preds_batch_list", "def predict_batch(self, imgs_batch, augment=False):\n if augment:\n aug_funcs = [\n lambda x: x, # identity\n lambda x: x[:, ::-1, ...], # vlip\n lambda x: x[:, :, ::-1], # hflip\n lambda x: np.rot90(x, 1, axes=(1, 2)), # +90\n lambda x: np.rot90(x, 2, axes=(1, 2)), # +180\n lambda x: np.rot90(x, 3, axes=(1, 2)), # +270\n lambda x: np.rot90(x, 1, axes=(1, 2))[:, ::-1, ...], # vflip(+90)\n lambda x: np.rot90(x, 1, axes=(1, 2))[:, :, ::-1] # vflip(+90)\n ]\n\n yp = np.zeros((imgs_batch.shape[0], len(TAGS)))\n for aug_func in aug_funcs:\n imgs_batch = aug_func(imgs_batch)\n tags_batch = self.net.predict(imgs_batch)\n yp += tags_batch / len(aug_funcs)\n return yp\n else:\n return self.net.predict_on_batch(imgs_batch)", "def feed_batch(self, generated_batch, generated_labels):\n _, self.act2, _ = self.inference_net(generated_batch.cuda(self.gpu_id))\n self.g_labels = generated_labels", "def eval(self): \n inputs,enc_input_weights, outputs, dec_input_weights = self.get_batch()\n predicted_ids = self.model.step(self.sess, inputs, enc_input_weights) \n print(\"=\"*20)\n for i in range(FLAGS.batch_size):\n print(\"* %dth sample target: %s\" % (i,str(outputs[i,1:]-2)))\n for predict in predicted_ids[i]:\n print(\"prediction: \"+str(predict)) \n print(\"=\"*20)", "def generate_batch(\n batch: Tuple[Dict[str, Sequence[int]], List[Sequence[int]]]\n ) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:\n input_ids = torch.tensor([b[0][\"input_ids\"] for b in batch])\n attention_mask = torch.tensor([b[0][\"attention_mask\"] for b in batch])\n token_type_ids = torch.tensor([b[0][\"token_type_ids\"] for b in batch])\n labels = torch.tensor([b[1] for b in batch])\n features = {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n return features, labels", "def on_predict_batch_begin(self, batch, logs=None):", "def batched_predict(model, batcher, batch_size, int_mapped_X, doc_labels):\n # Intialize batcher but dont shuffle.\n train_batcher = batcher(full_X=int_mapped_X, full_y=doc_labels,\n batch_size=batch_size, shuffle=False)\n preds = []\n for batch_X, _ in train_batcher.next_batch():\n batch_preds = model.predict(batch_X=batch_X)\n preds.append(batch_preds)\n preds = np.hstack(preds)\n return preds", "def predict_batch(self, model, context, data=None):\n pass", "def process(self, data_batch: Sequence[dict],\n data_samples: Sequence[dict]) -> None:\n for data_sample in data_samples:\n # predicted keypoints coordinates, [1, K, D]\n pred_coords = data_sample['pred_instances']['keypoints']\n # ground truth data_info\n gt = data_sample['gt_instances']\n # ground truth keypoints coordinates, [1, K, D]\n gt_coords = 
gt['lifting_target']\n # ground truth keypoints_visible, [1, K, 1]\n mask = gt['lifting_target_visible'].astype(bool).reshape(1, -1)\n # instance action\n img_path = data_sample['target_img_path']\n _, rest = osp.basename(img_path).split('_', 1)\n action, _ = rest.split('.', 1)\n\n result = {\n 'pred_coords': pred_coords,\n 'gt_coords': gt_coords,\n 'mask': mask,\n 'action': action\n }\n\n self.results.append(result)", "def predict_on_batch(self, input_batch):\n from deeplift.util import run_function_in_batches\n from deeplift.util import compile_func\n x_standardized = self.model._batch_to_list(input_batch)\n if self.fwd_predict_fn is None:\n # TODO: Once DeepLIFT layer annotation works integrate it here too:\n \"\"\"\n # identify model output layers:\n self.output_layers_idxs = []\n for output_name in self.model.model.output_names:\n for i, l in enumerate(self.model.model.layers):\n if l.name == output_name:\n self.output_layers_idxs.append(i)\n \"\"\"\n inputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.input_layer_idxs]\n outputs = [self.deeplift_model.get_layers()[i].get_activation_vars()\n for i in self.output_layers_idxs]\n self.fwd_predict_fn = compile_func(inputs, outputs)\n\n preds = run_function_in_batches(\n input_data_list=x_standardized,\n func=self.fwd_predict_fn,\n batch_size=self.batch_size,\n progress_update=None)\n\n preds = np.array(preds)\n if len(self.output_layers_idxs) == 1:\n preds = preds[0, ...]\n\n return preds", "def process(self, data_batch: Sequence[Dict],\n data_samples: Sequence[Dict]) -> None:\n for data_sample in data_samples:\n pred_labels = data_sample.get('pred_instances').get(self.key).cpu()\n gt_labels = data_sample.get('gt_instances').get(self.key).cpu()\n\n result = dict(\n pred_labels=pred_labels.flatten(),\n gt_labels=gt_labels.flatten())\n self.results.append(result)", "def batch_predict(\n self, batch_in: Union[Tuple[tf.Tensor, ...], Tuple[np.ndarray, ...]]\n ) -> Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]:\n if self.all_labels_embed is None:\n raise ValueError(\n \"The model was not prepared for prediction. 
\"\n \"Call `prepare_for_predict` first.\"\n )\n\n tf_batch_data = self.batch_to_model_data_format(\n batch_in, self.predict_data_signature\n )\n self._compute_dialogue_indices(tf_batch_data)\n\n dialogue_in, text_output, text_sequence_lengths = self._process_batch_data(\n tf_batch_data\n )\n (\n dialogue_embed,\n dialogue_mask,\n dialogue_transformer_output,\n attention_weights,\n ) = self._embed_dialogue(dialogue_in, tf_batch_data)\n dialogue_mask = tf.squeeze(dialogue_mask, axis=-1)\n\n sim_all, scores = self._tf_layers[\n f\"loss.{LABEL}\"\n ].get_similarities_and_confidences_from_embeddings(\n dialogue_embed[:, :, tf.newaxis, :],\n self.all_labels_embed[tf.newaxis, tf.newaxis, :, :],\n dialogue_mask,\n )\n\n predictions = {\n \"scores\": scores,\n \"similarities\": sim_all,\n DIAGNOSTIC_DATA: {\"attention_weights\": attention_weights},\n }\n\n if (\n self.config[ENTITY_RECOGNITION]\n and text_output is not None\n and text_sequence_lengths is not None\n ):\n pred_ids, confidences = self._batch_predict_entities(\n tf_batch_data,\n dialogue_transformer_output,\n text_output,\n text_sequence_lengths,\n )\n name = ENTITY_ATTRIBUTE_TYPE\n predictions[f\"e_{name}_ids\"] = pred_ids\n predictions[f\"e_{name}_scores\"] = confidences\n\n return predictions", "def run_testing_batch(self, session, batch):\n feed_dict = self.batch_to_feed(batch)\n feed_dict[self.use_dropout_placeholder] = 0.0\n fetches = [self.loss, self.predictions]\n loss, probabilities = session.run(fetches, feed_dict=feed_dict)\n return loss, probabilities", "def inference(self, input_batch):\r\n inferences = []\r\n # Handling inference for token_classification.\r\n batch_size = len(input_batch)\r\n\r\n num_rows = batch_size\r\n for i in range(num_rows):\r\n inferences.append({'entity':input_batch[i]})\r\n logger.info(\"Model predicted: '%s'\", input_batch)\r\n\r\n return inferences", "def _predict_batch(self, review_fwd, review_bwd):\n summary_out = []\n # Forward\n feed_dict_test_fwd = {self.enc_inp_fwd[t]: review_fwd[t] for t in range(self.seq_length)}\n feed_dict_test_fwd.update({self.labels[t]: review_fwd[t] for t in range(self.seq_length)})\n summary_test_prob_fwd = self.sess.run(self.dec_outputs_fwd_tst, feed_dict_test_fwd)\n # Backward\n feed_dict_test_bwd = {self.enc_inp_bwd[t]: review_bwd[t] for t in range(self.seq_length)}\n feed_dict_test_bwd.update({self.labels[t]: review_bwd[t] for t in range(self.seq_length)})\n summary_test_prob_bwd = self.sess.run(self.dec_outputs_bwd_tst, feed_dict_test_bwd)\n\n summary_sum_pool = [x + y for x, y in zip(summary_test_prob_fwd, summary_test_prob_bwd)]\n # Do a softmax layer to get the final result\n summary_test_out = [logits_t.argmax(axis=1) for logits_t in summary_sum_pool]\n\n for i in range(self.test_batch_size):\n summary_out.append([x[i] for x in summary_test_out])\n\n return summary_out", "def batch_generate(self, inputs, labels, batch_size=64):\n inputs_image, inputs, labels = check_inputs_labels(inputs, labels)\n arr_x = inputs\n arr_y = labels\n len_x = inputs_image.shape[0]\n batch_size = check_int_positive('batch_size', batch_size)\n batches = int(len_x / batch_size)\n rest = len_x - batches*batch_size\n res = []\n for i in range(batches):\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[i*batch_size: (i + 1)*batch_size] for sub_items in arr_x])\n else:\n x_batch = arr_x[i*batch_size: (i + 1)*batch_size]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[i*batch_size: (i + 1)*batch_size] for sub_labels in arr_y])\n else:\n y_batch = 
arr_y[i*batch_size: (i + 1)*batch_size]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n if rest != 0:\n if isinstance(arr_x, tuple):\n x_batch = tuple([sub_items[batches*batch_size:] for sub_items in arr_x])\n else:\n x_batch = arr_x[batches*batch_size:]\n if isinstance(arr_y, tuple):\n y_batch = tuple([sub_labels[batches*batch_size:] for sub_labels in arr_y])\n else:\n y_batch = arr_y[batches*batch_size:]\n adv_x = self.generate(x_batch, y_batch)\n # Black-attack methods will return 3 values, just get the second.\n res.append(adv_x[1] if isinstance(adv_x, tuple) else adv_x)\n\n adv_x = np.concatenate(res, axis=0)\n return adv_x", "def _process_batch(sess, original_images, semantic_predictions, image_names,\n image_heights, image_widths, image_id_offset, save_dir,\n raw_save_dir, train_id_to_eval_id=None):\n (original_images,\n semantic_predictions,\n image_names,\n image_heights,\n image_widths) = sess.run([original_images, semantic_predictions,\n image_names, image_heights, image_widths])\n\n num_image = semantic_predictions.shape[0]\n for i in range(num_image):\n image_height = np.squeeze(image_heights[i])\n image_width = np.squeeze(image_widths[i])\n original_image = np.squeeze(original_images[i])\n semantic_prediction = np.squeeze(semantic_predictions[i])\n crop_semantic_prediction = semantic_prediction[:image_height, :image_width]\n\n # Save image.\n save_annotation.save_annotation(\n original_image, save_dir, _IMAGE_FORMAT % (image_id_offset + i),\n add_colormap=False)\n\n # Save prediction.\n save_annotation.save_annotation(\n crop_semantic_prediction, save_dir,\n _PREDICTION_FORMAT % (image_id_offset + i), add_colormap=True,\n colormap_type=FLAGS.colormap_type)\n\n if FLAGS.also_save_raw_predictions:\n image_filename = os.path.basename(image_names[i])\n\n if train_id_to_eval_id is not None:\n crop_semantic_prediction = _convert_train_id_to_eval_id(\n crop_semantic_prediction,\n train_id_to_eval_id)\n save_annotation.save_annotation(\n crop_semantic_prediction, raw_save_dir, image_filename,\n add_colormap=False)", "def predict(self, images, batch_size):\n pass", "def predict(self):\n batch = get_predict_batch(1, num_rec_out=self.num_test_rec)\n self.g_model.test_batch(\n batch, self.global_step, num_rec_out=self.num_test_rec)", "def generate_train_batch(self):\n\n patients_indices = self.get_indices()\n patients_for_batch = [self._data[i] for i in patients_indices]\n\n data = np.zeros((self.batch_size, 1, *self.patch_size), dtype=np.short)\n labels = np.empty(self.batch_size, dtype=np.float32)\n\n # iterate over patients_for_batch and include them in the batch\n for i, j in enumerate(patients_for_batch):\n patient_data_ct = np.load(j).astype(np.short)\n\n data[i] = self.preprocess_func(patient_data_ct).astype(np.short)\n path = str(j).split('/')[-1].replace('.npy', '')\n labels[i] = float(self.age_info[path])\n\n return {'data': np.array(data), 'label': np.array(labels)}", "def predict_batch(self, batch):\n # batch = src_tensor when predicting = [batch_size, src len]\n\n src_tensor = batch\n src_mask = self.make_src_mask(batch)\n\n # src_mask = [batch size, 1, 1, src len]\n\n enc_src = self.encoder(src_tensor, src_mask)\n\n # enc_src = [batch size, src len, hid dim]\n\n trg_indexes = [[self.trg_lang.SOS_idx] for _ in range(len(batch))]\n\n # trg_indexes = [batch_size, cur trg len = 1]\n\n trg_tensor = torch.LongTensor(trg_indexes).to(self.device)\n\n # 
trg_tensor = [batch_size, cur trg len = 1]\n # cur trg len increases during the for loop up to the max len\n\n for _ in range(self.hparams.max_len):\n\n trg_mask = self.make_trg_mask(trg_tensor)\n\n # trg_mask = [batch size, 1, cur trg len, cur trg len]\n\n output, attention = self.decoder(trg_tensor, enc_src, trg_mask, src_mask)\n\n # output = [batch size, cur trg len, output dim]\n\n preds = output.argmax(2)[:, -1].reshape(-1, 1)\n\n # preds = [batch_size, 1]\n\n trg_tensor = torch.cat((trg_tensor, preds), dim=-1)\n\n # trg_tensor = [batch_size, cur trg len], cur trg len increased by 1\n\n src_tensor = src_tensor.detach().cpu().numpy()\n trg_tensor = trg_tensor.detach().cpu().numpy()\n attention = attention.detach().cpu().numpy()\n\n pred_words = []\n pred_sentences = []\n pred_attention = []\n for src_indexes, trg_indexes, attn in zip(src_tensor, trg_tensor, attention):\n # trg_indexes = [trg len = max len (filled with eos if max len not needed)]\n # src_indexes = [src len = len of longest sentence (padded if not longest)]\n\n # indexes where first eos tokens appear\n src_eosi = np.where(src_indexes == self.src_lang.EOS_idx)[0][0]\n _trg_eosi_arr = np.where(trg_indexes == self.trg_lang.EOS_idx)[0]\n if len(_trg_eosi_arr) > 0: # check that an eos token exists in trg\n trg_eosi = _trg_eosi_arr[0]\n else:\n trg_eosi = len(trg_indexes)\n\n # cut target indexes up to first eos token and also exclude sos token\n trg_indexes = trg_indexes[1:trg_eosi]\n\n # attn = [n heads, trg len=max len, src len=max len of sentence in batch]\n # we want to keep n heads, but we'll cut trg len and src len up to\n # their first eos token\n attn = attn[:, :trg_eosi, :src_eosi] # cut attention for trg eos tokens\n\n words = [self.trg_lang.index2word[index] for index in trg_indexes]\n sentence = self.trg_lang.words_to_sentence(words)\n pred_words.append(words)\n pred_sentences.append(sentence)\n pred_attention.append(attn)\n\n # pred_sentences = [batch_size]\n # pred_words = [batch_size, trg len]\n # attention = [batch size, n heads, trg len (varies), src len (varies)]\n\n return pred_sentences, pred_words, pred_attention", "def default_generator(self,\n dataset,\n epochs=1,\n predict=False,\n deterministic=True,\n pad_batches=True):\n for epoch in range(epochs):\n if not predict:\n print('Starting epoch %i' % epoch)\n for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(\n batch_size=self.batch_size,\n deterministic=deterministic,\n pad_batches=pad_batches):\n\n feed_dict = dict()\n if y_b is not None and not predict:\n for index, label in enumerate(self.labels_fd):\n if self.mode == \"classification\":\n feed_dict[label] = to_one_hot(y_b[:, index])\n if self.mode == \"regression\":\n feed_dict[label] = y_b[:, index:index + 1]\n if w_b is not None:\n feed_dict[self.weights] = w_b\n # Transform SMILES string to integer vectors\n smiles_seqs = [self.smiles_to_seq(smiles) for smiles in ids_b]\n feed_dict[self.smiles_seqs] = np.stack(smiles_seqs, axis=0)\n yield feed_dict", "def generate(batch, size=32):\n\n # Using the data Augmentation in traning data\n ptrain = 'data224/train'\n pval = 'data224/test'\n\n datagen1 = ImageDataGenerator(\n samplewise_center=True,\n samplewise_std_normalization=True,\n shear_range=0.2,\n zoom_range=0.2,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2,\n horizontal_flip=True)\n\n datagen2 = ImageDataGenerator(samplewise_center=True,\n samplewise_std_normalization=True,)\n\n train_generator = datagen1.flow_from_directory(\n ptrain,\n target_size=(size, size),\n 
batch_size=batch,\n class_mode='categorical')\n\n validation_generator = datagen2.flow_from_directory(\n pval,\n target_size=(size, size),\n batch_size=batch,\n class_mode='categorical')\n\n count1 = 0\n for root, dirs, files in os.walk(ptrain):\n for each in files:\n count1 += 1\n\n count2 = 0\n for root, dirs, files in os.walk(pval):\n for each in files:\n count2 += 1\n\n return train_generator, validation_generator, count1, count2", "def on_predict_batch_end(self, batch, logs=None):", "def generate_batch(\n batch, vocab: Dict[str, int]\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor]:\n input_unigrams = [DatasetLSTM.encode_sequence(b[0][0], vocab) for b in batch]\n input_bigrams = [DatasetLSTM.encode_sequence(b[0][1], vocab) for b in batch]\n input_unigrams = torch.tensor(input_unigrams)\n input_bigrams = torch.tensor(input_bigrams)\n labels = torch.tensor([b[1] for b in batch])\n return (input_unigrams, input_bigrams), labels", "def predict(self, dataset, batch_size):\n self.eval()\n yhat = torch.Tensor().to(self.device)\n with torch.no_grad():\n for i in tqdm(range(0,len(dataset.X),batch_size)):\n batch_X = dataset.X[i:i+batch_size].view(-1,1,self.input_rows,self.input_cols).to(self.device)\n batch_y = dataset.y[i:i+batch_size].to(self.device)\n\n batch_ls_embed = dataset.ls[i:i+batch_size].to(self.device) if self.type == \"listener\" or self.type == \"both\" else None\n batch_sp_embed = dataset.sp[i:i+batch_size].to(self.device) if self.type == \"speaker\" or self.type == \"both\" else None\n\n outputs = self(batch_X, batch_ls_embed, batch_sp_embed)\n\n yhat = torch.cat((yhat, outputs), 0)\n\n\n\n yf = dataset.y[:, 1]\n yhatf = torch.argmax(yhat, 1).cpu()\n stats = precision_recall_fscore_support(yf, yhatf)\n\n tp = 0\n tn = 0\n fn = 0\n fp = 0\n for i, j in zip(yhat, dataset.y):\n if torch.argmax(i) == torch.argmax(j):\n if j.data.numpy()[0] == 1: # positive instance\n tp += 1\n else:\n tn += 1\n else:\n if j.data.numpy()[0] == 1:\n fn += 1\n else:\n fp += 1\n acc = (tp + tn) / (tp + tn + fp + fn)\n\n print(f\"Accuracy: {round(acc*100,4)}\")\n print(f\"Confusion: TP: {tp}, FP: {fp}, FN: {fn}, TN: {tn}\")\n\n print(f\"Precision BC: {round(stats[0][0]*100,4)}\")\n print(f\"Precision NO BC: {round(stats[0][1]*100,4)}\")\n print(f\"Recall BC: {round(stats[1][0]*100,4)}\")\n print(f\"Recall No BC: {round(stats[1][1]*100,4)}\")\n print(f\"F-score BC: {round(stats[2][0]*100,4)}\")\n print(f\"F-score No BC: {round(stats[2][1]*100,4)}\")", "def process(self, data_batch: Any, predictions: Sequence[dict]) -> None:\n self.results.extend(_to_cpu(predictions))", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n if self.mode == tf.estimator.ModeKeys.PREDICT and self.imagenet_train_predict_partial:\n # Sort and shuffle with seed to randomize deterministically.\n random.seed(self.imagenet_train_predict_shuffle_seed)\n random.shuffle(filenames)\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=50000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n if self.imagenet_train_predict_partial:\n MAX_EXAMPLES = 50000\n # Skip to start at a random spot in the first TFRecord.\n 
random.seed(self.imagenet_train_predict_shuffle_seed)\n skip_examples = random.randint(0, 1251)\n dataset = dataset.skip(skip_examples)\n # Continue shuffling amongst at least as many examples\n # as it could see in 3 cross validations.\n dataset.shuffle(buffer_size=3 * MAX_EXAMPLES,\n seed=self.imagenet_train_predict_shuffle_seed)\n num_examples = MAX_EXAMPLES\n else:\n # Take whole training set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n # Take whole validation set.\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def predict(self, model, batch):\n device = list(model.parameters())[0].device\n batch = batch.to(device)\n inputs = batch.inputs\n # Extract features with the model\n h = model(*inputs)\n # predictions\n return self.predict_on_features(h)", "def train(self, batch):\n pass", "def on_predict_batch_begin(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass", "def generator(self, annotations_csv_path, num_classes, augmentation=False, batch_size=4, size=224):\n\n # Get image paths and labels\n image_paths, labels = self.__read_image_paths_labels(annotations_csv_path)\n steps = len(image_paths)//batch_size\n \n step = 0\n itr = 0\n \n while True:\n #for itr in range(0, len(image_paths), batch_size):\n\n # Storing batches of paths and labels in lists\n temp_path = [image_paths[i] for i in range(itr, itr + batch_size)]\n temp_label = [labels[i] for i in range(itr, itr + batch_size)]\n\n # Create empty tensors for images and labels\n image_batch = np.zeros((batch_size, size, size, 3), dtype=np.float32)\n label_batch = np.zeros((batch_size, num_classes), dtype=np.float32)\n\n # Keep track of batch size\n count = 0\n\n for n, path in enumerate(temp_path):\n\n temp_org_image = self.__read_image(path)\n temp_image = self.__preprocesses_image(temp_org_image, size)\n\n image_batch[count] = temp_image\n label_batch[count] = temp_label[n]\n\n # At least two more empty arrays must be available in the \n # image_batch tensor\n if not temp_label[n][-1] and count < batch_size-2 and augmentation: \n\n aug_image_1 = self.sequence.augment_image(temp_org_image)\n aug_image_2 = self.sequence.augment_image(temp_org_image)\n\n aug_image_1 = self.__preprocesses_image(aug_image_1, size)\n aug_image_2 = self.__preprocesses_image(aug_image_2, size)\n\n image_batch[count+1] = aug_image_1\n label_batch[count+1] = temp_label[n]\n\n image_batch[count+2] = aug_image_2\n label_batch[count+2] = temp_label[n]\n\n count += 3\n\n else: \n count += 1\n\n\n if count == batch_size:\n break\n \n step += 1\n itr += batch_size\n\n yield image_batch, label_batch\n \n if step >= steps:\n step = 0\n itr = 0", "def predict(self, images):\n\t\t#testing_dataset = tf.data.Dataset.from_tensor_slices(images)\n\t\ttf.keras.backend.set_learning_phase(0)\n\t\ttesting_dataset = tf.data.Dataset.from_tensor_slices(np.asarray(images)).map(lambda x: tf.image.resize(x, [self.image_size, self.image_size]) / 
255.0)\n\t\t#testing_dataset_shape = tf.data.Dataset.from_tensor_slices(np.full((len(images), 2), 500, dtype=np.int32))\n\t\ttesting_iterator_X = tf.data.Dataset.zip((testing_dataset, )).batch(self.batch_size).make_initializable_iterator()\n\n\t\tself.sess.run(testing_iterator_X.initializer)\n\t\ttesting_handle_X = self.sess.run(testing_iterator_X.string_handle())\n\n\t\tfinal_output = np.zeros([len(images), 500, 500, num_classes])\n\t\tj = 0\n\t\tcount = 0\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t[test_output] = self.sess.run(\n\t\t\t\t\t[self.output],\n\t\t\t\t\t\tfeed_dict={\n\t\t\t\t\t\t\tself.is_training: False,\n\t\t\t\t\t\t\tself.handle_X: testing_handle_X,\n\t\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\tthis_len = len(test_output)\n\t\t\t\tfor z in range(len(test_output)):\n\t\t\t\t\tfor dim in range(num_classes):\n\t\t\t\t\t\tfinal_output[count+z:count+z+1, :, :, dim] = scipy.misc.imresize(test_output[z, :, :, dim], [500, 500])\n\n\t\t\t\t#final_output[count:count+this_len, :, :, :] = test_output\n\t\t\t\tto = final_output[count:count+this_len, :, :, :].argmax(axis=-1)\n\t\t\t\t'''\n\t\t\t\tpdb.set_trace()\n\t\t\t\tfor z in range(this_len):\n\t\t\t\t\tplt.matshow(to[z])\n\t\t\t\t\tplt.colorbar()\n\t\t\t\t\tplt.show()\n\t\t\t\t'''\n\t\t\t\tcount += this_len\n\t\t\t\tprint(f'Batch: {j}')\n\t\t\t\tj += 1\n\t\t\texcept tf.errors.OutOfRangeError:\n\t\t\t\tbreak\n\t\treturn final_output", "def generate_batch(batch_ims):\n batch_X = np.zeros((len(batch_ims), 3, 224, 224))\n batch_y = np.zeros((len(batch_ims), 1))\n for i, im_file in enumerate(batch_ims):\n img = imread(im_file).astype(\"float32\")\n img[:, :, 0] -= 103.939\n img[:, :, 1] -= 116.779\n img[:, :, 2] -= 123.68\n img = img.transpose((2, 0, 1))\n batch_X[i, :, :, :] = img\n\n file_id = im_file.split(\"/\")[-1].split(\"_\")[0]\n score = labels_map[file_id][PERS_FIELD_NAME]\n if score >= 5.5:\n batch_y[i] = 1\n return (batch_X, batch_y)", "def generate(self, models, sample, **kwargs):\n net_input = sample['net_input']\n\n def batch_for_softmax(dec_out, target):\n # assumes decoder_out[0] is the only thing needed (may not be correct for future models!)\n first, rest = dec_out[0], dec_out[1:]\n bsz, tsz, dim = first.shape\n if bsz * tsz < self.softmax_batch:\n yield dec_out, target, True\n else:\n flat = first.contiguous().view(1, -1, dim)\n flat_tgt = target.contiguous().view(flat.shape[:-1])\n s = 0\n while s < flat.size(1):\n e = s + self.softmax_batch\n yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False\n s = e\n\n def gather_target_probs(probs, target):\n probs = probs.gather(\n dim=2,\n index=target.unsqueeze(-1),\n )\n return probs\n\n orig_target = sample['target']\n\n # compute scores for each model in the ensemble\n avg_probs = None\n avg_attn = None\n for model in models:\n model.eval()\n decoder_out = model(**net_input)\n attn = decoder_out[1]\n if type(attn) is dict:\n attn = attn.get('attn', None)\n\n batched = batch_for_softmax(decoder_out, orig_target)\n probs, idx = None, 0\n for bd, tgt, is_single in batched:\n sample['target'] = tgt\n curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data\n if is_single:\n probs = gather_target_probs(curr_prob, orig_target)\n else:\n if probs is None:\n probs = curr_prob.new(orig_target.numel())\n step = curr_prob.size(0) * curr_prob.size(1)\n end = step + idx\n tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)\n probs[idx:end] = tgt_probs.view(-1)\n idx = end\n sample['target'] = orig_target\n\n probs = 
probs.view(sample['target'].shape)\n\n if avg_probs is None:\n avg_probs = probs\n else:\n avg_probs.add_(probs)\n if attn is not None and torch.is_tensor(attn):\n attn = attn.data\n if avg_attn is None:\n avg_attn = attn\n else:\n avg_attn.add_(attn)\n if len(models) > 1:\n avg_probs.div_(len(models))\n avg_probs.log_()\n if avg_attn is not None:\n avg_attn.div_(len(models))\n\n bsz = avg_probs.size(0)\n hypos = []\n start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz\n for i in range(bsz):\n # remove padding from ref\n ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \\\n if sample['target'] is not None else None\n tgt_len = ref.numel()\n avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]\n score_i = avg_probs_i.sum() / tgt_len\n if avg_attn is not None:\n avg_attn_i = avg_attn[i]\n alignment = utils.extract_hard_alignment(\n avg_attn_i,\n sample['net_input']['src_tokens'][i],\n sample['target'][i],\n self.pad,\n self.eos,\n )\n else:\n avg_attn_i = alignment = None\n hypos.append([{\n 'tokens': ref,\n 'score': score_i,\n 'attention': avg_attn_i,\n 'alignment': alignment,\n 'positional_scores': avg_probs_i,\n }])\n return hypos", "def on_predict_batch_begin(self, step, logs=None):", "def process_batch(batch):\n args = get_args()\n\n tokens = batch['text'].long().cuda().contiguous()\n types = batch['types'].long().cuda().contiguous()\n labels = batch['label'].long().cuda().contiguous()\n attention_mask = batch['padding_mask'].float().cuda().contiguous()\n if args.fp16:\n attention_mask = attention_mask.half()\n\n return tokens, types, labels, attention_mask", "def predict_batch_generator(self):\n input = np.zeros((self.batch_size, self.max_seq_len,\n self.embedding_size))\n seq_lengths = np.zeros((self.batch_size), dtype=np.intp)\n unique_counts = np.zeros((self.batch_size), dtype=np.intp)\n i = 0\n\n fi = open(self.config.parsed_predict_file)\n sample_gen = self.predict_sample_generator(fi)\n self.load_embedding()\n\n for sequence, seq_length, unique_count in sample_gen:\n seq_lengths[i], unique_counts[i] = seq_length, unique_count\n if seq_lengths[i] > self.max_seq_len:\n seq_lengths[i] = self.max_seq_len\n sequence = sequence[:seq_lengths[i]]\n input[i, 0:seq_lengths[i], :] = self.embedding[sequence, :]\n\n i += 1\n\n if i == self.batch_size:\n yield input, seq_lengths, unique_counts\n input = np.zeros(\n (self.batch_size, self.max_seq_len,\n self.embedding_size)\n )\n i = 0\n\n if i < self.batch_size:\n yield input[:i, :, :], seq_lengths[:i], unique_counts[:i]\n\n fi.close()", "def _data_generation(self, batch_data):\n # Initialization\n batch_x = []\n batch_y = defaultdict(list)\n\n for ind, item_data in batch_data.iterrows():\n img_path = os.path.join(self.img_dir, \"images\", \"rgb\", item_data[\"name\"])\n img = cv2.imread(img_path)\n try:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n except Exception as error:\n print(img_path)\n print(error)\n not_valid_mask = self.read_masks_borders(item_data[\"name\"])\n img[not_valid_mask] = 0\n\n # getmasks\n targets = np.zeros((img.shape[0], img.shape[1], len(self.classes)))\n for i, c in enumerate(self.classes):\n mask_path = os.path.join(self.img_dir, \"labels\", c, item_data[\"name\"])\n mask = cv2.imread(\n mask_path.replace(\".jpg\", \".png\"), cv2.IMREAD_GRAYSCALE\n )\n mask[not_valid_mask[:, :, 0]] = 0\n mask = mask > 0\n targets[:, :, i] = mask\n\n res = self.reshape_func(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n if self.do_aug:\n res = 
self.aug(image=img, mask=targets)\n img, targets = res['image'], res['mask']\n\n for i, c in enumerate(self.classes):\n batch_y[c].append(targets[:, :, i])\n\n batch_x.append(img)\n\n batch_x = np.array(batch_x, np.float32)\n batch_y = {k: np.array(v, np.float32) for k, v in batch_y.items()}\n batch_y = {k: np.expand_dims(v, axis=-1) for k, v in batch_y.items()}\n\n return (\n imagenet_utils.preprocess_input(batch_x, \"channels_last\", mode=\"tf\"),\n batch_y\n )", "def predict(self, X, pred_batch_size=None):", "def batch_predict(args, ilastik_args):\n # Create the folder for the intermediate results.\n if not os.path.isdir(args.cache):\n os.makedirs(args.cache)\n\n # Find the random forest files.\n rf_files = autocontext_forests(args.batch_predict)\n n = len(rf_files)\n\n # Get the output format arguments.\n default_output_format = \"hdf5\"\n default_output_filename_format = os.path.join(args.cache, \"{nickname}_probs.h5\")\n ilastik_parser = argparse.ArgumentParser()\n ilastik_parser.add_argument(\"--output_format\", type=str, default=default_output_format)\n ilastik_parser.add_argument(\"--output_filename_format\", type=str, default=default_output_filename_format)\n ilastik_parser.add_argument(\"--output_internal_path\", type=str, default=default_export_key())\n format_args, ilastik_args = ilastik_parser.parse_known_args(ilastik_args)\n output_formats = [default_output_format] * (n-1) + [format_args.output_format]\n if args.no_overwrite:\n output_filename_formats = [default_output_filename_format[:-3] + \"_%s\" % str(i).zfill(2) + default_output_filename_format[-3:] for i in xrange(n-1)] + [format_args.output_filename_format]\n else:\n output_filename_formats = [default_output_filename_format] * (n-1) + [format_args.output_filename_format]\n output_internal_paths = [default_export_key()] * (n-1) + [format_args.output_internal_path]\n\n # Reshape the data to tzyxc and move it to the cache folder.\n outfiles = []\n keep_channels = None\n for i in xrange(len(args.files)):\n # Read the data and attach axistags.\n filename = args.files[i]\n if \".h5/\" in filename or \".hdf5/\" in filename:\n data_key = os.path.basename(filename)\n data_path = filename[:-len(data_key)-1]\n data = vigra.readHDF5(data_path, data_key)\n else:\n data_key = default_export_key()\n data_path_base, data_path_ext = os.path.splitext(filename)\n data_path = data_path_base + \".h5\"\n data = vigra.readImage(filename)\n if not hasattr(data, \"axistags\"):\n default_tags = {1: \"x\",\n 2: \"xy\",\n 3: \"xyz\",\n 4: \"xyzc\",\n 5: \"txyzc\"}\n data = vigra.VigraArray(data, axistags=vigra.defaultAxistags(default_tags[len(data.shape)]),\n dtype=data.dtype)\n new_data = reshape_tzyxc(data)\n\n if i == 0:\n c_index = new_data.axistags.index(\"c\")\n keep_channels = new_data.shape[c_index]\n\n # Save the reshaped dataset.\n output_filename = os.path.split(data_path)[1]\n output_filename = os.path.join(args.cache, output_filename)\n vigra.writeHDF5(new_data, output_filename, data_key, compression=args.compression)\n args.files[i] = output_filename + \"/\" + data_key\n if args.no_overwrite:\n outfiles.append([os.path.splitext(output_filename)[0] + \"_probs_%s.h5\" % str(i).zfill(2) for i in xrange(n-1)])\n else:\n outfiles.append([os.path.splitext(output_filename)[0] + \"_probs.h5\"] * (n-1))\n assert keep_channels > 0\n\n # Run the batch prediction.\n for i in xrange(n):\n rf_file = rf_files[i]\n output_format = output_formats[i]\n output_filename_format = output_filename_formats[i]\n output_internal_path = 
output_internal_paths[i]\n\n filename_key = os.path.basename(args.files[0])\n filename_path = args.files[0][:-len(filename_key)-1]\n\n # Quick hack to prevent the ilastik error \"wrong number of channels\".\n p = ILP(rf_file, args.cache, compression=args.compression)\n for j in xrange(p.data_count):\n p.set_data_path_key(j, filename_path, filename_key)\n\n # Call ilastik to run the batch prediction.\n cmd = [args.ilastik,\n \"--headless\",\n \"--project=%s\" % rf_file,\n \"--output_format=%s\" % output_format,\n \"--output_filename_format=%s\" % output_filename_format,\n \"--output_internal_path=%s\" % output_internal_path]\n\n if args.predict_file:\n pfile = os.path.join(args.cache, \"predict_file.txt\")\n with open(pfile, \"w\") as f:\n for pf in args.files:\n f.write(os.path.abspath(pf) + \"\\n\")\n cmd.append(\"--predict_file=%s\" % pfile)\n else:\n cmd += args.files\n\n print col.Fore.GREEN + \"- Running autocontext batch prediction round %d of %d -\" % (i+1, n) + col.Fore.RESET\n subprocess.call(cmd, stdout=sys.stdout)\n\n if i < n-1:\n # Merge the probabilities back to the original file.\n for filename, filename_out in zip(args.files, outfiles):\n filename_key = os.path.basename(filename)\n filename_path = filename[:-len(filename_key)-1]\n merge_datasets(filename_path, filename_key, filename_out[i], output_internal_path, n=keep_channels,\n compression=args.compression)", "def predict(self):\n self.predicted_test_summary = []\n for step in xrange(0, self.test_size // self.test_batch_size):\n print 'Predicting Batch No.:', step\n offset = (step * self.test_batch_size) % self.test_size\n batch_data_fwd = self.X_tst_fwd[offset:(offset + self.test_batch_size), :].T\n batch_data_bwd = self.X_tst_bwd[offset:(offset + self.test_batch_size), :].T\n summary_test_out = self._predict_batch(batch_data_fwd, batch_data_bwd)\n self.predicted_test_summary.extend(summary_test_out)\n\n print 'Prediction Complete. 
Moving Forward..'\n\n # test answers\n self.test_review = self.X_tst_fwd\n self.predicted_test_summary = self.predicted_test_summary\n self.true_summary = self.Y_tst", "def loss_and_predict(self,\n feats_dict: Dict,\n batch_data_samples: SampleList,\n proposal_cfg: Optional[dict] = None,\n **kwargs) -> Tuple[dict, InstanceList]:\n batch_gt_instances_3d = []\n batch_gt_instances_ignore = []\n batch_input_metas = []\n for data_sample in batch_data_samples:\n batch_input_metas.append(data_sample.metainfo)\n batch_gt_instances_3d.append(data_sample.gt_instances_3d)\n batch_gt_instances_ignore.append(\n data_sample.get('ignored_instances', None))\n raw_points = feats_dict.pop('raw_points')\n bbox_preds, cls_preds = self(feats_dict)\n\n loss_inputs = (bbox_preds, cls_preds,\n raw_points) + (batch_gt_instances_3d, batch_input_metas,\n batch_gt_instances_ignore)\n losses = self.loss_by_feat(*loss_inputs)\n\n predictions = self.predict_by_feat(\n raw_points,\n bbox_preds,\n cls_preds,\n batch_input_metas=batch_input_metas,\n cfg=proposal_cfg)\n feats_dict['points_cls_preds'] = cls_preds\n if predictions[0].bboxes_3d.tensor.isinf().any():\n print(predictions)\n return losses, predictions", "def predict(self, batch_inputs: dict,\n batch_data_samples: SampleList) -> SampleList:\n pass", "def predict(predict_var, x_unlabeled, inputs, batch_sizes, view_size):\n x = x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(view_size):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n y_list = np.concatenate(y_preds, axis=1)\n\n return y_list", "def predict(self, test_batch_size=64, device='cuda', load=False, model_path=None, dataloader_num_workers=4, save_prediction=True):\n self.model.eval()\n self.device = device\n self.test_batch_size = test_batch_size\n if load:\n if model_path:\n self.load(model_path, device=self.device)\n else:\n model_path = os.path.join(path_checkpoints_dir, f\"{self.experiment_id}.pth\")\n print(f\"loaded model={model_path}\")\n self.load(model_path, device=self.device)\n if self.model is None:\n raise Exception(\"model cannot be None. 
Load or train the model before inference\")\n dataloader = self.data_module.get_test_dataloader(batch_size=self.test_batch_size, shuffle=False, num_workers=dataloader_num_workers)\n all_outputs = []\n tk0 = tqdm(enumerate(dataloader, 1), total=len(dataloader))\n for batch_id, data in tk0:\n for key, value in data.items():\n data[key] = value.to(self.device)\n # batch_outputs, batch_loss = self.model(**data)\n batch_outputs, batch_loss= self.validate_one_batch(data)\n all_outputs.append(batch_outputs.detach().cpu().numpy())\n predictions = np.concatenate(all_outputs, axis=0)\n if save_prediction:\n submission = pd.read_csv(path_sample_submission_file)\n assert submission.shape[0] == predictions.shape[0], \"unexpected behavior.code fix required\"\n submission.iloc[:, 1:] = predictions\n\n if not os.path.isdir(path_submissions_dir):\n os.mkdir(path_submissions_dir)\n submission.to_csv(os.path.join(path_submissions_dir, f\"{self.experiment_id}.csv\"), index=False)\n tk0.close()\n return predictions", "def evaluate(self, eval_batches, result_dir=None, result_prefix=None, save_full_info=False):\n pred_answers, ref_answers = [], []\n total_loss, total_num = 0, 0\n for b_itx, batch in enumerate(eval_batches):\n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.p_char: batch['passage_char_ids'],\n self.q_char: batch['question_char_ids'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.dropout_keep_prob: 1.0}\n start_probs, end_probs, loss = self.sess.run([self.start_probs,\n self.end_probs, self.loss], feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n\n padded_p_len = len(batch['passage_token_ids'][0])\n for sample, start_prob, end_prob in zip(batch['raw_data'], start_probs, end_probs):\n\n best_answer = self.find_best_answer(sample, start_prob, end_prob, padded_p_len)\n if save_full_info:\n sample['pred_answers'] = [best_answer]\n pred_answers.append(sample)\n else:\n pred_answers.append({'question_id': sample['question_id'],\n 'question_type': sample['question_type'],\n 'answers': [best_answer],\n 'entity_answers': [[]],\n 'yesno_answers': []})\n if 'answers' in sample:\n ref_answers.append({'question_id': sample['question_id'],\n 'question_type': sample['question_type'],\n 'answers': sample['answers'],\n 'entity_answers': [[]],\n 'yesno_answers': []})\n\n if result_dir is not None and result_prefix is not None:\n result_file = os.path.join(result_dir, result_prefix + '.json')\n with open(result_file, 'w') as fout:\n for pred_answer in pred_answers:\n fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\\n')\n\n self.logger.info('Saving {} results to {}'.format(result_prefix, result_file))\n\n # this average loss is invalid on test set, since we don't have true start_id and end_id\n ave_loss = 1.0 * total_loss / total_num\n # compute the bleu and rouge scores if reference answers is provided\n if len(ref_answers) > 0:\n pred_dict, ref_dict = {}, {}\n for pred, ref in zip(pred_answers, ref_answers):\n question_id = ref['question_id']\n if len(ref['answers']) > 0:\n pred_dict[question_id] = normalize(pred['answers'])\n ref_dict[question_id] = normalize(ref['answers'])\n bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)\n else:\n bleu_rouge = None\n return ave_loss, bleu_rouge", "def evaluate(self, batch):\n images, labels, projs, planes = [], [], [], []\n for serialized in batch:\n 
example = tf.train.Example.FromString(serialized)\n image, label = self.encoder.parse_example(example)\n images.append(image)\n labels.append(label)\n proj, _ = self.encoder.parse_camera(example)\n projs.append(proj)\n plane = self.encoder.parse_plane(example)\n planes.append(plane)\n\n\n #pred = self.model.predict(np.asarray(images), batch_size=len(batch))\n results = self.predict(np.asarray(images), batch_size=len(batch))\n \n # Creating some fake results for testing as well as example of what the \n # the results should look like.\n # results = []\n # for label in labels:\n # instances = label['2d_instance']\n # instances_3d = label['3d_instance']\n # boxes = []\n # for i in range(len(instances)):\n # point_2d = np.copy(instances[i])\n # point_3d = np.copy(instances_3d[i])\n # for j in range(9):\n # # Translating the box in 3D, this will have a large impact on 3D IoU.\n # point_3d[j] += np.array([0.01, 0.02, 0.5])\n # boxes.append((point_2d, point_3d))\n # results.append(boxes)\n\n for boxes, label, plane in zip(results, labels, planes): \n instances = label['2d_instance']\n instances_3d = label['3d_instance']\n visibilities = label['visibility']\n num_instances = 0\n for instance, instance_3d, visibility in zip(\n instances, instances_3d, visibilities):\n if (visibility > self._vis_thresh and\n self._is_visible(instance[0]) and instance_3d[0, 2] < 0):\n num_instances += 1\n # We don't have negative examples in evaluation.\n if num_instances == 0:\n continue\n\n iou_hit_miss = metrics.HitMiss(self._iou_thresholds)\n azimuth_hit_miss = metrics.HitMiss(self._azimuth_thresholds)\n polar_hit_miss = metrics.HitMiss(self._polar_thresholds)\n pixel_hit_miss = metrics.HitMiss(self._pixel_thresholds)\n\n num_matched = 0\n for box in boxes:\n box_point_2d, box_point_3d = box\n index = self.match_box(box_point_2d, instances, visibilities)\n if index >= 0:\n num_matched += 1\n pixel_error = self.evaluate_2d(box_point_2d, instances[index])\n\n # If you only compute the 3D bounding boxes from RGB images, \n # your 3D keypoints may be upto scale. However the ground truth\n # is at metric scale. There is a hack to re-scale your box using \n # the ground planes (assuming your box is sitting on the ground).\n # However many models learn to predict depths and scale correctly.\n #scale = self.compute_scale(box_point_3d, plane)\n #box_point_3d = box_point_3d * scale\n azimuth_error, polar_error, iou = self.evaluate_3d(box_point_3d, instances_3d[index])\n iou_hit_miss.record_hit_miss(iou)\n pixel_hit_miss.record_hit_miss(pixel_error, greater=False)\n azimuth_hit_miss.record_hit_miss(azimuth_error, greater=False)\n polar_hit_miss.record_hit_miss(polar_error, greater=False)\n\n if num_matched > 0:\n self._iou_ap.append(iou_hit_miss, num_instances)\n self._pixel_ap.append(pixel_hit_miss, num_instances)\n self._azimuth_ap.append(azimuth_hit_miss, num_instances)\n self._polar_ap.append(polar_hit_miss, num_instances)\n self._matched += num_matched", "def after_pred(self):\n # If training then skip\n if self.training:\n return\n\n # Get ground truths in epoch 0 i.e. 
start of training\n if self.epoch == 0:\n self.y_true.extend(self.y.cpu().flatten().numpy())\n\n # Get predictions from each batch and add them to prediction container\n y_pred = self.pred.detach().cpu()\n \n self.y_pred.extend(y_pred.flatten().numpy())", "def training_step(self, batch, batch_nb):\n # batch\n input_ids, attention_mask, token_type_ids, labels, emph_probs = batch\n inputs = {\n 'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': labels,\n }\n\n # XLM and RoBERTa don't use segment_ids\n if self.hparams.model_type != 'distilbert':\n inputs['token_type_ids'] = (\n token_type_ids if self.hparams.model_type in ['bert', 'xlnet'] else None\n )\n\n # forward and loss\n loss, _ = self.forward(**inputs)\n\n # logs\n logs = {\n 'train_loss': loss,\n 'lr': self.lr_scheduler.get_last_lr()[-1],\n }\n\n # output dict\n output = {\n 'loss': loss,\n 'progress_bar': logs,\n 'log': logs\n }\n return output", "def collate_fn_predict(batch):\n all_input_ids, all_attention_mask, all_token_type_ids, all_lens = map(torch.stack, zip(*batch))\n max_len = max(all_lens).item()\n all_input_ids = all_input_ids[:, :max_len]\n all_attention_mask = all_attention_mask[:, :max_len]\n all_token_type_ids = all_token_type_ids[:, :max_len]\n return all_input_ids, all_attention_mask, all_token_type_ids", "def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n pass", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def sample_batch(self, batch_type, batch_size):\n if batch_type == \"train\":\n folders = self.metatrain_character_folders\n elif batch_type == \"val\":\n folders = self.metaval_character_folders\n else:\n folders = self.metatest_character_folders\n\n #############################\n #### YOUR 
CODE GOES HERE ####\n all_image_batches = []\n all_label_batches = []\n for _ in range(batch_size):\n # create batches of K lists\n images = [list() for _ in range(self.num_samples_per_class)]\n labels = [list() for _ in range(self.num_samples_per_class)]\n next_idx = [0] * self.num_classes\n\n # sample the classes and images\n classes = np.random.choice(folders, size=(self.num_classes,))\n labels_and_paths = get_images(classes, range(self.num_classes),\n nb_samples=self.num_samples_per_class)\n\n # load images and one-hot encode labels\n for label, path in labels_and_paths:\n # only add one class instance per sample list\n idx = next_idx[label]\n\n\n image = image_file_to_array(path, self.img_size, flatten=self.flatten)\n one_hot_label = np.zeros((self.num_classes,))\n one_hot_label[label] = 1.\n\n images[idx].append(image)\n labels[idx].append(one_hot_label)\n\n next_idx[label] += 1\n\n all_image_batches.append(images)\n all_label_batches.append(labels)\n\n # convert to numpy arrays\n all_image_batches = np.array(all_image_batches)\n all_label_batches = np.array(all_label_batches)\n #############################\n\n return all_image_batches, all_label_batches", "def update_output(self, ):\n input_ids, outputs, grads, adv_tokens = self.batch_output\n\n probs = softmax(outputs, dim=-1)\n probs, labels = torch.max(probs, dim=-1)\n\n tokens = [\n self.tokenizer.convert_ids_to_tokens(input_ids_)\n for input_ids_ in input_ids\n ]\n\n embedding_grads = grads.sum(dim=2)\n \n # norm for each sequence\n norms = torch.norm(embedding_grads, dim=1, p=2) # need check hyperparameter\n \n # normalizing\n for i, norm in enumerate(norms):\n embedding_grads[i] = torch.abs(embedding_grads[i]) / norm\n\n batch_output = []\n \n # check probs, labels shape\n labels = torch.reshape(labels, (1, -1))\n probs = torch.reshape(probs, (1, -1))\n iterator = zip(tokens, probs, embedding_grads, labels)\n\n for example_tokens, example_prob, example_grad, example_label in iterator:\n example_dict = dict()\n # as we do it by batches we has a padding so we need to remove it\n \n example_tokens = [t for t in example_tokens if t != self.tokenizer.pad_token]\n example_dict['tokens'] = example_tokens\n example_dict['grad'] = example_grad.cpu().tolist()[:len(example_tokens)]\n example_dict['label'] = example_label.cpu().tolist()[:len(example_tokens)] # example_label.item()\n example_dict['prob'] = example_prob.cpu().tolist()[:len(example_tokens)] # example_prob.item() \n\n batch_output.append(example_dict)\n\n return batch_output", "def on_predict_batch_end(\n self,\n trainer: pl.Trainer,\n pl_module: AnomalyModule,\n outputs: dict,\n batch: Any,\n batch_idx: int,\n dataloader_idx: int,\n ) -> None:\n del trainer, batch, batch_idx, dataloader_idx # These variables are not used.\n\n self._standardize_batch(outputs, pl_module)\n self._normalize_batch(outputs, pl_module)\n outputs[\"pred_labels\"] = outputs[\"pred_scores\"] >= 0.5", "def process_batch(self, inputs):\n for key, ipt in inputs.items():\n inputs[key] = ipt.to(self.device)\n\n # we only feed the image with frame_id 0 through the depth encoder\n features = self.models[\"encoder\"](inputs[\"color_aug\", 0, 0])\n outputs = self.models[\"depth\"](features)\n\n outputs.update(self.predict_poses(inputs, features))\n\n self.generate_images_pred(inputs, outputs)\n losses = self.compute_losses(inputs, outputs)\n\n return outputs, losses", "def batch_generator(batch_size):\n\n # Infinite loop.\n while True:\n # Get a list of random indices for images in the training-set.\n idx = 
np.random.randint(100,size=batch_size)\n \n # Get the pre-computed transfer-values for those images.\n # These are the outputs of the pre-trained image-model.\n transf_values = np.array([transfer_values[_] for _ in idx])\n\n # For each of the randomly chosen images there are\n # at least 5 captions describing the contents of the image.\n # Select one of those captions at random and get the\n # associated sequence of integer-tokens.\n tokens = [caps_markedwords[_] for _ in idx]\n\n # Count the number of tokens in all these token-sequences.\n num_tokens = [len(t) for t in tokens]\n \n # Max number of tokens.\n max_tokens = np.max(num_tokens)\n # Pad all the other token-sequences with zeros\n # so they all have the same length and can be\n # input to the neural network as a numpy array.\n tokens_padded = pad_sequences(tokens,\n maxlen=max_tokens,\n padding='post',\n truncating='post')\n \n # Further prepare the token-sequences.\n # The decoder-part of the neural network\n # will try to map the token-sequences to\n # themselves shifted one time-step.\n decoder_input_data = tokens_padded[:, 0:-1]\n decoder_output_data = tokens_padded[:, 1:]\n\n # Dict for the input-data. Because we have\n # several inputs, we use a named dict to\n # ensure that the data is assigned correctly.\n x_data = \\\n {\n 'decoder_input': decoder_input_data,\n 'transfer_values_input': transf_values\n }\n\n\n # Dict for the output-data.\n y_data = \\\n {\n 'decoder_output': decoder_output_data\n }\n \n yield (x_data, y_data)", "def on_predict_batch_end(\n self, batch: int, logs: tp.Optional[tp.Dict[str, np.ndarray]] = None\n ):\n pass", "def run_prediction(question_texts, context_text):\r\n examples = []\r\n\r\n for i, question_text in enumerate(question_texts):\r\n example = SquadExample(\r\n qas_id=str(i),\r\n question_text=question_text,\r\n context_text=context_text,\r\n answer_text=None,\r\n start_position_character=None,\r\n title=\"Predict\",\r\n is_impossible=False,\r\n answers=None,\r\n )\r\n\r\n examples.append(example)\r\n\r\n features, dataset = squad_convert_examples_to_features(\r\n examples=examples,\r\n tokenizer=tokenizer,\r\n max_seq_length=384,\r\n doc_stride=128,\r\n max_query_length=64,\r\n is_training=False,\r\n return_dataset=\"pt\",\r\n threads=1,\r\n )\r\n\r\n eval_sampler = SequentialSampler(dataset)\r\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=10)\r\n\r\n all_results = []\r\n\r\n for batch in eval_dataloader:\r\n model.eval()\r\n batch = tuple(t.to(device) for t in batch)\r\n\r\n with torch.no_grad():\r\n inputs = {\r\n \"input_ids\": batch[0],\r\n \"attention_mask\": batch[1],\r\n \"token_type_ids\": batch[2],\r\n }\r\n\r\n example_indices = batch[3]\r\n\r\n outputs = model(**inputs)\r\n\r\n for i, example_index in enumerate(example_indices):\r\n eval_feature = features[example_index.item()]\r\n unique_id = int(eval_feature.unique_id)\r\n\r\n output = [to_list(output[i]) for output in outputs]\r\n\r\n start_logits, end_logits = output\r\n result = SquadResult(unique_id, start_logits, end_logits)\r\n all_results.append(result)\r\n\r\n output_prediction_file = \"predictions.json\"\r\n output_nbest_file = \"nbest_predictions.json\"\r\n output_null_log_odds_file = \"null_predictions.json\"\r\n\r\n predictions = compute_predictions_logits(\r\n examples,\r\n features,\r\n all_results,\r\n n_best_size,\r\n max_answer_length,\r\n do_lower_case,\r\n output_prediction_file,\r\n output_nbest_file,\r\n output_null_log_odds_file,\r\n False, # verbose_logging\r\n True, # 
version_2_with_negative\r\n null_score_diff_threshold,\r\n tokenizer,\r\n )\r\n\r\n return predictions", "def get_batch(batch_data, config):\n N = len(batch_data['obs_traj_rel'])\n P = config.P\n OF = config.flow_size\n T_in = config.obs_len\n T_pred = config.pred_len\n\n returned_inputs = []\n traj_obs_gt = np.zeros([N, T_in, P], dtype='float32')\n traj_pred_gt = np.zeros([N, T_pred, P], dtype='float32')\n # --- xy input\n for i, (obs_data, pred_data) in enumerate(zip(batch_data['obs_traj_rel'],\n batch_data['pred_traj_rel'])):\n for j, xy in enumerate(obs_data):\n traj_obs_gt[i, j, :] = xy\n for j, xy in enumerate(pred_data):\n traj_pred_gt[i, j, :] = xy\n returned_inputs.append(traj_obs_gt)\n # ------------------------------------------------------\n # Social component (through optical flow)\n if config.add_social:\n obs_flow = np.zeros((N, T_in, OF),dtype ='float32')\n # each batch\n for i, flow_seq in enumerate(batch_data['obs_optical_flow']):\n for j , flow_step in enumerate(flow_seq):\n obs_flow[i,j,:] = flow_step\n returned_inputs.append(obs_flow)\n # -----------------------------------------------------------\n # Person pose input\n if config.add_kp:\n obs_kp = np.zeros((N, T_in, KP, 2), dtype='float32')\n # each bacth\n for i, obs_kp_rel in enumerate(batch_data['obs_kp_rel']):\n for j, obs_kp_step in enumerate(obs_kp_rel):\n obs_kp[i, j, :, :] = obs_kp_step\n return returned_inputs,traj_pred_gt", "def process(self, data_batch: Any, data_samples: Sequence[dict]) -> None:", "def forward_step(self, batch):\n input_ids = torch.as_tensor(batch.input_ids).to(self.device).reshape((1, -1)) # batch.get('input_ids').to(self.device)\n attention_mask = torch.as_tensor(batch.attention_mask).to(self.device).reshape((1, -1)) # batch.get('attention_mask').to(self.device)\n outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)[0]\n\n _, _, num_label = outputs.shape\n \"\"\"\n outputs : (batch, seq_length, feat_dim) => (seq_length, feat_dim)\n labels : (batch, seq_length) => (seq_length,)\n \"\"\"\n outputs = outputs.view(-1, num_label)\n labels = torch.argmax(outputs, dim=1) # torch.argmax(outputs, dim=1)\n batch_losses = self.criterion(outputs, labels)\n loss = torch.mean(batch_losses) # mean average\n self.batch_output = [input_ids, outputs]\n return loss", "def predict(self, batch_inputs_dict: dict, batch_data_samples: SampleList,\n **kwargs) -> SampleList:\n x = self.extract_feat(batch_inputs_dict)\n results_list = self.bbox_head.predict(x, batch_data_samples, **kwargs)\n predictions = self.add_pred_to_datasample(batch_data_samples,\n results_list)\n return predictions", "def predict(self,\n feats: Tuple[Tensor],\n batch_data_samples: OptSampleList,\n test_cfg: ConfigType = {}) -> Predictions:\n\n batch_coords = self.forward(feats) # (B, K, D)\n\n # Restore global position with target_root\n target_root = batch_data_samples[0].metainfo.get('target_root', None)\n if target_root is not None:\n target_root = torch.stack([\n torch.from_numpy(b.metainfo['target_root'])\n for b in batch_data_samples\n ])\n else:\n target_root = torch.stack([\n torch.empty((0), dtype=torch.float32)\n for _ in batch_data_samples[0].metainfo\n ])\n\n preds = self.decode((batch_coords, target_root))\n\n return preds", "def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None, batch_size=1):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n\n # Keras requires a generator to run indefinately.\n while True:\n try:\n # 
Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n image, gt_class_ids = load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation)\n\n # Init batch arrays\n if b == 0:\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n\n # Add to batch\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, gt_class_ids] = 1\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_gt_class_ids]\n outputs = []\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n raise", "def generate(args):\n\n # Using the data Augmentation in traning data\n\n normalizer = Normalizer()\n\n train_aug = tf.keras.preprocessing.image.ImageDataGenerator(\n #rescale=1. / 255.,\n shear_range=args.shear_range,\n zoom_range=args.zoom_range,\n rotation_range=args.rotation_range,\n width_shift_range=args.width_shift_range,\n height_shift_range=args.height_shift_range,\n horizontal_flip=args.horizontal_flip,\n vertical_flip=args.vertical_flip,\n preprocessing_function=normalizer)\n\n\n validation_aug = tf.keras.preprocessing.image.ImageDataGenerator(preprocessing_function=normalizer)\n\n train_generator = train_aug.flow_from_directory(\n args.train_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical',\n shuffle=True)\n\n mean, std = [], []\n if args.mean is None or args.std is None:\n mean, std = normalizer.get_stats(args.train_dir, train_generator.filenames, (args.input_size, args.input_size))\n else:\n mean = [float(m.strip()) for m in args.mean.split(',')]\n std = [float(s.strip()) for s in args.std.split(',')]\n normalizer.set_stats(mean, std)\n\n if not os.path.exists('model'):\n os.makedirs('model')\n with open('model/stats.txt', 'w') as stats:\n stats.write(\"Dataset mean [r, g, b] = {}\\n\".format(mean))\n\n\n label_map = (train_generator.class_indices)\n label_map = dict((v,k) for k,v in label_map.items())\n\n with open('model/labels.csv', 'w') as csv_file:\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n csv_writer.writerows(label_map.items())\n\n validation_generator = validation_aug.flow_from_directory(\n args.validation_dir,\n target_size=(args.input_size, args.input_size),\n batch_size=args.batch_size,\n class_mode='categorical')\n\n return train_generator, validation_generator, train_generator.samples, validation_generator.samples, len(label_map)", "def data_generator(dataset, config, shuffle=True, augmentation=None,\n random_rois=0, batch_size=1, detection_targets=False,\n diverse=0, no_augmentation_sources=None):\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n 
backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources: augmentation = None\n image, image_meta, gt_class_ids, gt_class_ids2, gt_boxes, gt_rboxes, gt_global_mask, \\\n gt_masks, gt_mask_score, gt_text_embeds, gt_embed_lengths = load_image_gt(dataset, config, image_id,\n augmentation=augmentation)\n\n \n # Skip images that have no instances. This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # Use only positive class_ids\n categories = np.unique(gt_class_ids)\n _idx = categories > 0\n categories = categories[_idx]\n \n if config.MODEL == \"smrcnn\":\n # Use only active classes\n active_categories = []\n for c in categories:\n if any(c == dataset.ACTIVE_CLASSES):\n active_categories.append(c)\n \n # Skiop image if it contains no instance of any active class \n if not np.any(np.array(active_categories) > 0):\n continue\n # Randomly select category\n category = np.random.choice(active_categories)\n \n # NOTE for siamese\n # Generate siamese target crop\n targets = []\n for i in range(config.NUM_TARGETS):\n targets.append(get_one_target(category, dataset, config, augmentation=augmentation))\n # target = np.stack(target, axis=0)\n \n # print(target_class_id)\n target_class_id = category\n target_class_ids = np.array([target_class_id])\n \n idx = gt_class_ids == target_class_id\n siamese_class_ids = idx.astype('int8')\n # print(idx)\n # print(gt_boxes.shape, gt_masks.shape)\n siamese_class_ids = siamese_class_ids[idx]\n gt_class_ids = gt_class_ids[idx]\n gt_boxes = gt_boxes[idx,:]\n gt_masks = gt_masks[:,:,idx]\n image_meta = image_meta[:15] # TODO\n # --------------------------------------------------------------\n\n # RPN Targets\n # if rpn have muiltple label, rewrite here\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_class_ids2, mrcnn_bbox, mrcnn_rbbox, mrcnn_mask,\\\n mrcnn_text_embeds, mrcnn_embed_lengths = build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_rboxes, gt_masks, gt_mask_score, gt_class_ids2, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros((batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros([batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros([batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros((batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_rboxes = np.zeros((batch_size, config.MAX_GT_INSTANCES, 5), dtype=np.float32)\n if config.MODEL == \"smrcnn\":\n batch_targets = 
np.zeros((batch_size, config.NUM_TARGETS) + targets[0].shape, dtype=np.float32)\n batch_gt_masks = np.zeros((batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n batch_gt_class_ids2 = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_text_embeds = np.zeros((batch_size, config.MAX_GT_INSTANCES, config.MAX_LABEL_LENGTH), dtype=np.int32)\n batch_gt_embed_lengths = np.zeros((batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n if random_rois:\n batch_rpn_rois = np.zeros((batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros((batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros((batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n \n # ************************* NOTE for 2 label dataset\n if config.NUM_CLASSES2 > 2:\n batch_mrcnn_class_ids2 = np.zeros(\n (batch_size,) + mrcnn_class_ids2.shape, dtype=mrcnn_class_ids.dtype)\n # ************************* NOTE for ocr\n if config.READ:\n batch_mrcnn_text_embeds = np.zeros(\n (batch_size,) + mrcnn_text_embeds.shape, dtype=mrcnn_text_embeds.dtype)\n batch_mrcnn_embed_lengths = np.zeros(\n (batch_size,) + mrcnn_embed_lengths.shape, dtype=mrcnn_text_embeds.dtype)\n batch_mrcnn_bbox = np.zeros((batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_rbbox = np.zeros((batch_size,) + mrcnn_rbbox.shape, dtype=mrcnn_rbbox.dtype)\n batch_mrcnn_mask = np.zeros((batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n siamese_class_ids = siamese_class_ids[ids] # NOTE\n gt_boxes = gt_boxes[ids]\n gt_rboxes = gt_rboxes[ids]\n gt_masks = gt_masks[:, :, ids]\n gt_class_ids2 = gt_class_ids2[ids]\n gt_text_embeds = gt_text_embeds[ids]\n gt_embed_lengths = gt_embed_lengths[ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n # NOTE for siamese\n if config.MODEL == \"smrcnn\":\n batch_targets[b] = np.stack([mold_image(target.astype(np.float32), config) for target in targets], axis=0)\n batch_gt_class_ids[b, :siamese_class_ids.shape[0]] = siamese_class_ids\n else:\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_rboxes[b, :gt_rboxes.shape[0]] = gt_rboxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n batch_gt_class_ids2[b, :gt_class_ids2.shape[0]] = gt_class_ids2\n batch_gt_text_embeds[b, :gt_text_embeds.shape[0], :gt_text_embeds.shape[1]] = gt_text_embeds\n batch_gt_embed_lengths[b, :gt_embed_lengths.shape[0]] = gt_embed_lengths\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_rbbox[b] = mrcnn_rbbox\n batch_mrcnn_mask[b] = mrcnn_mask\n batch_mrcnn_class_ids2[b] = mrcnn_class_ids2\n batch_mrcnn_text_embeds[b] = mrcnn_text_embeds\n batch_mrcnn_embed_lengths[b] = mrcnn_embed_lengths\n b += 1\n # Batch full?\n if b >= batch_size:\n \n\n # NOTE for siamese\n if config.MODEL == \"smrcnn\":\n inputs = [batch_images, batch_image_meta, batch_targets, batch_rpn_match, 
batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_class_ids2, batch_gt_boxes, batch_gt_rboxes, batch_gt_masks,\n batch_gt_text_embeds, batch_gt_embed_lengths]\n else:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_class_ids2, batch_gt_boxes, batch_gt_rboxes, batch_gt_masks,\n batch_gt_text_embeds, batch_gt_embed_lengths]\n outputs = []\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(batch_mrcnn_class_ids, -1) \n \n # ************************* NOTE for 2 label dataset\n # ************************* NOTE for ocr\n if config.RBOX and config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1)\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_rbbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif config.RBOX and config.READ and not config.HAVE_LABEL2:\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_rbbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif config.RBOX and not config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1) \n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_rbbox, batch_mrcnn_mask])\n elif config.RBOX and not config.READ and not config.HAVE_LABEL2:\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_rbbox, batch_mrcnn_mask])\n elif not config.RBOX and config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1)\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif not config.RBOX and config.READ and not config.HAVE_LABEL2:\n batch_mrcnn_text_embeds = np.expand_dims(batch_mrcnn_text_embeds, -1)\n batch_mrcnn_embed_lengths = np.expand_dims(batch_mrcnn_embed_lengths, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask,\n batch_mrcnn_text_embeds, batch_mrcnn_embed_lengths])\n elif not config.RBOX and not config.READ and config.HAVE_LABEL2:\n batch_mrcnn_class_ids2 = np.expand_dims(batch_mrcnn_class_ids2, -1) \n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_class_ids2, batch_mrcnn_bbox,\\\n batch_mrcnn_mask])\n elif not config.RBOX and not config.READ and not config.HAVE_LABEL2:\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(f\"Error processing image {dataset.image_info[image_id]}\")\n error_count += 1\n if error_count > 5:\n raise", "def evaluate(predict_var, x_unlabeled, inputs, batch_sizes):\n x = 
x_unlabeled\n\n # calculate batches for predict loop\n unlabeled_batch_size = batch_sizes.get(\"Embedding\", 0)\n batch_size = min(len(x[0]), unlabeled_batch_size)\n batches = make_batches(len(x[0]), batch_size)\n\n y_preds = []\n # predict over all points\n for j, (batch_start, batch_end) in enumerate(batches):\n feed_dict = {K.learning_phase(): 0}\n # feed corresponding input for each input_type\n for input_type, input_placeholder in inputs.items():\n if input_type == \"Embedding\":\n for i in range(len(input_placeholder)):\n feed_dict[input_placeholder[i]] = x[i][batch_start:batch_end]\n elif input_type == \"Orthogonal\":\n batch_ids = np.random.choice(\n len(x), size=min(len(x), batch_sizes[input_type]), replace=False\n )\n for i in range(len(input_placeholder)):\n feed_dict[input_placeholder[i]] = x[i][batch_ids]\n else:\n raise Exception(\"Unrecognized feed name ['{}']\".format(input_type))\n # evaluate the batch\n y_pred_batch = np.asarray(K.get_session().run(predict_var, feed_dict=feed_dict))\n y_preds.append(y_pred_batch)\n\n if len(y_preds[0].shape):\n return np.concatenate(y_preds)\n else:\n return np.sum(y_preds)", "def evaluate_batch(self, batch: TorchData) -> Dict[str, Any]:\n batch = cast(Tuple[torch.Tensor, torch.Tensor], batch)\n data, labels = batch\n\n output = self.model(data)\n accuracy = accuracy_rate(output, labels)\n return {\"validation_accuracy\": accuracy, \"validation_error\": 1.0 - accuracy}", "def predict_batch(self, states: np.ndarray):\n return self.model(states, training=False)", "def postprocessing(batch, vocab):\n\n return batch", "def _infer_batch(self, input_batch: torch.Tensor) -> Dict[str, torch.Tensor]:\n slices, pady, padx = self._get_slices(\n self.stride, self.patch_size, tuple(input_batch.shape[2:]), self.padding\n )\n\n padx, modx = divmod(padx, 2)\n pady, mody = divmod(pady, 2)\n padx += modx\n pady += mody\n\n input_batch = F.pad(\n input_batch.float(), pad=(padx, padx, pady, pady), mode=\"reflect\"\n )\n\n # initialize the output masks.\n out_maps = {}\n max_channels = 0\n for head_name, out_channels in self.out_heads:\n # use the largest out channel for recovery mask.\n max_channels = out_channels if out_channels > max_channels else max_channels\n\n out_maps[head_name] = torch.zeros(\n input_batch.shape[0],\n out_channels,\n *input_batch.shape[2:],\n dtype=input_batch.dtype,\n device=self.device,\n )\n\n # patches are added to the out mask so need a recovery mask.\n recovery = torch.zeros(\n input_batch.shape[0],\n max_channels,\n *input_batch.shape[2:],\n dtype=input_batch.dtype,\n device=self.device,\n )\n\n # run inference with the slices\n for k, (yslice, xslice) in slices.items():\n batch = input_batch[..., yslice, xslice].to(self.device).float()\n logits = self.predictor.forward_pass(batch)\n\n probs = {}\n for k, logit in logits.items():\n probs[k] = self.predictor.classify(logit, **self.head_kwargs[k])\n\n for k, out_map in out_maps.items():\n out_map[..., yslice, xslice] += probs[k]\n\n recovery[..., yslice, xslice] += 1\n\n for k, out_map in out_maps.items():\n out = out_map / recovery[:, 0 : out_map.shape[1], ...]\n out_maps[k] = out[..., pady:-pady, padx:-padx]\n\n return out_maps", "def batch_inference(question,context): \n inputs = tokenizer(question, context, \n return_tensors='pt', \n truncation=True, \n padding=True)\n \n # Move data to GPU\n inputs = inputs.to(device)\n \n # Feed data through the model\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Q&A model outputs the two logit scores for each word.\n # One for 
its chance of being the start of the answer\n # and one for its chance of being the end\n start_logits = outputs.start_logits\n end_logits = outputs.end_logits\n \n # Find the words with the highest score\n # argmax(dim=1) means argmax with each sample\n start = start_logits.argmax(dim=1)\n end = end_logits.argmax(dim=1)\n \n # Return the answers\n # This is the point where we move the prediction back to main memory with .cpu()\n tokens = [tokenizer.convert_ids_to_tokens(x) for x in inputs[\"input_ids\"].cpu().numpy()]\n return [tokenizer.convert_tokens_to_string(x[start[i]:end[i]+1]) for i,x in enumerate(tokens)]", "def next_batch(self):\n next_train_index = self.curr_train_index + self.hparams.batch_size\n if next_train_index > self.num_train:\n # Increase epoch number\n epoch = self.epochs + 1\n self.reset()\n self.epochs = epoch\n batched_data = (\n self.train_images[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size],\n self.train_labels[self.curr_train_index:self.curr_train_index +\n self.hparams.batch_size])\n final_imgs = []\n images, labels = batched_data\n if self.hparams.augment_type == 'mixup':\n images, labels = augmentation_transforms.mixup_batch(\n images, labels, self.hparams.mixup_alpha)\n elif self.hparams.augment_type == 'image_freq':\n images, labels = augmentation_transforms.freq_augment(\n images,\n labels,\n amplitude=self.hparams.freq_augment_amplitude,\n magnitude=self.hparams.augmentation_magnitude,\n proportion_f=self.hparams.freq_augment_ffrac,\n probability=self.hparams.augmentation_probability)\n for data in images:\n if self.hparams.augment_type == 'autoaugment':\n epoch_policy = self.good_policies[np.random.choice(\n len(self.good_policies))]\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n elif self.hparams.augment_type == 'random':\n epoch_policy = found_policies.random_policy(\n self.hparams.num_augmentation_layers,\n self.hparams.augmentation_magnitude,\n self.hparams.augmentation_probability)\n final_img = augmentation_transforms.apply_policy(epoch_policy, data)\n else:\n final_img = np.copy(data)\n if self.hparams.apply_flip_crop:\n final_img = augmentation_transforms.random_flip(\n augmentation_transforms.zero_pad_and_crop(data, 4))\n # Apply cutout\n if self.hparams.apply_cutout:\n final_img = augmentation_transforms.cutout_numpy(final_img)\n\n final_imgs.append(final_img)\n final_imgs = np.array(final_imgs, np.float32)\n if self.hparams.noise_type == 'radial':\n labels = augmentation_transforms.add_radial_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.hparams.noise_class, self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'random' or self.hparams.noise_type == 'fourier' or self.hparams.noise_type == 'f' or self.hparams.noise_type == '1/f':\n labels = augmentation_transforms.add_sinusoidal_noise(\n final_imgs, labels, self.hparams.frequency, self.hparams.amplitude,\n self.direction, self.hparams.noise_class,\n self.hparams.normalize_amplitude)\n elif self.hparams.noise_type == 'uniform':\n labels = augmentation_transforms.add_uniform_noise(\n labels, self.hparams.amplitude, self.hparams.noise_class)\n\n batched_data = (final_imgs, labels)\n self.curr_train_index += self.hparams.batch_size\n return batched_data", "def evaluate(self, eval_batches, result_dir=None, result_prefix=None, save_full_info=False):\n pred_answers, ref_answers = [], []\n total_loss, total_num = 0, 0\n for b_itx, batch in enumerate(eval_batches):\n feed_dict = {self.p: 
batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.dropout_keep_prob: 1.0,\n self.em: batch['exact_match']}\n batch_size = len(batch['start_id'])\n padded_p_len = len(batch['passage_token_ids'][0])\n padded_p_num = len(batch['passage_token_ids']) / batch_size\n para_ids = []\n for start_id in batch['start_id']:\n para_ids.append(start_id // padded_p_len)\n feed_dict[self.para_label] = para_ids\n content_label = np.zeros([batch_size, padded_p_num * padded_p_len], dtype=int)\n for s_idx, (start_id, end_id) in enumerate(zip(batch['start_id'], batch['end_id'])):\n content_label[s_idx, start_id: end_id+1] = 1\n feed_dict[self.content_label] = content_label\n start_probs, end_probs, content_scores, verif_scores, loss = self.sess.run([self.start_probs, self.end_probs,\n self.concat_content_score, self.reshaped_ans_verif_score,\n self.loss], feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n\n padded_p_len = len(batch['passage_token_ids'][0])\n for s_idx, sample in enumerate(batch['raw_data']):\n start_prob = start_probs[s_idx]\n end_prob = end_probs[s_idx]\n content_score = content_scores[s_idx]\n verif_score = verif_scores[s_idx]\n best_answer = self.find_best_answer_with_verif(sample, start_prob, end_prob,\n content_score, verif_score, padded_p_len)\n if save_full_info:\n sample['pred_answers'] = [best_answer]\n pred_answers.append(sample)\n else:\n pred_answers.append({'question_id': sample['question_id'],\n 'question_type': sample['question_type'],\n 'answers': [best_answer],\n 'entity_answers': [[]],\n 'yesno_answers': []})\n if 'answers' in sample:\n ref_answers.append({'question_id': sample['question_id'],\n 'question_type': sample['question_type'],\n 'answers': sample['answers'],\n 'entity_answers': [[]],\n 'yesno_answers': []})\n\n if result_dir is not None and result_prefix is not None:\n result_file = os.path.join(result_dir, result_prefix + '.json')\n with open(result_file, 'w') as fout:\n for pred_answer in pred_answers:\n fout.write(json.dumps(pred_answer, encoding='utf8', ensure_ascii=False) + '\\n')\n\n self.logger.info('Saving {} results to {}'.format(result_prefix, result_file))\n\n # this average loss is invalid on test set, since we don't have true start_id and end_id\n ave_loss = 1.0 * total_loss / total_num\n # compute the bleu and rouge scores if reference answers is provided\n if len(ref_answers) > 0:\n pred_dict, ref_dict = {}, {}\n for pred, ref in zip(pred_answers, ref_answers):\n question_id = ref['question_id']\n if len(ref['answers']) > 0:\n pred_dict[question_id] = normalize(pred['answers'])\n ref_dict[question_id] = normalize(ref['answers'])\n bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)\n else:\n bleu_rouge = None\n return ave_loss, bleu_rouge", "def preprocess(batch):\n batch_size = batch[\"idx\"].shape[0]\n input_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n type_ids = np.zeros((batch_size, max_len), dtype=np.int32)\n\n for i in range(batch_size):\n sentence_a = batch[key_a][i]\n sentence_b = batch[key_b][i]\n tokens_a = tokenizer.EncodeAsIds(sentence_a)\n tokens_b = tokenizer.EncodeAsIds(sentence_b)[1:] # Strip start token\n\n ex_input_ids = (tokens_a + tokens_b)[:max_len]\n ex_type_ids = ([0] * len(tokens_a) + [1] * len(tokens_b))[:max_len]\n\n input_ids[i, :len(ex_input_ids)] = ex_input_ids\n type_ids[i, 
:len(ex_type_ids)] = ex_type_ids\n\n return {\n \"input_ids\": input_ids,\n \"type_ids\": type_ids,\n \"idx\": batch[\"idx\"].astype(np.int32),\n \"label\": batch[\"label\"],\n }", "def generate_predictions_on_folder(folder_path, unet, img_size):\n \n testing_dir = folder_path\n\n testing_img_paths = [os.path.join(testing_dir, fname) \n for fname in os.listdir(testing_dir)\n if (fname.endswith(\".png\") or fname.endswith(\".jpg\"))]\n\n x = np.zeros((len(testing_img_paths),) + img_size + (3,), dtype=\"float32\")\n\n for j, path in enumerate(testing_img_paths):\n img = load_img(path)\n # cropping images from 900x720 to 512x512\n img = img.crop(box=(313,99,825,611))\n # resizing image from 512x512 to 256x256\n img = img.resize(img_size)\n x[j] = img\n\n testing_preds = unet.model.predict(x)\n\n def display_mask(i):\n \"\"\"Quick utility to display a model's prediction.\"\"\"\n ### To display binary masks, comment the folowing line\n # mask = np.argmax(testing_preds[i], axis=-1)\n ### To display probability maps, comment the folowing line\n mask = testing_preds[i,:,:,-1]\n mask = np.expand_dims(mask, axis=-1)\n img = PIL.ImageOps.autocontrast(keras.preprocessing.image.array_to_img(mask))\n display(img)\n \n def display_cropped_img(i):\n \"\"\" Utility to display the original image. \"\"\"\n image = PIL.Image.open(testing_img_paths[i])\n image = image.crop(box=(313,99,825,611))\n image = image.resize((256,256))\n display(image)\n\n # displaying all predictions for images in a folder\n for i in range(0,len(testing_img_paths)):\n # Display input image\n display_cropped_img(i)\n # Display mask predicted by our model\n display_mask(i)", "def predicts(self, data_iter):\n predicteds = []\n logits = []\n\n all_corrects, all_loss, all_size = 0, 0, 0\n step = 0\n for feature, target in data_iter:\n step += 1\n # print(feature)\n # if self._cuda:\n # feature, target = feature.cuda(), target.cuda()\n\n logit = self._model(feature)\n predicted = torch.max(logit.data, 1)[1].view(target.size()).data\n # print(predicted)\n predicteds.extend(predicted)\n logits.extend(logit)\n loss = F.cross_entropy(logit, target, size_average=False)\n\n cur_loss = loss.data[0]\n all_loss += cur_loss\n cur_corrects = (torch.max(logit, 1)[1].view(target.size()).data == target.data).sum()\n all_corrects += cur_corrects\n print('Evaluation - average loss: {:.6f} average acc: {:.4f}%'.format(\n float(all_loss) / (int(all_size) + 1), 100 * float(all_corrects) / (int(all_size) + 1)))\n\n return predicteds, logits", "def batch_generate(self, text, mel_target):\n mel_in = torch.zeros(1,80,1).to(self.device)\n for i in range(mel_target.shape[2]):\n # forward pass\n y, ylogit, a = self.generate(text, mel_in)\n # create new mel_in\n mel_in = torch.cat((mel_in, y[:,:,-1].view(1,80,1)), dim=-1)\n\n # compute L1 loss to target and return target and prediction\n loss = F.l1_loss(y, mel_target)\n return loss, y, a", "def predict_batches(model, X, batchsize=None):\n if batchsize is None:\n batchsize = model.flags.bs\n pred = []\n for batch in grouper(X, batchsize):\n pred.append(model.predict(np.array(batch)))\n\n return np.concatenate(pred)", "def eval_batch(self, outputs, target):\n raise NotImplementedError", "def make_batch(self, batch_size):\n filenames = self.get_filenames()\n dataset = tf.contrib.data.TFRecordDataset(filenames)\n\n # Parse records.\n dataset = dataset.map(self.parser,\n num_threads=batch_size,\n output_buffer_size=2 * batch_size)\n\n # If training, shuffle and repeat indefinitely.\n if self.mode == 
tf.estimator.ModeKeys.TRAIN:\n dataset = dataset.shuffle(buffer_size=10000 + 3 * batch_size)\n dataset = dataset.repeat(-1)\n elif self.mode == tf.estimator.ModeKeys.PREDICT:\n if self.predict_split == 'train':\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.TRAIN)\n else:\n num_examples = self.num_examples_per_epoch(tf.estimator.ModeKeys.EVAL)\n # Take as much of the dataset as possible that can be evenly\n # divided by batch_size.\n while True:\n if num_examples % batch_size == 0:\n break\n else:\n num_examples -= 1\n dataset = dataset.take(num_examples)\n dataset = dataset.repeat(1)\n\n # dataset = dataset.take(1000) # For fast debugging!\n else:\n dataset = dataset.repeat(1)\n\n # Batch it up.\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n image_batch, label_batch = iterator.get_next()\n\n return image_batch, label_batch", "def sample_batch(self, batch_type, batch_size):\n\n if batch_type == \"train\":\n folders = self.meta_train_characters\n if batch_type == \"test\":\n folders = self.meta_test_characters\n if batch_type == \"val\":\n folders = self.meta_val_characters\n\n num_batches = len(folders)//batch_size\n folders = folders[:num_batches*batch_size]\n all_image_batches = []\n all_label_batches = []\n\n for batch_idx in range(batch_size):\n sample_classes = random.sample(folders, self.num_classes)\n #sample_classes = folders[batch_idx*self.num_classes : (batch_idx+1)*self.num_classes]\n one_hot_labels = np.identity(self.num_classes)\n\n labels_images = get_images(sample_classes, one_hot_labels, nb_samples=self.num_samples_per_class, shuffle=False)\n train_images = []\n train_labels = [] \n for sample_idx, (labels, images) in enumerate(labels_images):\n train_images.append(image_file_to_array(images, 784))\n train_labels.append(labels)\n\n \n train_images, train_labels = shuffle(train_images, train_labels)\n\n labels = np.vstack(train_labels).reshape((-1, self.num_classes, self.num_classes)) # K, N, N\n images = np.vstack(train_images).reshape((self.num_samples_per_class, self.num_classes, -1)) # K x N x 784\n\n all_label_batches.append(labels)\n all_image_batches.append(images)\n\n all_image_batches = np.stack(all_image_batches).astype(np.float32)\n all_label_batches = np.stack(all_label_batches).astype(np.float32)\n\n return all_label_batches, all_image_batches", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except 
StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def _process_batch_data(\n self, tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]]\n ) -> Tuple[tf.Tensor, Optional[tf.Tensor], Optional[tf.Tensor]]:\n # encode each attribute present in tf_batch_data\n text_output = None\n text_sequence_lengths = None\n batch_encoded = {}\n for attribute in tf_batch_data.keys():\n if attribute in SENTENCE_FEATURES_TO_ENCODE + STATE_LEVEL_FEATURES:\n (\n attribute_features,\n _text_output,\n _text_sequence_lengths,\n ) = self._encode_features_per_attribute(tf_batch_data, attribute)\n\n batch_encoded[attribute] = attribute_features\n if attribute == TEXT:\n text_output = _text_output\n text_sequence_lengths = _text_sequence_lengths\n\n # if both action text and action name are present, combine them; otherwise,\n # return the one which is present\n\n if (\n batch_encoded.get(ACTION_TEXT) is not None\n and batch_encoded.get(ACTION_NAME) is not None\n ):\n batch_action = batch_encoded.pop(ACTION_TEXT) + batch_encoded.pop(\n ACTION_NAME\n )\n elif batch_encoded.get(ACTION_TEXT) is not None:\n batch_action = batch_encoded.pop(ACTION_TEXT)\n else:\n batch_action = batch_encoded.pop(ACTION_NAME)\n # same for user input\n if (\n batch_encoded.get(INTENT) is not None\n and batch_encoded.get(TEXT) is not None\n ):\n batch_user = batch_encoded.pop(INTENT) + batch_encoded.pop(TEXT)\n elif batch_encoded.get(TEXT) is not None:\n batch_user = batch_encoded.pop(TEXT)\n else:\n batch_user = batch_encoded.pop(INTENT)\n\n batch_features = [batch_user, batch_action]\n # once we have user input and previous action,\n # add all other attributes (SLOTS, ACTIVE_LOOP, etc.) 
to batch_features;\n for key in batch_encoded.keys():\n batch_features.append(batch_encoded.get(key))\n\n batch_features = tf.concat(batch_features, axis=-1)\n\n return batch_features, text_output, text_sequence_lengths", "def get_batch_predictions(rnn, X, target):\n\n out = rnn.forward(X)\n arr_preds = nn.functional.softmax(out, dim=-1).data.cpu().numpy()\n arr_target = target.detach().cpu().numpy()\n\n return arr_preds, arr_target", "def conv_batchify(self, batch):\n batch_roles = []\n batch_context_tokens = []\n batch_response = []\n\n for conv_dict in batch:\n batch_roles.append(0 if conv_dict['role'] == 'Seeker' else 1)\n context_tokens = [utter + [self.conv_bos_id] for utter in conv_dict['context_tokens']]\n context_tokens[-1] = context_tokens[-1][:-1]\n batch_context_tokens.append(\n truncate(merge_utt(context_tokens), max_length=self.context_truncate, truncate_tail=False),\n )\n batch_response.append(\n add_start_end_token_idx(\n truncate(conv_dict['response'], max_length=self.response_truncate - 2),\n start_token_idx=self.start_token_idx,\n end_token_idx=self.end_token_idx\n )\n )\n\n batch_context_tokens = padded_tensor(items=batch_context_tokens,\n pad_idx=self.pad_token_idx,\n max_len=self.context_truncate,\n pad_tail=False)\n batch_response = padded_tensor(batch_response,\n pad_idx=self.pad_token_idx,\n max_len=self.response_truncate,\n pad_tail=True)\n batch_input_ids = torch.cat((batch_context_tokens, batch_response), dim=1)\n batch_roles = torch.tensor(batch_roles)\n\n return (batch_roles,\n batch_input_ids,\n batch_context_tokens,\n batch_response)", "def predict_on_batch(self, X):\n len_unpadded = len(X)\n if self.pad_batches:\n X = pad_features(self.batch_size, X)\n\n if not self._restored_model:\n self.restore()\n with self.eval_graph.graph.as_default():\n\n # run eval data through the model\n n_tasks = self.n_tasks\n output = []\n with self._get_shared_session(train=False).as_default():\n feed_dict = self.construct_feed_dict(X)\n data = self._get_shared_session(train=False).run(\n self.eval_graph.output, feed_dict=feed_dict)\n batch_output = np.asarray(data[:n_tasks], dtype=float)\n # reshape to batch_size x n_tasks x ...\n if batch_output.ndim == 3:\n batch_output = batch_output.transpose((1, 0, 2))\n elif batch_output.ndim == 2:\n batch_output = batch_output.transpose((1, 0))\n else:\n raise ValueError('Unrecognized rank combination for output: %s' %\n (batch_output.shape,))\n output.append(batch_output)\n\n outputs = np.array(\n from_one_hot(np.squeeze(np.concatenate(output)), axis=-1))\n\n outputs = np.copy(outputs)\n outputs = np.reshape(outputs, (len(X), n_tasks))\n outputs = outputs[:len_unpadded]\n return outputs", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n 
pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def predict(self, dir, batch_size=16):\n raise NotImplementedError(\"Subclasses should implement this!\")", "def _get_batch(self):\n # index = self._index[self._current]\n # im_path = self._imdb.image_path_from_index(0)\n # im_path = 'data/demo/dog.jpg'\n # with open(im_path, 'rb') as fp:\n # img_content = fp.read()\n\n batch_data = mx.nd.zeros((self.batch_size, 3, self._data_shape[0], self._data_shape[1]))\n batch_label = [] \n global imgi\n # img = mx.nd.array(imgi)\n # imgr = mx.img.imdecode(img_content)\n data = self._data_augmentation(imgi)\n batch_data[0] = data\n \n self._data = {'data': batch_data}\n self._label = {'label': None}", "def generator(features, labels, batch_size):\n \n # Create empty arrays to contain batch of features and labels#\n batch_features = np.zeros((batch_size, 160, 320, 3))\n batch_labels = np.zeros((batch_size, 1))\n while True:\n for i in range(batch_size):\n # choose random index in features\n index = random.choice(range(len(features)))\n batch_features[i] = features[index]\n batch_labels[i] = labels[index]\n yield batch_features, batch_labels", "def eval(self, val_batch_size: int = 32):\n\n val_generator = DataGenerator(\n batch_size=val_batch_size,\n split=\"test\",\n layers=self.n_blocks,\n train_mode=\"classifier\",\n )\n if self.train_mode == \"combined\":\n model = KM.Model(\n inputs=self.combined.input,\n outputs=self.combined.get_layer(\"logits\").output,\n )\n elif self.train_mode == \"classifier\":\n model = KM.Model(\n inputs=self.classifier.input,\n outputs=self.classifier.get_layer(\"logits\").output,\n )\n\n # initialize the array to store preds for each label\n accuracy = np.zeros((10, 10), dtype=int)\n\n for input, true_logits in val_generator():\n pred_logits = model.predict(input)\n\n true_logits = tf.split(\n true_logits, num_or_size_splits=self.n_blocks, axis=-1\n )\n true_logits = true_logits[0]\n\n # Split the logits from different levels\n pred_logits = tf.split(\n tf.expand_dims(pred_logits, axis=-1),\n num_or_size_splits=self.n_blocks,\n axis=1,\n )\n # Predicted label by taking an elementwise maximum across all layers\n pred_logits = tf.reduce_max(tf.concat(pred_logits, axis=2), axis=2)\n\n # Get true and pred labels\n true_labels = tf.argmax(true_logits, axis=-1)\n pred_labels = tf.argmax(pred_logits, axis=-1)\n for i, gt_label in enumerate(true_labels):\n pred_label = int(pred_labels[i])\n accuracy[int(gt_label)][pred_label] += 1\n\n import matplotlib.pyplot as plt\n import seaborn as 
sn\n\n plt.figure(figsize=(10, 7))\n sn.heatmap(accuracy / np.sum(accuracy, axis=-1), annot=True)\n plt.show()\n # metrics = self.combined.evaluate(\n # val_generator(),\n # )\n # print(metrics)", "def eval(self, splt):\n params = self.params\n self.embedder.eval()\n self.proj.eval()\n\n assert splt in ['valid', 'test']\n has_labels = 'y' in self.data[splt]\n\n scores = OrderedDict({'epoch': self.epoch})\n task = self.task.lower()\n\n idxs = [] # sentence indices\n prob = [] # probabilities\n pred = [] # predicted values\n gold = [] # real values\n\n lang_id = params.lang2id['en']\n\n for batch in self.get_iterator(splt):\n\n # batch\n if self.n_sent == 1:\n (x, lengths), idx = batch\n # x, lengths = truncate(x, lengths, params.max_len, params.eos_index)\n else:\n (sent1, len1), (sent2, len2), idx = batch\n # sent1, len1 = truncate(sent1, len1, params.max_len, params.eos_index)\n # sent2, len2 = truncate(sent2, len2, params.max_len, params.eos_index)\n x, lengths, _, _ = concat_batches(sent1, len1, lang_id, sent2, len2, lang_id, params.pad_index, params.eos_index, reset_positions=False)\n y = self.data[splt]['y'][idx] if has_labels else None\n\n # cuda\n x, y, lengths = to_cuda(x, y, lengths)\n\n # prediction\n output = self.proj(self.embedder.get_embeddings(x, lengths, positions=None, langs=None))\n p = output.data.max(1)[1] if self.is_classif else output.squeeze(1)\n idxs.append(idx)\n prob.append(output.cpu().numpy())\n pred.append(p.cpu().numpy())\n if has_labels:\n gold.append(y.cpu().numpy())\n\n # indices / predictions\n idxs = np.concatenate(idxs)\n prob = np.concatenate(prob)\n pred = np.concatenate(pred)\n assert len(idxs) == len(pred), (len(idxs), len(pred))\n assert idxs[-1] == len(idxs) - 1, (idxs[-1], len(idxs) - 1)\n\n # score the predictions if we have labels\n if has_labels:\n gold = np.concatenate(gold)\n prefix = f'{splt}_{task}'\n if self.is_classif:\n scores['%s_acc' % prefix] = 100. * (pred == gold).sum() / len(pred)\n scores['%s_f1' % prefix] = 100. * f1_score(gold, pred, average='binary' if params.out_features == 2 else 'micro')\n scores['%s_mc' % prefix] = 100. * matthews_corrcoef(gold, pred)\n else:\n scores['%s_prs' % prefix] = 100. * pearsonr(pred, gold)[0]\n scores['%s_spr' % prefix] = 100. 
* spearmanr(pred, gold)[0]\n logger.info(\"__log__:%s\" % json.dumps(scores))\n\n # output predictions\n pred_path = os.path.join(params.dump_path, f'{splt}.pred.{self.epoch}')\n with open(pred_path, 'w') as f:\n for i, p in zip(idxs, prob):\n f.write('%i\\t%s\\n' % (i, ','.join([str(x) for x in p])))\n logger.info(f\"Wrote {len(idxs)} {splt} predictions to {pred_path}\")\n\n return scores", "def on_predict_batch_end(self, step, logs=None):", "def predict_on_batch(self, X):\n len_unpadded = len(X)\n if self.pad_batches:\n X = pad_features(self.batch_size, X)\n\n if not self._restored_model:\n self.restore()\n with self.eval_graph.graph.as_default():\n\n # run eval data through the model\n n_tasks = self.n_tasks\n outputs = []\n with self._get_shared_session(train=False).as_default():\n n_samples = len(X)\n feed_dict = self.construct_feed_dict(X)\n data = self._get_shared_session(train=False).run(\n self.eval_graph.output, feed_dict=feed_dict)\n batch_outputs = np.asarray(data[:n_tasks], dtype=float)\n # reshape to batch_size x n_tasks x ...\n if batch_outputs.ndim == 3:\n batch_outputs = batch_outputs.transpose((1, 0, 2))\n elif batch_outputs.ndim == 2:\n batch_outputs = batch_outputs.transpose((1, 0))\n # Handle edge case when batch-size is 1.\n elif batch_outputs.ndim == 1:\n n_samples = len(X)\n batch_outputs = batch_outputs.reshape((n_samples, n_tasks))\n else:\n raise ValueError('Unrecognized rank combination for output: %s' %\n (batch_outputs.shape))\n # Prune away any padding that was added\n batch_outputs = batch_outputs[:n_samples]\n outputs.append(batch_outputs)\n\n outputs = np.squeeze(np.concatenate(outputs))\n\n outputs = np.copy(outputs)\n\n # Handle case of 0-dimensional scalar output\n if len(outputs.shape) > 0:\n return outputs[:len_unpadded]\n else:\n outputs = np.reshape(outputs, (1,))\n return outputs", "def gen_batch(img_dir, id_label_dict, batch_size, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n num_images = len(img_file_path)\n while True:\n for i in range(0, num_images-batch_size, batch_size):\n X, y = gen_data_file(img_file_path[i:i+batch_size], id_label_dict, num_class)\n yield X, y" ]
[ "0.7235579", "0.7028992", "0.70033675", "0.69987303", "0.6998135", "0.6970026", "0.69600385", "0.6889559", "0.67980856", "0.6786844", "0.6781948", "0.6712437", "0.6697618", "0.66913795", "0.66129225", "0.65865463", "0.6565857", "0.6542319", "0.6535848", "0.65276676", "0.65126806", "0.64974046", "0.6490124", "0.6489355", "0.64884955", "0.64843714", "0.6476121", "0.6433911", "0.6429112", "0.641584", "0.64010346", "0.6371066", "0.6366517", "0.63654625", "0.63554436", "0.63305354", "0.63278806", "0.632653", "0.6324552", "0.630482", "0.62985665", "0.62984324", "0.6282738", "0.62802094", "0.62744796", "0.6271214", "0.62516826", "0.6246582", "0.6246063", "0.6234397", "0.62332886", "0.6231217", "0.62259185", "0.61997056", "0.61942047", "0.61934274", "0.6186697", "0.61833686", "0.6181415", "0.6164056", "0.61487013", "0.6147595", "0.61470085", "0.61326593", "0.61292213", "0.6116224", "0.61129624", "0.6104841", "0.60987025", "0.6088845", "0.6084758", "0.6079261", "0.6073615", "0.6069138", "0.6058423", "0.6057556", "0.60424185", "0.60417306", "0.601254", "0.60034573", "0.600285", "0.60026395", "0.60016", "0.59983623", "0.5995509", "0.59940815", "0.5993339", "0.59907883", "0.5985988", "0.5965125", "0.59615684", "0.5960705", "0.59597296", "0.59582716", "0.5956417", "0.59502435", "0.59499705", "0.59496826", "0.5948168", "0.594582" ]
0.6662756
14
Splits a DataFrame into 3 distinct DataFrames based on the given percentages and returns a dict of the data.
def split_data(text_df, splits=None, rand_perm=True):
    if splits is None:
        splits = {'train': 0.6, 'val': 0.1, 'test': 0.3}
    if np.round(np.sum(list(splits.values())), 4) != 1:
        raise Exception("Split percentages do not sum to 1")
    size = len(text_df)
    if rand_perm:
        perm_idx = np.random.permutation(size)
    else:
        perm_idx = np.arange(size)
    text_df = text_df.iloc[perm_idx, :]
    all_data = dict()
    keys = list(splits.keys())
    pct = list(splits.values())
    count = np.round(np.array(pct) * size).astype(np.int32)
    split_idx = np.cumsum(count)[:-1]
    data_list = np.split(text_df, split_idx, axis=0)
    all_data = {keys[i]: data for i, data in enumerate(data_list)}
    return all_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_data(df_data, clusters):\n\n if clusters is None:\n\n return {0: df_data}\n\n return {\n k: df_data.loc[clusters.index[clusters == k]]\n for k in clusters.unique()\n }", "def split_train_dev_set(df, percent=0.2):\n train = []\n dev = []\n for k, g in df.groupby(\"sender\")[\"mid\", \"recipients\"]:\n n_msg = g.shape[0]\n n_dev = int(n_msg * percent)\n g = g.sort_values(\"date\")\n g_train = g[:-n_dev]\n g_dev = g[-n_dev:]\n train.append(g_train)\n dev.append(g_dev)\n # concat all dataframe\n df_train = pd.concat(train, axis=0).sort_index()\n df_dev = pd.concat(dev, axis=0).sort_index()\n return df_train, df_dev", "def split_dataset(df, predict_window):\n\n #split dataset into train and test datasets\n #train 80 percent of rows\n dataset_train = np.array(df[:int(df.shape[0]*0.8)])\n\n #test dataset is 20 percent of rows\n #50 - that's where historical data and prediction overlap\n dataset_test = np.array(df[int(df.shape[0]*0.8)- predict_window:])\n\n return dataset_train, dataset_test", "def split_percentiles_pediatrics(df):\n df.rename(columns={\"ageyears\": \"age\", \"sex\": \"Sex\"}, inplace=True)\n cols = [\"Sex\", \"agedays\", \"age\"]\n\n ht_cols = cols.copy()\n ht_cols.extend([col for col in df.columns if \"s_ht_p\" in col])\n df_ht = df[ht_cols]\n df_ht.columns = [c.replace(\"s_ht_p\", \"P\") for c in df_ht]\n\n wt_cols = cols.copy()\n wt_cols.extend([col for col in df.columns if \"s_wt_p\" in col])\n df_wt = df[wt_cols]\n df_wt.columns = [c.replace(\"s_wt_p\", \"P\") for c in df_wt]\n\n bmi_cols = cols.copy()\n bmi_cols.extend([col for col in df.columns if \"s_bmi_p\" in col])\n df_bmi = df[bmi_cols]\n df_bmi.columns = [c.replace(\"s_bmi_p\", \"P\") for c in df_bmi]\n\n return (df_ht, df_wt, df_bmi)", "def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]", "def split_data(df: pd.DataFrame, ratio: float, purging: bool = True, n_bars: int = 10) -> Tuple[pd.DataFrame, pd.DataFrame]:\n split_idx = int(df.shape[0] * ratio)\n df1 = df[:split_idx]\n df2 = df[split_idx:]\n if purging:\n purge_idx = round((n_bars-1) * ratio)\n df1 = df1[:-purge_idx]\n df2 = df2[(n_bars - 1 - purge_idx):]\n\n return df1, df2", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def split_by_percentage(data, percentage) -> tuple:\n try:\n percentage = int(round(percentage*len(data)))\n return(data[percentage:], data[:percentage])\n except Exception as error:\n print(f\"Error: split_by_percentage([...], {percentage}) -> {error}\")", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_train_test(df, percentage_train=50):\n return (\n df.loc[(df.index.values % 100) < percentage_train].reset_index().copy(),\n df.loc[~((df.index.values % 100) < percentage_train)].reset_index().copy(),\n )", "def split_dataset(dataset, 
train_percentage, feature_headers, target_header):\n \n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\n train_size=train_percentage)\n return train_x, test_x, train_y, test_y", "def split_data(dataset, ratio = 0.9):\n cutoff_row = int(dataset.shape[0] * ratio)\n return (dataset[:cutoff_row], dataset[cutoff_row:])", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n # Split dataset into train and test dataset\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],train_size=train_percentage)\r\n return train_x, test_x, train_y, test_y", "def subset_df(df: pd.DataFrame) -> dict:\n prct10 = int(round(len(df) * 10 / 100, 0))\n dict_nb = {}\n deb = 0\n fin = prct10\n dict_nb[\"df1\"] = df.iloc[deb:fin, :]\n deb = fin\n dixieme = 10 * prct10\n reste = (len(df) - dixieme)\n fin_reste = len(df) + 1\n for i in range(2, 11):\n fin = (i * prct10 + 1)\n dict_nb[\"df\" + str(i)] = df.iloc[deb:fin, :]\n if reste > 0:\n dict_nb[\"reste\"] = df.iloc[fin: fin_reste, :]\n deb = fin\n\n return dict_nb", "def split_dataset(data_df: pd.DataFrame) -> dict:\n\n # Splits the dataset\n print(data_df.shape)\n X, y = data_df.drop(\"SalePrice\", axis=1), data_df[\"SalePrice\"]\n splits = train_test_split(X, y, test_size=0.3)\n\n # Transform to dict\n labels = [\"X_train\", \"X_test\", \"y_train\", \"y_test\"]\n splits_dict = dict(zip(labels, splits))\n\n return splits_dict", "def data():\n df = gen_sliced_df()\n df = df[[\"x\", \"z_categ\", \"y\", \"residual\"]]\n new_df = df.iloc[[1, 100, 150, 200, 250, 300, 305, 400, 405, 500, 550, 609]].copy()\n return {\"df\": df, \"new_df\": new_df}", "def split_data(train_percentage, *data):\n train = [entry[0:int(train_percentage * len(entry))] for entry in data]\n val = [entry[int(train_percentage * len(entry)):] for entry in data]\n return train, val", "def split_train_test_by_percentage(dataset, train_percentage=0.8):\n train_length = int(len(dataset) * train_percentage)\n return torch.utils.data.random_split(dataset, (train_length, len(dataset) - train_length))", "def split_dataset(dataset, train_percentage, feature_headers, target_header):\r\n\r\n train_x, test_x, train_y, test_y = train_test_split(dataset[feature_headers], dataset[target_header],\r\n train_size=train_percentage, random_state=42)\r\n return train_x, test_x, train_y, test_y", "def split_dataset(dataset, train_percentage, valid_percentage):\n\n # Split dataset into train and test dataset\n train_x, test_x, train_y, test_y = train_test_split(dataset[:, :-1], dataset[:, -1],\n train_size=train_percentage + valid_percentage,\n test_size=1-(train_percentage + valid_percentage))\n\n valid_x = train_x[int(np.ceil(train_percentage * len(dataset))):]\n valid_y = train_y[int(np.ceil(train_percentage * len(dataset))):]\n\n return train_x, valid_x, test_x, train_y, valid_y, test_y", "def _split_by_filename(\n df: pd.DataFrame):\n data = namedtuple('data', ['filename', 'object'])\n gb = df.groupby('filename')\n return [data(filename, gb.get_group(x)) for filename, x in zip(gb.groups.keys(), gb.groups)]", "def split_data(data, labels, proportion):\n size = data.shape[0]\n np.random.seed(42)\n s = np.random.permutation(size)\n split_idx = int(proportion * size)\n return (data[s[:split_idx]], data[s[split_idx:]], labels[s[:split_idx]], labels[s[split_idx:]])", "def split_dataset(dataset: torch.utils.data.Dataset, split_perc: float = 
0.20):\n assert (split_perc >= 0.0) and (split_perc <= 1.0), (\n f\"FATAL ERROR: invalid split_perc value {split_perc}.\" f\"Expecting float >= 0.0 and <= 1.0\"\n )\n\n if split_perc > 0.0:\n num_recs = len(dataset)\n train_count = int((1.0 - split_perc) * num_recs)\n test_count = num_recs - train_count\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_count, test_count])\n return train_dataset, test_dataset\n else:\n return dataset, None", "def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe", "def split_train_test_data(total_data_df, frac):\n test_data_df = total_data_df.sample(frac=frac, random_state=1)\n train_data_df = total_data_df.loc[total_data_df.index.difference(test_data_df.index)]\n return train_data_df, test_data_df", "def train_test_split(ratio, classes, files):\n train_dict = {}\n test_dict = {}\n for cl in classes:\n train_cnt = int(ratio * len(files[cl]))\n train_dict[cl] = files[cl][:train_cnt]\n test_dict[cl] = files[cl][train_cnt:]\n return train_dict, test_dict", "def split_data(df, train_prop):\n # Create random Tensors to hold inputs and outputs, and wrap them in Variables\n train_df = df.sample(frac=train_prop)\n test_df = df.loc[~df.index.isin(train_df.index)]\n return train_df, test_df", "def get_contests_per_style(data_frame: pd.DataFrame, ballot_styles: list) -> dict:\n contests_per_style = collections.OrderedDict()\n for ballot_style in ballot_styles:\n df = data_frame[data_frame['Ballot Style'] == ballot_style].copy()\n df.drop('Ballot Style', axis=1, inplace=True)\n contests_per_style[ballot_style] = list(df.dropna(axis='columns', how='all'))\n return contests_per_style", "def split_dataset(df_playlists, df_interactions):\n df_train_pl, cat_pids = generate_train(df_playlists)\n df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions)\n\n return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr", "def classify_df(cls, data):\n\t\tif isinstance(data, pd.DataFrame) == False:\n\t\t\traise Exception(\"data must be pandas.Dataframe\")\n\t\t#get unique atom_type id and sorting\n\t\tunique_atom_type = sorted(data[\"atom_id\"].unique())\n\t\t# find the subset dataframe for each atom_type\n\t\t# put their into a dictionary\n\t\t# tuple pair key, val in .items() might be useful\n\t\tgroups = dict()\n\t\tfor i in unique_atom_type:\n\t\t\tgroups[i] = data.loc[data[\"atom_id\"] == i]\n\t\treturn groups", "def dataset_splits(self):\n # 10% evaluation data\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 799,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]", "def getFig3Data(df, path):\n\ttmp = pd.DataFrame()\n\t# tmp = tmp.append(df)\n\ttmp = tmp.append(df[df.Location == 'exon'])\n\ttmp = tmp.append(df[df.Location == 'intron'])\n\t# print(df[df.Location == 'exon'].NbpG4rWt)\n\t# print(df[df.Location == 'intron'].NbpG4rWt)\n\tdicoNbTr = countTranscript.getFig3Percent(path)\n\tGlobal = pd.DataFrame()\n\tgroups = tmp.groupby('Class')\n\tfor name, group in groups:\n\t\trow = sumSubTable(group, name)\n\t\trow['Class'] = name\n\t\trow = pd.DataFrame(row, index=[len(Global)+1])\n\t\tGlobal = Global.append(row)\n\t# print(sum(Global.NbpG4rWt))\n\trow = {'Class' : 'Global',\n\t\t\t'nuclG' : sum(Global.nuclG),\n\t\t\t'nuclC' : sum(Global.nuclC),\n\t\t\t'NbpG4rWt' : sum(Global.NbpG4rWt),\n\t\t\t'NbpG4rShuf' : 
sum(Global.NbpG4rShuf),\n\t\t\t'Tot' : sum(Global.Tot)}\n\trow = pd.DataFrame(row, index=[len(Global)+1])\n\tGlobal = Global.append(row)\n\tGlobal['nbTr'] = Global['Class'].map( dicoNbTr['Tot'] )\n\tGlobal['NbTrpG4Wt'] = Global['Class'].map( dicoNbTr['Wt'] )\n\tGlobal['NbTrpG4Shuf'] = Global['Class'].map( dicoNbTr['Shuf'] )\n\tGlobal['PercentWt'] = Global['NbTrpG4Wt'] / Global['nbTr'] * 100\n\tGlobal['PercentShuf'] = Global['NbTrpG4Shuf'] / Global['nbTr'] * 100\n\tGlobal = computeDensity(Global, 'Segment')\n\treturn Global", "def split_data(df: pd.DataFrame):\n size = int(df.shape[0] * 0.8)\n indexes = np.random.choice(df.index, size, replace=False)\n train_set = df.loc[indexes]\n test_set = df.loc[~df.index.isin(indexes)]\n return train_set, test_set", "def split_dataset(dataset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dataset = dataset.enumerate()\n train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dataset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_dataset(dataset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dataset = dataset.enumerate()\n train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dataset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_dataset(dataset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dataset = dataset.enumerate()\n train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dataset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def split_parliament(parliament_df):\n pro_independence = get_pro_independence_parties()\n votes = parliament_df[VOTES].to_dict()\n deputies = parliament_df[SEATS].to_dict()\n const_votes = sum([v for k, v in votes.items() if k not in pro_independence])\n indep_votes = sum([v for k, v in votes.items() if k in pro_independence])\n const_diput = sum([v for k, v in deputies.items() if k not in pro_independence])\n indep_diput = sum([v for k, v in deputies.items() if k in pro_independence])\n return {NO_INDEPENDENCE: {VOTES: const_votes, SEATS: const_diput},\n PRO_INDEPENDENCE: {VOTES: indep_votes, SEATS: indep_diput}}", "def split_lst_x_perc(lst, perc):\r\n tst_idx = [i for i in range(0, int(len(lst) * 0.2))]\r\n trn_idx = [i for i in range(int(len(lst) * 0.2), len(lst))]\r\n tst = 
slice_by_index(lst, tst_idx)\r\n trn = slice_by_index(lst, trn_idx)\r\n return trn, tst", "def _split_into_categories(data_struct):\n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n groups = {}\n\n for cat in set(data_struct[\"Objects\"]): \n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n mask = []\n mask = np.empty((len(indices),len(data_names)))\n\n for counter,value in enumerate(data_names):\n mask[:,counter] = np.array(data_struct[value])[indices]\n\n groups[cat] = mask\n \n return(groups,data_names)", "def splitData(df, split):\n train = df.iloc[:int(len(df)*split)]\n test = df.iloc[int(len(df)*split):]\n \n return train, test", "def split_data_randomly(X, y, percentage):\n\n s = X.shape\n mask = np.random.rand(s[0]) <= percentage\n X_train = X[mask]\n y_train = y[mask]\n X_test = X[~mask]\n y_test = y[~mask]\n return X_train, y_train, X_test, y_test", "def split_dataset(dataset, Ntotal, val_frac,\n batch_size, num_workers,\n random_seed=0, shuffle=True, balance=False):\n \n Nval = math.floor(Ntotal*val_frac)\n train_ds, val_ds = ch.utils.data.random_split(dataset, \n [Ntotal - Nval, Nval], \n generator=ch.Generator().manual_seed(random_seed))\n if balance: \n val_ds = balance_dataset(val_ds)\n split_datasets = [train_ds, val_ds]\n \n split_loaders = []\n for ds in split_datasets:\n split_loaders.append(ch.utils.data.DataLoader(ds, \n num_workers=num_workers, \n batch_size=batch_size, \n shuffle=shuffle))\n return split_datasets, split_loaders", "def get_data_parascans(rootdir, datasetnames, filterdata):\n datasets = {}\n\n print('Loading: ' + str(len(datasetnames)) + ' datasets')\n for dataset in tqdm(datasetnames):\n time.sleep(0.1)\n\n # Original images (to predict)\n images_org = load_scans(rootdir + dataset + '/crop_org')\n\n # Ground truth images (mask image of expert)\n images_gt = load_scans(rootdir + dataset + '/crop_gt')\n images_gt = sitk.GetArrayFromImage(images_gt)\n\n # Smoothed images by specific filter\n images_smoothed = load_scans_filter(images_org, filterdata)\n\n # Save images in datasets dictionary\n datasets.update({dataset : {'org': images_org, 'gt': images_gt, 'smoothed': images_smoothed}})\n\n print(\"datasets created\")\n return datasets", "def DivideDF(all_data):\n return all_data.iloc[:890], all_data.iloc[891:].drop(\"Survived\", axis=1)", "def forestPandas(data, resCol, maxDepth=None, percentage=70, numfeats = 15, fsize=5, selected=None):\n indices = data.index.tolist()\n trainingSets = {}\n percent = float(percentage)/100\n split = int(percent * len(indices) + 0.5)\n cols = data.columns.tolist() \n for i in range(fsize + 1):\n if selected == None:\n np.random.shuffle(cols)\n selected = cols[:15]\n selected.append(\"spam\")\n np.random.shuffle(indices)\n trainingSets[i] = {}\n trainingSets[i][\"data\"]= data[selected].loc[indices[:split + 1]]\n trainingSets[i][\"tree\"]= buildTreePandas(trainingSets[i][\"data\"], resCol, maxDepth=maxDepth) \n return trainingSets", "def split_dataset(dset: tf.data.Dataset, validation_data_fraction: float):\n\n validation_data_percent = round(validation_data_fraction * 100)\n if not (0 <= validation_data_percent <= 100):\n raise ValueError(\"validation data fraction must be ∈ [0,1]\")\n\n dset = dset.enumerate()\n train_dataset = dset.filter(lambda f, data: f % 100 > validation_data_percent)\n validation_dataset = dset.filter(lambda f, data: f % 100 <= validation_data_percent)\n\n # remove enumeration\n train_dataset = train_dataset.map(lambda f, data: 
data)\n validation_dataset = validation_dataset.map(lambda f, data: data)\n\n return train_dataset, validation_dataset", "def create_information_dictionary_for_sites(hpo_dfs, selected_hpo_names,\n most_popular_race_cids):\n\n racial_percentages = {}\n\n # want to get the percentages for each of the race concept IDs\n for race_concept_id in most_popular_race_cids:\n race_percentage_list = []\n\n # want to look at the sites in parallel - access their dataframe\n for hpo in selected_hpo_names:\n df = hpo_dfs[hpo]\n temp = df.loc[df['race_concept_id'] == race_concept_id]\n\n if temp.empty:\n race_percentage_list.append(0)\n else:\n val = float(temp['percent_of_site_persons']) # convert to float\n race_percentage_list.append(val)\n\n racial_percentages[race_concept_id] = race_percentage_list\n\n return racial_percentages", "def _get_percentages(games_table: pd.DataFrame, stats_table: pd.DataFrame,\n grouping_column: str) -> pd.DataFrame:\n stats_table[\n [\n \"total_free_throws_achieved\",\n \"total_free_throws_attempted\",\n \"total_two_point_achieved\",\n \"total_two_point_attempted\",\n \"total_three_point_achieved\",\n \"total_three_point_attempted\",\n ]\n ] = (\n games_table[\n [\n grouping_column,\n \"free_throws_achieved\",\n \"free_throws_attempted\",\n \"two_point_achieved\",\n \"two_point_attempted\",\n \"three_point_achieved\",\n \"three_point_attempted\",\n ]\n ]\n .groupby(grouping_column)\n .sum()\n .reset_index()\n .drop(grouping_column, axis=1)\n )\n\n stats_table[\"free_throws_pct\"] = (\n stats_table[\"total_free_throws_achieved\"] / stats_table[\"total_free_throws_attempted\"]\n )\n stats_table[\"two_point_pct\"] = (\n stats_table[\"total_two_point_achieved\"] / stats_table[\"total_two_point_attempted\"]\n )\n stats_table[\"three_point_pct\"] = (\n stats_table[\"total_three_point_achieved\"] / stats_table[\"total_three_point_attempted\"]\n )\n return stats_table", "def split(self, fractions=[0.8, 0.2]):\n\n if sum(fractions) > 1.0 or sum(fractions) <= 0:\n raise ValueError(\"the sum of fractions argument should be between 0 and 1\")\n\n # random indices\n idx = np.arange(self.n_samples)\n np.random.shuffle(idx)\n\n # insert zero\n fractions.insert(0, 0)\n\n # gte limits of the subsets\n limits = (np.cumsum(fractions) * self.n_samples).astype(np.int32)\n\n subsets = []\n # create output dataset\n for i in range(len(fractions) - 1):\n subsets.append(\n Dataset(self.inputs[idx[limits[i]:limits[i + 1]]], self.targets[idx[limits[i]:limits[i + 1]]]))\n\n return subsets", "def group_data(data):\n\n data_grouped = dict()\n\n for data_pt in data:\n resonance_id = data_pt.par['resonance_id']\n\n assignment = parse_assignment(resonance_id)\n index = int(assignment[0][0])\n\n data_grouped.setdefault((index, resonance_id), []).append(data_pt)\n\n return data_grouped", "def get_contribution_dataframe_groups(self):\n pargrp_dict = {}\n par = self.pst.parameter_data\n groups = par.groupby(\"pargp\").groups\n for grp,idxs in groups.items():\n pargrp_dict[grp] = list(par.loc[idxs,\"parnme\"])\n return self.get_contribution_dataframe(pargrp_dict)", "def DivideDF(df_all):\n return df_all.iloc[:df_all.trn_len], df_all.iloc[df_all.trn_len:]", "def df_division(data, col_name, n_group=5, ascending=False):\n assert col_name in data.columns, '{} is not in columns of data!'.format(col_name)\n assert data[col_name].dtype == 'float' or data[col_name].dtype == 'int', \\\n 'type of {} is not comparable!'.format(col_name)\n\n data.reset_index(drop=True, inplace=True)\n rows = data.shape[0]\n 
rows_each_group = rows // n_group\n data.sort_values(by=col_name, ascending=ascending, inplace=True)\n data.reset_index(drop=True, inplace=True)\n\n division = []\n for i in range(n_group):\n if not i == n_group-1:\n division.append(data.iloc[i * rows_each_group: (i+1) * rows_each_group, :])\n else:\n division.append(data.iloc[i * rows_each_group:, :])\n\n return division", "def split_data(x, y, ratio, index=None):\n m = x.shape[0]\n splitter = np.cumsum(ratio)\n train_start = 0\n val_start = batch_size * ((splitter[0] * m) // batch_size)\n test_start = batch_size * ((splitter[1] * m) // batch_size)\n test_end = batch_size * ((splitter[2] * m) // batch_size)\n\n val_start = int(val_start)\n test_start = int(test_start)\n test_end = int(test_end)\n\n if index is not None:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n index[train_start:val_start],\n x[val_start:test_start, :], y[val_start:test_start, :],\n index[val_start:test_start],\n x[test_start:test_end, :], y[test_start:test_end, :],\n index[test_start:test_end]\n )\n\n\n\n else:\n split = ( x[train_start:val_start, :], y[train_start:val_start, :],\n x[val_start:test_start, :], y[val_start:test_start, :],\n x[test_start:test_end, :], y[test_start:test_end, :]\n )\n\n return split", "def extract_data(first, second, third):\n\n first_df = pd.read_csv(first)\n second_df = pd.read_csv(second)\n third_df = pd.read_csv(third)\n\n return first_df, second_df, third_df", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def build_data_cv(df, datapath, n_folds=10):\n\n kf_ids = [np.array([], dtype=int) for _ in range(0, 10)]\n\n # split each class ids into number of folds and add to each fold\n ls = df.label.unique()\n for l in ls:\n c = df.label[df.label == l].index\n for i in c:\n rnd = random.randint(0, n_folds-1)\n kf_ids[rnd] = np.append(kf_ids[rnd], i)\n\n splits_data = []\n\n for ids in kf_ids:\n\n splits_data.append({\n \"id\": np.array(df.file[ids]),\n \"x\": read_data_files(list(df.file), datapath, ids),\n \"y\": np.array(df.label[ids])\n })\n\n return splits_data", "def split_data(df):\n\n df['ranked_latest'] = df.groupby(['userId'])['timestamp'].rank(method='first', ascending=False)\n train_df = df[df['ranked_latest'] != 1]\n test_df = df[df['ranked_latest'] == 1]\n\n train_df = train_df[['userId', 'movieId', 'rating']]\n test_df = test_df[['userId', 'movieId', 'rating']]\n\n return train_df, test_df", "def __split_df(self, df:pd.DataFrame, ratio:float, rem_day4:bool, shuffle:bool, n_vec: int=1) -> Tuple[list, list, list, list]:\n X_test = []\n X_train = [] \n y_test = [] \n y_train = [] \n\n header = df['label'].tolist()\n responses = df['response'].tolist()\n # Removing Day 4\n trails = set()\n for i in range(len(header)):\n if rem_day4 and responses[i] == \"0\":\n pass\n else:\n trails.add(header[i])\n \n header = trails\n\n # Getting all the matrices from the trials\n for trial in header:\n # geting rows with (day, Trail)-label\n rows = df.loc[df['label'] == trial].to_numpy()\n # getting response label\n response = rows[0][-1]\n # getting the actual data from the matrix\n rows = np.delete(rows, np.s_[0,1,-1], axis=1)\n if shuffle:\n # shuffle PC-Matrix\n np.random.shuffle(rows)\n\n if n_vec == 1:\n pass\n else:\n new_rows 
= []\n # taking samples\n while len(rows) > n_vec:\n vecs = rows[:n_vec]\n # deleting vectors that are already taken\n rows = rows[n_vec:]\n # Concat vectors to one\n new_rows.append(np.concatenate(vecs))\n rows = new_rows\n\n # Splitting into Test and training\n cut = int(ratio*len(rows))\n for i in range(len(rows)):\n if i < cut or ratio == 0.0:\n X_train.append(rows[i])\n y_train.append(response)\n else:\n X_test.append(rows[i])\n y_test.append(response)\n\n return X_train, X_test, y_train, y_test", "def split(interactions: pd.DataFrame, p: float = 0.25) -> Tuple[pd.DataFrame, pd.DataFrame]:\n test = interactions.groupby('track_id').sample(frac=p)\n rows = set((a, b) for _, (a, b, _) in test.iterrows())\n train_mask = [i for i, (_, (a, b, _)) in tqdm(enumerate(interactions.iterrows()), desc=\"Constructing train-set\",\n total=len(interactions)) if (a, b) not in rows]\n train = interactions.iloc[train_mask]\n\n return train, test", "def assign_folds(df, num_folds, held_out_fraction, held_out_max):\n result_df = pandas.DataFrame(index=df.index)\n\n for fold in range(num_folds):\n result_df[\"fold_%d\" % fold] = True\n for (allele, sub_df) in df.groupby(\"allele\"):\n medians = sub_df.groupby(\"peptide\").measurement_value.median()\n\n low_peptides = medians[medians < medians.median()].index.values\n high_peptides = medians[medians >= medians.median()].index.values\n\n held_out_count = int(\n min(len(medians) * held_out_fraction, held_out_max))\n\n held_out_peptides = set()\n if held_out_count == 0:\n pass\n elif held_out_count < 2:\n held_out_peptides = set(\n medians.index.to_series().sample(n=held_out_count))\n else:\n held_out_low_count = min(\n len(low_peptides),\n int(held_out_count / 2))\n held_out_high_count = min(\n len(high_peptides),\n held_out_count - held_out_low_count)\n\n held_out_low = pandas.Series(low_peptides).sample(\n n=held_out_low_count) if held_out_low_count else set()\n held_out_high = pandas.Series(high_peptides).sample(\n n=held_out_high_count) if held_out_high_count else set()\n held_out_peptides = set(held_out_low).union(set(held_out_high))\n\n result_df.loc[\n sub_df.index[sub_df.peptide.isin(held_out_peptides)],\n \"fold_%d\" % fold\n ] = False\n\n print(\"Training points per fold\")\n print(result_df.sum())\n\n print(\"Test points per fold\")\n print((~result_df).sum())\n return result_df", "def outcome_split(df,outcome_dict={\n 'Good':['To Home','No Reason Given','Assissted Living Facility','No Reason Given'], # CAN WE ASSUME THIS??? 
that In Nursing Facility\n 'Bad':['Hospital','Death'],\n 'Test':['In Nursing Facility','Skilled Nursing Facility (SNF)',\n 'Not approriate for program, removed']}):\n outcome={}\n train={}\n for row in range(df.shape[0]):\n if df.iloc[row]['status'] in outcome_dict['Good']:\n outcome[df.iloc[row]['patient_link']]=1\n train[df.iloc[row]['patient_link']]=1\n if df.iloc[row]['status'] in outcome_dict['Bad']:\n outcome[df.iloc[row]['patient_link']]=0\n train[df.iloc[row]['patient_link']]=1\n if df.iloc[row]['status'] in outcome_dict['Test']:\n train[df.iloc[row]['patient_link']]=0\n elif df.iloc[row]['discharge']==True:\n train[df.iloc[row]['patient_link']]=1\n elif df.iloc[row]['discharge']==False:\n train[df.iloc[row]['patient_link']]=0\n df['outcome']=df['patient_link'].map(outcome)\n df['train']=df['patient_link'].map(train)\n return df", "def _split_flattened(data, split_ratio, seed=default.DEFAULT_CV_RANDOM_SEED):\n\n check.argument_numeric(split_ratio, 0, 1)\n\n pc = np.sum(data.values != 0)\n gs_count = int(split_ratio * pc)\n idx = _make_shuffled_index(pc, seed=seed)\n\n pr_idx = data.values[data.values != 0].copy()\n gs_idx = data.values[data.values != 0].copy()\n\n pr_idx[idx[0:gs_count]] = 0\n gs_idx[idx[gs_count:]] = 0\n\n gs = data.values.copy()\n pr = data.values.copy()\n\n gs[gs != 0] = gs_idx\n pr[pr != 0] = pr_idx\n\n priors_data = pd.DataFrame(pr, index=data.index, columns=data.columns)\n gold_standard = pd.DataFrame(gs, index=data.index, columns=data.columns)\n\n return priors_data, gold_standard", "def split_dataset(dataset, eval_proportion, shuffle=False):\n split_sizes = [1. - eval_proportion, eval_proportion]\n split_frames = []\n split_demos = []\n num_demos = dataset.get_num_demos()\n split_num_demos = [int(fraction * num_demos) for fraction in split_sizes]\n split_num_demos[0] += num_demos - sum(split_num_demos)\n num_instances = len(dataset)\n demos = list(range(num_demos))\n if shuffle:\n np.random.shuffle(demos)\n start_idx = 0\n for split_idx in range(len(split_sizes)):\n if split_sizes[split_idx] == 0:\n split_frames.append(None)\n continue\n split_frames.append([])\n split_demos.append(range(start_idx, start_idx + split_num_demos[split_idx]))\n for demo_idx in split_demos[split_idx]:\n demo_slice = dataset.get_demo_frame_idxs(demos[demo_idx])\n split_frames[split_idx].extend(\n list(range(demo_slice.start, demo_slice.stop)))\n start_idx += split_num_demos[split_idx]\n # Check if the split indices are unique\n assert len(set(split_frames[split_idx])) == len(split_frames[split_idx])\n\n if eval_proportion > 0:\n # Check that splits do not intersect\n for split_idx in range(len(split_frames)):\n for split_idx2 in range(split_idx + 1, len(split_frames)):\n assert len(set(split_frames[split_idx]).intersection(split_frames[split_idx2])) == 0\n assert sum([len(s) for s in split_frames]) == num_instances\n\n split_datasets = [Subset(dataset, split) if split is not None else None for split in split_frames]\n return split_datasets", "def group(df, dvmin, dvmax, step):\n\tr = step/2\n\tres = []\n\n\tfor ticker in range(dvmin, dvmax, step):\n\t\t#select values by left-right difference in sum in range (x-r, x+r). x is the middle value of a bucket. \n\t\tsubgroup = df.loc[(df['diff']>ticker-r) & (df['diff']<ticker+r)\n\t\t\t& (df['choice'] != 0.5)]\n\t\t#count frequency of choosing left\n\t\tnum = subgroup['choice'].sum()\n\t\t#total number of datapoints in the bucket\n\t\tdenom = subgroup.shape[0]\n\t\t#calculate and append the prob. 
append 0 if empty bucket\n\t\tres.append(num/denom) if denom else res.append(0)\n\treturn res", "def cluster_by_split(filtered_df):\n global features_in_range\n global table\n # make a copy of the entire data set\n unfiltered_df = table\n # get total number of robot faces in data set\n total_rows = len(unfiltered_df)\n\n # drop any column that is not included in our list of 11 features\n # 11 features = 16 features with no dependencies filtered via 20-80% range\n for col in unfiltered_df:\n if not unfiltered_df[col].name in features_in_range:\n unfiltered_df = unfiltered_df.drop(unfiltered_df[col].name, 1)\n\n # iterate over the dataframe of columns generated by the range\n for col in filtered_df:\n try:\n # for each column, call groupby() and calculate percentage\n check_for_20 = unfiltered_df.groupby(col).size().reset_index(name='count')\n check_for_20['as_percent'] = 100 * check_for_20['count'] / float(total_rows)\n # ignore feature values that represent less than 20% of all faces\n cluster_by_feature = check_for_20[check_for_20['as_percent'] >= 20]\n # if feature has values over 20%, iterate over\n # each feature_value and generate clusters\n if not cluster_by_feature.empty:\n # iterate over every value of the feature\n for index, row in cluster_by_feature.iterrows():\n # use feature value to call groupby() on the entire data set\n results = unfiltered_df[unfiltered_df[col] == row[0]]\n results = results \\\n .groupby(list(unfiltered_df)) \\\n .size() \\\n .reset_index(name='count')\n # calculate count as a percentage\n results['as_percent'] = 100 * results['count'] / float(total_rows)\n results = results.sort_values(by='as_percent', ascending=False)\n # store results in a .tsv file\n filename = str(col) + \"_\" + str(row[0]) + '_feature_cluster.tsv'\n results.to_csv(filename.replace(\"/\", \"-\"), header=True, sep='\\t')\n print(\"results written to file\")\n except:\n # 'count' and 'percentage' columns will generate errors\n # since they don't exist in the original data set\n pass", "def split_data(x, y, ratio, seed=1):\n # set seed\n np.random.seed(seed)\n # ***************************************************\n # INSERT YOUR CODE HERE\n # split the data based on the given ratio: TODO\n # ***************************************************\n ind = np.random.permutation(y.shape[0])\n threshold = int(y.shape[0]*ratio)\n return y[ind[:threshold]], x[ind[:threshold]], y[ind[threshold:]], x[ind[threshold:]]", "def _create_performance_contribution_tables(self, performance_df: QFDataFrame) -> List[DFTable]:\n # Create a QFSeries which contains the initial amount of cash in the portfolio for each year / month\n numeric_columns = [col for col in performance_df.columns if is_numeric_dtype(performance_df[col])]\n portfolio_values = performance_df[numeric_columns].sum().shift(fill_value=self._initial_cash).cumsum()\n performance_df[numeric_columns] = performance_df[numeric_columns] / portfolio_values[numeric_columns]\n\n # Add category column and aggregate data accordingly\n ticker_name_to_category = {t.name: category for t, category in self._ticker_to_category.items()}\n performance_df[\"Category\"] = performance_df[\"Asset\"].apply(lambda t: ticker_name_to_category[t])\n all_categories = list(set(ticker_name_to_category.values()))\n performance_df = performance_df.sort_values(by=[\"Category\", \"Asset\"])\n performance_df = performance_df.groupby(\"Category\").apply(\n lambda d: pd.concat([PricesDataFrame({**{\"Asset\": [d.name], \"Category\": [d.name]},\n **{c: [d[c].sum()] for c in 
numeric_columns}}), d],\n ignore_index=True)).drop(columns=[\"Category\"])\n\n # Add the Total Performance row (divide by 2 as the df contains already aggregated data for each group)\n total_sum_row = performance_df[numeric_columns].sum() / 2\n total_sum_row[\"Asset\"] = \"Total Performance\"\n performance_df = performance_df.append(total_sum_row, ignore_index=True)\n\n # Format the rows using the percentage formatter\n performance_df[numeric_columns] = performance_df[numeric_columns].applymap(lambda x: '{:.2%}'.format(x))\n\n # Divide the performance dataframe into a number of dataframes, so that each of them contains up to\n # self._max_columns_per_page columns\n split_dfs = np.array_split(performance_df.set_index(\"Asset\"),\n np.ceil((performance_df.num_of_columns - 1) / self._max_columns_per_page), axis=1)\n df_tables = [DFTable(df.reset_index(), css_classes=['table', 'shrink-font', 'right-align', 'wide-first-column'])\n for df in split_dfs]\n\n # Get the indices of rows, which contain category info\n category_indices = performance_df[performance_df[\"Asset\"].isin(all_categories)].index\n\n for df_table in df_tables:\n # Add table formatting, highlight rows showing the total contribution of the given category\n df_table.add_rows_styles(category_indices, {\"font-weight\": \"bold\", \"font-size\": \"0.95em\",\n \"background-color\": \"#cbd0d2\"})\n df_table.add_rows_styles([performance_df.index[-1]], {\"font-weight\": \"bold\", \"font-size\": \"0.95em\",\n \"background-color\": \"#b9bcbd\"})\n return df_tables", "def split_dataset(samples, ratio=0.8):\n nsamples = len(samples)\n num_train = int(ratio*nsamples)\n\n # shuffle samples\n shuffle(samples)\n\n trainset = samples[:num_train]\n testset = samples[num_train:]\n\n return trainset, testset", "def split_dataset_by_year(dataset, save_dataset=True):\n\n key = str(dataset[0][0])[:4]\n datasets = []\n current_dataset = []\n\n for data in dataset:\n if key == str(data[0])[:4]:\n current_dataset.append(data[1])\n else:\n datasets.append(current_dataset.copy())\n key = str(data[0])[:4]\n current_dataset.clear()\n current_dataset.append(data[1])\n\n datasets.append(current_dataset.copy())\n\n if save_dataset:\n for i in range(0, len(datasets)):\n pandas.DataFrame(datasets[i]).to_csv(\"dataset_\" + str(i + 1) + \".csv\", index=False)\n\n return datasets", "def split_data(test_data, split_ratio):\n split_index = int(split_ratio * len(test_data))\n \n # randomly permute the values in place\n random.shuffle(test_data)\n \n # take slices of the determined size\n training_set = copy.copy(test_data[:split_index])\n test_data = copy.copy(test_data[split_index:])\n\n return training_set, test_data", "def merge_keyword_chunks(data_list):\n # Iteratively merge the DataFrame objects in the list of data\n data = reduce((lambda x, y: merge_two_keyword_chunks(x, y)), data_list)\n # Find the maximal value across keywords and time\n max_value = data.max().max()\n # Rescale the trends by the maximal value, i.e. 
such that the largest value across keywords and time is 100\n data = 100 * data / max_value\n return data", "def read_dfdict_data(datadir, subset=None):\n print('Reading datasets...')\n # Initialize dict to store all dataframes\n dfdict = {}\n\n # If subset of datasets are given, read only those\n if subset is not None:\n with open(subset, 'r') as f:\n datasetids = f.read().splitlines()\n else:\n datasetids = get_dataset_ids(datadir)\n\n # Read each dataset and convert to relative abundance\n for dataset in datasetids:\n print(dataset),\n ## Read dataset\n df, meta = read_dataset_files(dataset, datadir)\n df = raw2abun(df)\n\n ## Get case and control samples\n classes_list = get_classes(meta)\n if len(classes_list[0]) == 0 or len(classes_list[1]) == 0:\n raise ValueError('Something wrong with ' + dataset + ' metadata.')\n H_smpls, dis_smpls = get_samples(meta, classes_list)\n\n dfdict.update({dataset: {'df': df, 'meta': meta, 'dis_smpls': dis_smpls, 'H_smpls': H_smpls, 'classes': classes_list}})\n print('\\nReading datasets... Finished.')\n return dfdict", "def split(self):\n\n ratio_c = 1 - self.ratio\n self.train, self.test = self.df.randomSplit([self.ratio, ratio_c], seed=12345)", "def groupby_train_test_split(df, selected_features=None, test_ratio=0.2, seed=12345, groupby='user_id'):\n\n ############################################################\n # Train Test Split\n ############################################################\n\n grp = df[groupby]\n n_splits = int(1 / test_ratio)\n groupkfold = GroupKFold(n_splits=n_splits)\n random.seed(seed)\n folds = groupkfold.split(df, groups = grp)\n train_idx, test_idx = next(folds)\n df_train, df_test = df.iloc[train_idx], df.iloc[test_idx]\n \n return df_train, df_test", "def toDataFrame(self, split=True):\n\n def cleanColumns(df):\n # Cleanup columns\n colnames = df.columns\n colnames=[c.replace('\\'','') for c in colnames]\n colnames=[c[1:] if c.startswith('/') else c for c in colnames]\n # If there is only one group, we remove the group key\n groupNames = self.groupNames\n if len(groupNames)==1:\n nChar = len(groupNames[0])\n colnames=[c[nChar+1:] for c in colnames] # +1 for the \"/\"\n df.columns = colnames\n\n fh = self['data']\n if split:\n # --- One dataframe per group. 
We skip group that have empty data\n dfs={}\n for group in fh.groups():\n try:\n df = group.as_dataframe(time_index=True)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = group.as_dataframe(time_index=False)\n if len(df)>0:\n dfs[group.name] = df\n if len(dfs)==1:\n dfs=dfs[group.name]\n return dfs\n else:\n # --- One dataframe with all data\n try:\n df = fh.as_dataframe(time_index=True)\n cleanColumns(df)\n df.insert(0,'Time_[s]', df.index.values)\n df.index=np.arange(0,len(df))\n except KeyError:\n df = fh.as_dataframe(time_index=False)\n return df", "def get_countries_percent_of_region(db, region, percent):\n # Hint: Use INNER JOIN to join both the tables StatelessCountByCountry\n # and StatelessCountByRegion.\n # Select rows in WHERE based on three conditions:\n # 1: Region should be selected based on argument\n # 2: Both the tables should have same region\n # 3: Population of region should be compared with population from country\n # to check whether it meets criteria\n pass", "def collect_data(input_folder, ratio):\n # TODO implement ratio\n data = pd.DataFrame()\n\n folderpaths = [os.path.normpath((os.path.join(input_folder, x)))\n for x in os.listdir(input_folder) if not x.endswith('.gitkeep')]\n # for folder in folderpaths:\n for folder in folderpaths:\n filepaths = [os.path.normpath((os.path.join(folder, x)))\n for x in os.listdir(folder) if not x.endswith('.gitkeep')]\n for file in filepaths:\n df = pd.read_pickle(file)\n df = df[df['is_feas'] == 1]\n data = data.append(df[['frames', 'label']], ignore_index=True)\n\n return data.rename(columns={'frames': 'x', 'label': 'y'})", "def train_test_split_drifters():\n df = process_raw_df()\n ids = np.unique(df.index.get_level_values(level=0))\n rng = np.random.default_rng(seed=1)\n train_ids = np.sort(rng.choice(ids, size=len(ids)//2, replace=False))\n test_ids = np.sort(np.setdiff1d(ids, train_ids))\n train_df = df[df.index.get_level_values(level=0).isin(train_ids)].copy()\n test_df = df[df.index.get_level_values(level=0).isin(test_ids)].copy()\n return train_df, test_df", "def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data", "def interpolate_dataframes(ff):\n assert isinstance(ff, dict)\n year_min = ff['CA'][0].index[0]\n year_max = ff['CA'][0].index[-1]\n years = list(range(year_min, year_max + 1))\n for state in ff.keys():\n for cf in ff[state]:\n for year in years:\n if year not in cf.index:\n cf.loc[year] = cf.loc[year-1:year+1, :].sum(axis=0)\n cf.loc[year] = (cf.loc[year] / 2).astype(np.int64)\n cf.sort_index(inplace=True)\n return(ff)", "def split_df(df, 
n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks", "def split_data(input_data: List[Tuple[Any, int]], split_percentage: float = 0.70) -> Tuple[List[Tuple[str, int]],\n List[Tuple[str, int]]]:\n try:\n input_data = set(input_data)\n training_count = int(len(input_data) * split_percentage)\n\n training_data = set(random.sample(input_data, training_count))\n test_data = input_data - training_data\n except TypeError:\n training_count = int(len(input_data) * split_percentage)\n counts_list = set([item for item in range(len(input_data))])\n\n training_counts = set(random.sample(counts_list, training_count))\n test_counts = counts_list - training_counts\n\n training_data = [input_data[i] for i in training_counts]\n test_data = [input_data[i] for i in test_counts]\n\n return list(training_data), list(test_data)", "def top100_by_age(df_info, year, col):\n global ages, df_list\n #Remove inactive players\n data = df_info[df_info.Flag != \"i\"]\n data = data[data.Flag != \"wi\"]\n\n for age in ages:\n data_by_age = data[data[\"age\"] < age]#Select the desired players\n #Create a new dataframe with data from the top 100\n data_top100 = data_by_age.sort_values(col, ascending=False)\n data_top100 = data_top100.head(100)\n #The if clause differentiates between the 2 different formats\n if year > 2012:\n #Calculate the percentage in top100 and in total\n percentage_top100 = data_top100[data_top100[\"Sex\"] == \"F\"].shape[0]\n percentage_total = data_by_age[data_by_age[\"Sex\"] == \"F\"].shape[0] / data_by_age.shape[0]\n else:\n percentage_top100 = data_top100[data_top100[\"Flag\"] == \"w\"].shape[0]\n percentage_total = data_by_age[data_by_age[\"Flag\"] == \"w\"].shape[0] / data_by_age.shape[0]\n #Append the data to a list of dictionaries\n df_list_top100.append(\n {\"age\": \"under \" + str(age), \"percentage\": percentage_top100, \"year\": year})\n df_list_total.append(\n {\"age\": \"under \" + str(age), \"percentage\": percentage_total, \"year\": year})", "def partition_instances(instances, split_attribute, attribute_domains):\n # this is a group by split_attribute's domain, not by\n # the values of this attribute in instances\n # example: if split_attribute is \"level\"\n attribute_domain = attribute_domains[split_attribute] # [\"Senior\", \"Mid\", \"Junior\"]\n # Build a dictionary\n partitions = {} # key (attribute value): value (list of instances with this attribute value)\n # For loop through attributes in dictionary\n for attribute_value in attribute_domain:\n partitions[attribute_value] = []\n for instance in instances:\n index = int(split_attribute[3:])\n if instance[index] == attribute_value:\n partitions[attribute_value].append(instance)\n return partitions", "def percentage_od_xy(range,count):\n percentages = []\n for i in range:\n percentages.append(n_percentage_part(i, count))\n\n percentile_list = pd.DataFrame(\n {'percentage': range,\n 'od_needed': percentages\n })\n return percentile_list", "def prepare_stops_to_request(df: pd.DataFrame) -> list:\n return [split_df(df, i, i + 100) for i in range(0, len(df), 100)]", "def meetup_groups_dynamic(growth_df):\n\n def convert_to_percent(row):\n total_groups = row.sum()\n return row.apply(lambda x: x * 100 / total_groups)\n\n return growth_df.apply(convert_to_percent, axis=1)", "def split_data(df):\n # drop any instances that have missing 
values\n df = df.dropna()\n\n # define features\n features = df[['pitch_type', 'release_speed', 'release_spin_rate',\n 'if_fielding_alignment', 'launch_angle', 'launch_speed',\n 'hc_x', 'hc_y', 'stand', 'type', 'RH']]\n\n # make dummies for categorical features\n features = pd.get_dummies(features)\n\n # define label\n label = df['hit']\n\n # split data into test and training\n features_train, features_test, label_train, label_test = \\\n train_test_split(features, label, test_size=0.3)\n\n standard = StandardScaler()\n\n features_train = standard.fit_transform(features_train)\n features_test = standard.transform(features_test)\n\n return features_train, features_test, label_train, label_test", "def split_data(df: pd.DataFrame, cutoff_year: int) -> Tuple[pd.DataFrame, pd.DataFrame]:\n return df[df.year >= cutoff_year], df[df.year < cutoff_year]", "def data_pandas(detections):\n return DataWrapperPandas(detections, duplicates_radius=1)", "def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test", "def split_dataset(X: np.array, y: np.array, ratio=0.8):\n '''split dataset to train data and valid data'''\n X_train = X[:int(X.shape[0] * ratio)]\n y_train = y[:int(y.shape[0] * ratio)]\n X_valid = X[int(X.shape[0] * ratio):]\n y_valid = y[int(y.shape[0] * ratio):]\n dataset = tuple([X_train, y_train, X_valid, y_valid])\n\n return dataset", "def sliceByUniqueLabelsAndMakePages(filename, labels_per_csv, exception_col):\n\n # Helpers\n updated_rows = []\n updated_rows_start_index = 0\n unique_labels = {}\n unique_label_index = 0\n pages = []\n header = {}\n\n with open(filename) as infile:\n reader = csv.DictReader(infile)\n header = reader.fieldnames\n CSV_LAST_INDEX = getLastIndexWithoutHeader(filename)\n \n for index, row in enumerate(reader):\n for column_name, column_value in row.items():\n LABEL = column_name+column_value\n if column_name != exception_col and len(column_value) > 0:\n # print('column_name: {}, column_value: {}'.format(column_name, column_value))\n \n # handle unique labels count\n if not LABEL in unique_labels:\n unique_labels[LABEL] = 0\n unique_labels[LABEL] += 1\n \n updated_rows.append(row)\n updated_rows_current_index = len(updated_rows)\n \n # # print('index: {}, unique_labels_size: {}, CSV_LAST_INDEX: {}, Year: {}'.format(index, len(unique_labels), CSV_LAST_INDEX, row['year']))\n if(labels_per_csv < len(unique_labels) or index == CSV_LAST_INDEX):\n pages.append(updated_rows[updated_rows_start_index: updated_rows_current_index])\n updated_rows_start_index = updated_rows_current_index\n unique_labels={}\n \n print('Done: Splitting')\n return {PAGES: pages, HEADER: header}", "def split_into_frames(num_frames, data):\n inds = collections.defaultdict(list)\n for i, frame_id in enumerate(data[:, FRAME_ID_COLUMN]):\n inds[frame_id].append(i)\n return [data[inds[t + 1], :] for t in range(num_frames)]", "def split_data(df, split_method='fo', test_size=.2, random_state=42):\n if split_method == 'fo':\n train_set, test_set = _split_fo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 
'tfo':\n train_set, test_set = _split_tfo(df, test_size=test_size)\n elif split_method == 'ufo':\n train_set, test_set = _split_ufo(df,\n test_size=test_size,\n random_state=random_state)\n elif split_method == 'utfo':\n train_set, test_set = _split_utfo(df, test_size=test_size)\n else:\n raise HuitreError('Invalid data_split value, expect: ufo, utfo')\n train_set = train_set.reset_index(drop=True)\n test_set = test_set.reset_index(drop=True)\n return train_set, test_set", "def prepare_data(groups):\n all_dicts = []\n for idx, group in groups:\n res_dict = {'organism': group.organism.iloc[0]}\n for g_idx, row in group.iterrows():\n if pd.notna(row.label):\n res_dict[row.cmp_name] = {'label': row.label, 'mic': row.MIC}\n else:\n res_dict[row.cmp_name] = {'label': '', 'mic': row.MIC}\n all_dicts.append(res_dict)\n return all_dicts", "def split_data(y, num_folds=10):\r\n print(f\"Creating splits...\", end=\"\")\r\n\r\n fold_dict = dict()\r\n start_index = 0\r\n # if the number of proteins is not evenly divisible by the number of folds, the last samples are distributed\r\n # evenly across folds\r\n fold_size = math.floor(len(y) / num_folds)\r\n for fold in range(num_folds):\r\n fold_dict[fold] = list(range(start_index, start_index + fold_size))\r\n start_index += fold_size\r\n\r\n # distributing samples which are left over (due to the number of samples not being divisible by the number of folds)\r\n # evenly across folds\r\n fold = 0\r\n while start_index < len(y):\r\n fold_dict[fold] += [start_index]\r\n start_index += 1\r\n fold += 1\r\n\r\n # sanity check that we did not loose any samples while splitting\r\n assert sum([len(fold) for fold in fold_dict.values()]) == len(y), \"Number of samples after splitting does not \" \\\r\n \"match number of samples before splitting.\"\r\n\r\n additional_text = \"\" if len(y) % num_folds == 0 else f\" with {len(y) % num_folds} left over samples \" \\\r\n f\"being distributed evenly among folds\"\r\n print(f\"done! Created {num_folds} splits of size {fold_size}{additional_text}.\")\r\n\r\n # TODO: use the results of this to determine if we should proceed with the current folds\r\n test_stratification(fold_dict, y)\r\n\r\n return fold_dict", "def _split_by_patients(self, patients, val_split=0.2, test_split=0.1, random_state=42):\n train, test = train_test_split(patients, test_size=test_split, random_state=random_state)\n train, val = train_test_split(train, test_size=val_split, random_state=random_state)\n\n return train, val, test" ]
[ "0.59946746", "0.5814314", "0.5597616", "0.55113435", "0.5504025", "0.54890805", "0.5455985", "0.54370016", "0.54187405", "0.54187405", "0.54108846", "0.54088694", "0.5390827", "0.5368966", "0.53635633", "0.53455263", "0.53044295", "0.5286926", "0.52688", "0.5268422", "0.5253747", "0.52520144", "0.5217308", "0.5201055", "0.5160175", "0.5145082", "0.512048", "0.50514823", "0.5047043", "0.5029761", "0.49982542", "0.4997753", "0.49889454", "0.49642628", "0.4946201", "0.4946201", "0.4946201", "0.49436745", "0.49413216", "0.49406245", "0.4930731", "0.49304456", "0.4927088", "0.49118525", "0.4911708", "0.48810294", "0.48760673", "0.48564285", "0.48557127", "0.48523974", "0.4847472", "0.48460853", "0.48447013", "0.48420998", "0.48353043", "0.4832168", "0.4829812", "0.48294502", "0.48275185", "0.4827206", "0.48147058", "0.47873276", "0.47732157", "0.47644928", "0.47541386", "0.47499466", "0.47427773", "0.47413883", "0.4737422", "0.4709498", "0.47056937", "0.46927774", "0.4690101", "0.46855325", "0.46628973", "0.4658852", "0.46579063", "0.4657752", "0.46489954", "0.4641104", "0.46360266", "0.46279553", "0.4623136", "0.46091908", "0.4608414", "0.4603936", "0.45979607", "0.45940772", "0.45875436", "0.45865422", "0.45852637", "0.45815542", "0.4576736", "0.45732874", "0.45722905", "0.4569304", "0.45687675", "0.45685235", "0.45660332", "0.4565526" ]
0.58939797
1
Performs a standard classification test with the given classifier.
def classify(dataset,classifier,feat_mask=None):
    train = dataset.get_data('train',True)
    X_train = train['x']
    if feat_mask is not None:
        X_train = X_train[:,feat_mask]
    y_train = train['y']
    classifier.fit(X_train,y_train)

    test = dataset.get_data('test',True)
    X_test = test['x']
    if feat_mask is not None:
        X_test = X_test[:,feat_mask]
    y_test = test['y']

    pred = classifier.predict(X_test)
    acc = np.count_nonzero(pred==y_test) / len(y_test)
    return acc,y_test,pred
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runClassifier(clf,title,xtrain,ytrain,xtest,ytest):\n # train the model using the classifier's fit function\n # use a dummy variable to avoid gibberish being printed\n clf.fit(xtrain, ytrain)\n\n # use the model to predict labels for the test set\n # note: this step is redundant if you just want the score\n #predictions = clf.predict(xtest)\n\n # the score function will run the predict method and then calculate\n # the accuracy based on the labels it calculates and the actual labels\n score = clf.score(xtest, ytest)\n\n # print the accuracy of our model on the test data\n print \"%s Accuracy: %0.2f%%\" % (title,(100.0 * score))\n\n # return the predictions in case the caller is interested\n #return predictions", "def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)", "def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def testClassifier(x_train, y_train, x_test, y_test, clf):\n #metrics = []\n start = dt.now()\n clf.fit(x_train, y_train)\n end = dt.now()\n print 'training time: ', (end - start)\n \n # add training time to metrics\n #metrics.append(end-start)\n \n start = dt.now()\n yhat = clf.predict(x_test)\n end = dt.now()\n print 'testing time: ', (end - start)\n \n # add testing time to metrics\n #metrics.append(end-start)\n \n print 'classification report: '\n# print classification_report(y_test, yhat)\n pp(classification_report(y_test, yhat))\n \n print 'f1 score'\n print f1_score(y_test, yhat, average='macro')\n \n print 'accuracy score'\n accuracy = accuracy_score(y_test, yhat)\n print accuracy\n #metrics.append(accuracy)\n #precision = precision_score(y_test, yhat, average=None)\n #recall = recall_score(y_test, yhat, average=None)\n \n # add precision and recall values to metrics\n #for p, r in zip(precision, recall):\n # metrics.append(p)\n # metrics.append(r)\n \n \n #add macro-averaged F1 score to metrics\n #metrics.append(f1_score(y_test, yhat, average='macro'))\n \n print 'confusion matrix:'\n print confusion_matrix(y_test, yhat)\n \n # plot the confusion matrix\n plt.imshow(confusion_matrix(y_test, yhat), interpolation='nearest')\n plt.show()\n \n return accuracy", "def classify(X, Y, skf, clf, round_threshold=0.5, average=\"macro\"):\n X = X.values\n if isinstance(Y, pd.Series):\n labels = [\"{}_0\".format(Y.name), \"{}_1\".format(Y.name)]\n Y = np.ravel(Y)\n else:\n Y, labels = Y.values, list(Y.columns)\n\n fold_results = []\n for train, test in skf.split(X, Y):\n current_clf = clone(clf)\n X_train, X_test, Y_train, Y_test = X[train], X[test], Y[train], Y[test]\n\n current_clf.fit(X_train, Y_train)\n Y_prob = current_clf.predict_proba(X_test)\n Y_pred = current_clf.predict(X_test)\n\n (p, r, f1, auc, jac, hl, p_c,\n r_c, f1_c, s_c) = calculate_metrics(Y_test, Y_pred, Y_prob, average)\n\n # calculate overall scores for current fold\n fold_scores = {\n \"precision\": p,\n \"recall\": r,\n \"f1\": f1,\n \"auc\": auc,\n \"jaccard\": jac,\n \"hamming_loss\": hl\n }\n\n for i in range(len(labels)):\n fold_scores[\"precision_{0}\".format(labels[i])] = p_c[i]\n fold_scores[\"recall_{0}\".format(labels[i])] = r_c[i]\n 
fold_scores[\"f1_{0}\".format(labels[i])] = f1_c[i]\n fold_scores[\"support_{0}\".format(labels[i])] = s_c[i]\n\n fold_results.append({\n \"scores\": fold_scores,\n \"y_pred\": Y_pred,\n \"y_prob\": Y_prob,\n \"y_test\": Y_test\n })\n\n scores = {}\n for score in fold_results[0][\"scores\"].keys():\n values = [s[\"scores\"][score] for s in fold_results]\n scores[score] = (np.sum(values) if score.startswith(\"support_\")\n else np.mean(values))\n\n return scores, fold_results", "def model(classifier, data):\n print(\"Beggining to test model\")\n train, test = cross_validation.train_test_split(data, test_size=.30)\n f,c = train[:,1:], train[:,0]\n classifier.fit(f,c,False)\n print(\"Score: \" + classifier.score(f,c))\n print(\"Finished testing model\")", "def test_classification(model, testing_quakes, device, data_dir):\n ds_train = TriggeredEarthquake(\n data_dir=data_dir,\n testing_quakes=testing_quakes,\n downloadable_data=DownloadableData.TRIGGERED_EARTHQUAKE,\n mode=DatasetMode.INFERENCE,\n transform=triggered_earthquake_transform(random_trim_offset=False),\n )\n ds_test = TriggeredEarthquake(\n data_dir=data_dir,\n testing_quakes=testing_quakes,\n downloadable_data=DownloadableData.TRIGGERED_EARTHQUAKE,\n mode=DatasetMode.TEST,\n transform=triggered_earthquake_transform(random_trim_offset=False)\n )\n train_loader = DataLoader(ds_train, batch_size=1, num_workers=10, shuffle=True)\n test_loader = DataLoader(ds_test, batch_size=1, num_workers=10, shuffle=True)\n\n svc = create_classifier(model, train_loader, type='svc', device=device)\n acc, cm = report_accurarcy(model, svc, test_loader, device=device)\n\n return acc, cm, svc", "def classify(train=None, test=None, data=None, res_dir=\"res/\", disp=True, outfilename=None):\n utils.print_success(\"Comparison of differents classifiers\")\n if data is not None:\n train_features = data[\"train_features\"]\n train_groundtruths = data[\"train_groundtruths\"]\n test_features = data[\"test_features\"]\n test_groundtruths = data[\"test_groundtruths\"]\n else:\n train = utils.abs_path_file(train)\n test = utils.abs_path_file(test)\n train_features, train_groundtruths = read_file(train)\n test_features, test_groundtruths = read_file(test)\n if not utils.create_dir(res_dir):\n res_dir = utils.abs_path_dir(res_dir)\n classifiers = {\n \"RandomForest\": RandomForestClassifier()\n # \"RandomForest\": RandomForestClassifier(n_estimators=5),\n # \"KNeighbors\":KNeighborsClassifier(3),\n # \"GaussianProcess\":GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),\n # \"DecisionTree\":DecisionTreeClassifier(max_depth=5),\n # \"MLP\":MLPClassifier(),\n # \"AdaBoost\":AdaBoostClassifier(),\n # \"GaussianNB\":GaussianNB(),\n # \"QDA\":QuadraticDiscriminantAnalysis(),\n # \"SVM\":SVC(kernel=\"linear\", C=0.025),\n # \"GradientBoosting\":GradientBoostingClassifier(),\n # \"ExtraTrees\":ExtraTreesClassifier(),\n # \"LogisticRegression\":LogisticRegression(),\n # \"LinearDiscriminantAnalysis\":LinearDiscriminantAnalysis()\n }\n for key in classifiers:\n utils.print_success(key)\n clf = classifiers[key]\n utils.print_info(\"\\tFit\")\n clf.fit(train_features, train_groundtruths)\n utils.print_info(\"\\tPredict\")\n predictions = clf.predict(test_features)\n\n if outfilename is not None:\n with open(outfilename, \"w\") as filep:\n for gt, pred in zip(test_groundtruths, predictions):\n filep.write(gt + \",\" + pred + \"\\n\")\n\n # Global\n data = [key]\n data.append(str(precision_score(test_groundtruths, predictions, average='weighted')))\n 
data.append(str(recall_score(test_groundtruths, predictions, average='weighted')))\n data.append(str(f1_score(test_groundtruths, predictions, average='weighted')))\n data = \",\".join(data)\n if disp:\n print(data)\n else:\n with open(res_dir + \"global.csv\", \"a\") as filep:\n filep.write(data + \",\\n\")\n # Local\n for index, tag in enumerate(list(set(train_groundtruths))):\n precision = precision_score(test_groundtruths, predictions, average=None)\n recall = recall_score(test_groundtruths, predictions, average=None)\n f1 = f1_score(test_groundtruths, predictions, average=None)\n line = key + \",\" + str(precision[index]) + \",\" + str(recall[index]) + \",\" + str(f1[index])\n if disp:\n print(line)\n else:\n with open(res_dir + \"tag_\" + tag + \".csv\", \"a\") as filep:\n filep.write(line + \",\\n\")\n return predictions", "def test_classifiers(\n X,\n y,\n scoring=default_scorers,\n score_aggreg=default_score_aggreg,\n n_features=7,\n # an int will be transformed to a list (with different num of features) of given size\n clfs=None,\n nfolds=10,\n scale=None,\n decompose=None,\n select=None,\n decompose_params={},\n print_progress=False,\n score_to_plot=None,\n):\n scoring = scoring or default_scorers\n score_aggreg = score_aggreg or default_score_aggreg\n\n if isinstance(\n n_features, int\n ): # if n_features is an int, it's the number of different feature set lens to try out\n # ... so make this feature set len list\n total_n_features = np.shape(X)[1]\n n_features = list(\n range(1, total_n_features + 1, int(np.floor(total_n_features / n_features)))\n )[:n_features]\n y = np.asarray(y, dtype='|S6')\n n_features = np.array(n_features)\n\n if clfs is None:\n clfs = default_classifiers\n\n clfs = clfs_to_dict_clfs(clfs)\n\n general_info_dict = dict()\n if (\n scale is not None and scale is not False\n ): # preprocessing.StandardScaler(), preprocessing.MinMaxScaler()\n if scale is True:\n scale = preprocessing.StandardScaler()\n general_info_dict['scale'] = get_name(scale)\n if decompose is not None and decompose is not False:\n if decompose is True:\n decompose = decomposition.PCA(\n **decompose_params\n ) # PCA, KernelPCA, ProbabilisticPCA, RandomizedPCA, TruncatedSVD\n general_info_dict['decompose'] = get_name(decompose)\n\n clf_results = list()\n\n for i_nfeats, nfeats in enumerate(n_features):\n for i_clf, clf in enumerate(clfs):\n clf_name = list(clf.keys())[0]\n clf = clf[clf_name]\n d = dict(general_info_dict, **{'model': clf_name, 'nfeats': nfeats})\n if print_progress:\n printProgress(\n '{}: nfeats={}, nfolds={}'.format(\n clf_name, n_features[i_nfeats], nfolds\n )\n )\n # try:\n start_time = datetime.now()\n score_result = score_classifier(\n X,\n y,\n clf=clf,\n nfeats=nfeats,\n scoring=scoring,\n score_aggreg=score_aggreg,\n nfolds=nfolds,\n scale=scale,\n decompose=decompose,\n select=select,\n decompose_params=decompose_params,\n )\n d.update({'seconds': (datetime.now() - start_time).total_seconds()})\n d.update(score_result.to_dict())\n # except ValueError as e:\n # raise e\n # print(\"Error with: {} ({} features)\".format(get_name(clf),\n # n_features[i_nfeats]))\n\n clf_results.append(d) # accumulate results\n\n clf_results = pd.DataFrame(clf_results)\n if score_to_plot:\n if score_to_plot is True:\n score_to_plot = mk_aggreg_score_name(\n score_aggreg_name=list(mk_score_aggreg_dict(score_aggreg).keys())[0],\n score_name=list(mk_scoring_dict(scoring).keys())[0],\n )\n plot_score(clf_results, score_to_plot)\n\n return reorder_columns_as(clf_results, ['model', 
'nfeats', 'seconds'])", "def test(self, test_instances, test_labels):\n scores = self.classifier.predict(test_instances)\n # TODO: print report", "def test_classification(test_data, model, criterion, batch_size, device, generate_batch=None): \n \n # Set model to evaluation mode\n model.eval()\n test_loss = 0\n test_acc = 0\n \n # Create data loader\n data = DataLoader(test_data, batch_size=batch_size, collate_fn=generate_batch)\n \n # Iterate through data by batch of observations\n for feature, target_class in data:\n \n # Load data to specified device\n feature, target_class = feature.to(device), target_class.flatten().to(device)\n \n # Set no update to gradients\n with torch.no_grad():\n \n # Make predictions\n output = model(feature)\n \n # Calculate loss for given batch\n loss = criterion(output, target_class.long())\n\n # Calculate global loss\n test_loss += loss.item()\n \n # Calculate global accuracy\n test_acc += (output.argmax(1) == target_class).sum().item()\n\n return test_loss / len(test_data), test_acc / len(test_data)", "def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def _classifier(self, test_set):\r\n return self._mahalanobis_classifier(test_set.features, self.targets)", "def test_text_classifier_test(self):\n pass", "def test(self, dataset = None, debug = True, labels = None):\n\n\t\tdataset = self.vectorize(dataset) if (dataset != None) else self.testing_set_vector;\n\t\tlabels = labels if (labels != None) else self.testing_labels;\n\n\t\tprediction = self.classifier.predict(dataset)\n\n\t\tif(debug):\n\t\t\tprint(classification_report(labels, prediction))\n\n\t\treturn prediction", "def test_classifier(classifier: nn.Module, dataset: Dataset, batch_size: int = 32) -> Tuple[float, np.array]:\n\n # define data loader\n test_dl = torch.utils.data.DataLoader(\n dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=4,\n 
)\n\n # define device\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n # set model in evaluation mode\n classifier = classifier.to(device)\n classifier.eval()\n\n # define accuracy\n accuracy = 0\n predictions = []\n\n with torch.no_grad():\n for i, data in enumerate(test_dl):\n # unpack and send data to device\n X, y = data\n X, y = X.to(device), y.to(device)\n\n # predict the class\n y_pred = classifier(X)\n\n # gather predictions\n y_pred = torch.argmax(y_pred, dim=1)\n accuracy += torch.sum(y_pred == y).item()\n predictions += list(y_pred.cpu().numpy())\n\n return accuracy / len(dataset), np.array(predictions)", "def classification(self,a_train,a_test,c_train,c_test,classifier):\n le =LabelEncoder()\n le.fit(c_train)\n c_train = le.transform(c_train)\n c_test = le.transform(c_test)\n if classifier==\"GNB\": #Gaussian Naive Bayes\n gnb = GaussianNB()\n gnb.fit(a_train, c_train)\n c_pred = gnb.predict(a_test)\n elif classifier==\"DT\": #Decision Tree\n dt=DecisionTreeClassifier()\n dt.fit(a_train, c_train)\n c_pred = dt.predict(a_test)\n elif classifier==\"KNN\": #K-Next-Neighbors\n kn=KNeighborsClassifier(n_neighbors=5)\n kn.fit(a_train, c_train)\n c_pred = kn.predict(a_test)\n elif classifier==\"RF\": #Random Forest\n rf=RandomForestClassifier()\n rf.fit(a_train, c_train)\n c_pred = rf.predict(a_test)\n elif classifier==\"SVC\": # Support Vector Classifier\n \"\"\"\n SVC needs normalisation of Feature Values to scale of [-1,1] or [0,1] depending on sign of them\n \"\"\"\n if a_train.min()<0:\n mms = MinMaxScaler(feature_range=(-1,1))\n else:\n mms = MinMaxScaler()\n mms.fit(a_train)\n a_train = mms.transform(a_train)\n a_test = mms.transform(a_test)\n svc=SVC(cache_size=2000,C=1, probability=True,kernel='rbf')\n svc.fit(a_train,c_train)\n #c_pred = svc.predict(a_test) did not work, that's why it is predicted manual\n new_prob = svc.predict_proba(a_test)\n samples=new_prob.shape[0]\n c_pred= np.array\n for k in range(samples):\n c_pred=np.append(c_pred,new_prob[k].argmax())\n c_pred = c_pred[1:samples+1]\n elif classifier==\"DC\": #Dummy Classifier\n dc=DummyClassifier(strategy=\"uniform\")\n dc.fit(a_train, c_train)\n c_pred = dc.predict(a_test)\n elif classifier==\"GMM\": #Gaussian Mixture Modell\n #number of existing classes get passed to the GMM (n_classes)\n n_classes_train = len(np.unique(c_train))\n n_classes_test = len(np.unique(c_test))\n if n_classes_train>n_classes_test:\n n_classes = n_classes_train\n else:\n n_classes = n_classes_test\n #init_params='', because initial values get calculated manual\n gmm = GMM(n_components=n_classes,init_params='')\n #array of feature values of class i get extracted for further process\n gmm.means_=np.array([a_train[c_train==i,:].mean(axis=0) for i in xrange(n_classes)])\n gmm.weights_=np.array([a_train[c_train==i,:].shape[0]/float(c_train.shape[0]) for i in xrange(n_classes)])\n \n gmm_covars = np.zeros((a_train.shape[1]))\n for i in xrange(n_classes):\n valuesOfClassi = a_train[c_train==i,:]\n valuesOfClassi = np.asarray(valuesOfClassi).T\n matrixOfCov = np.cov(valuesOfClassi)+gmm.min_covar*np.eye(valuesOfClassi.shape[0])\n variance = np.array([matrixOfCov[j,j] for j in xrange(matrixOfCov.shape[0])])\n gmm_covars=np.vstack((gmm_covars,variance))\n gmm_covars=gmm_covars[1:,:] #deletes initial row with zeros\n \n gmm.covars_=gmm_covars\n c_pred = gmm.predict(a_test)\n \n c_pred=le.inverse_transform(c_pred)\n return c_pred", "def check_classifier():\n content = []\n labels = []\n file = 
'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def base_classifier(traitar_model, phenotype_feature_table, features, phenotype, out, do_normalization, get_misclassified_selected):\n model = pd.read_csv(traitar_model, sep = \"\\t\", index_col = 0)\n sel_feats = model.index\n table = pd.read_csv(phenotype_feature_table, sep = \"\\t\", index_col = 0)\n feats = pd.read_csv(features, sep = \"\\t\", index_col = 0).index\n #target\n pt_notnull = pd.notnull(table.loc[:, phenotype])\n y_p = table.loc[:, phenotype].loc[pt_notnull,]\n y_p[y_p == 0] = -1\n #features\n x_p = table.loc[:, feats].loc[pt_notnull,]\n if do_normalization:\n scaler = preprocessing.StandardScaler(with_mean = True, with_std = True).fit(x_p)\n x_p = pd.DataFrame(data = scaler.transform(x_p), index = x_p.index, columns = x_p.columns)\n #train decision stump\n preds = [tree.DecisionTreeClassifier(max_depth = 1, class_weight = 'balanced').fit(pd.DataFrame(x_p.loc[:, i]), y_p).predict(pd.DataFrame(x_p.loc[:, i])) for i in sel_feats] \n conf_per_feat = pd.DataFrame([nested_cv.nested_cv.confusion_m(y_p, pd.Series(p, index = y_p.index).T) for p in preds ])\n conf_per_feat.index = sel_feats\n conf_per_feat.columns = [\"TN\", \"FP\", \"FN\", \"TP\"]\n #get macro accuracy\n bacc = conf_per_feat.apply(lambda x: nested_cv.nested_cv.bacc(nested_cv.nested_cv.recall_pos_conf(x), nested_cv.nested_cv.recall_neg_conf(x)), axis = 1)\n perf_per_feat = pd.concat([conf_per_feat, bacc], 1)\n perf_per_feat.columns = [\"TN\", \"FP\", \"FN\", \"TP\", \"MACC\"]\n feat_df = pd.concat([model.drop([\"TN\", \"FP\", \"FN\", \"TP\", \"MACC\"], axis = 1, inplace = False), perf_per_feat], axis = 1)\n #feat_df = pd.concat([model.drop(\"cor\", axis = 1, inplace = False), perf_per_feat], axis = 1)\n feat_df.sort(columns = [\"MACC\"], ascending = False).to_csv(out, float_format='%.3f', sep = \"\\t\")\n #get misclassified for a selected marker\n if get_misclassified_selected:\n preds_indexed = pd.DataFrame(preds, index = sel_feats).T\n preds_target = preds_indexed.loc[:, get_misclassified_selected]\n preds_target.index = x_p.index\n gs_target = y_p \n #false positives\n fp = gs_target.loc[(gs_target == -1) & (preds_target == 1)]\n #false negatives\n fn = gs_target.loc[(gs_target == 1) & 
(preds_target == -1)]\n fn.to_csv(\"%s_false_neg.dat\" % get_misclassified_selected, header = False, sep = \"\\t\")\n fp.to_csv(\"%s_false_pos.dat\" % get_misclassified_selected, header = False, sep = \"\\t\")", "def classify(self, verbose=True, print_scores=False):\n if verbose:\n print(\"%s: Training model ...\" % self.clf_name)\n self.clf.fit(self.X_train, self.y_train)\n\n if verbose:\n print(\"%s: Calculating probablities ... \" % self.clf_name)\n y_proba = self.clf.predict_proba(self.X_test)\n\n if verbose:\n print(\"%s: Making predictions\" % self.clf_name)\n y_pred = self.clf.predict(self.X_test)\n\n if verbose:\n print(\"%s: Calculating metrics ...\" % self.clf_name)\n res = ClassificationResult(self.clf, self.y_test, y_pred, y_proba[:, 1])\n res.calculate_scores()\n\n # Print result if print_scores == True\n if print_scores:\n res.print_metrics\n \n return res", "def evaluate_model(model, X_test, y_test, category_names):\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))", "def classify(self, x, y):\n\t\tif self.classific_method==\"LogisticRegression\":\n\t\t\tclf = LogisticRegression().fit(x,y)\n\t\t\tscore = clf.score(x,y)\n\t\t\tparams = {\"coef\" : clf.coef_, \"intercept\" : clf.intercept_}\n\n\t\telif self.classific_method==\"RidgeClassifier\":\n\t\t\tclf = RidgeClassifier().fit(x,y)\n\t\t\tscore = clf.score(x,y)\n\t\t\tparams = clf.get_params()\n\n\t\telif self.classific_method==\"MLPClassifier\":\n\t\t\tclf = MLPClassifier(solver='lbfgs',alpha=1e-5,hidden_layer_sizes=(5,2),\\\n\t\t\t\t\t\t\t\trandom_state=1,max_iter=1000)\n\t\t\tclf.fit(x, y)\n\t\t\tparams = {\"coefs\" : clf.coefs_}\n\t\t\tscore = clf.score(x,y)\n\n\t\telif self.classific_method==\"RandomForestClassifier\":\n\t\t\t# clf = RandomForestClassifier(n_estimators=100, max_depth=20, random_state=2)\n\t\t\t\n\t\t\t# model = RandomForestClassifier(random_state=2)\n\t\t\t# grid_parameters = {'n_estimators': [i for i in range(300, 601, 50)],\\\n\t\t\t# \t\t\t\t\t'min_samples_split' : [2, 10, 20, 30, 40]}\n\t\t\t# grid = GridSearchCV(estimator=model, param_grid=grid_parameters)\n\t\t\t# grid_result = grid.fit(x, y)\n\n\t\t\t# n_estimator = grid_result.best_params_['n_estimators']\n\t\t\t# min_samples_split = grid_result.best_params_['min_samples_split']\n\t\t\t\n\n\t\t\tclf = RandomForestClassifier(random_state=2,n_estimators=400,\\\n\t\t\t\t\t\t\t\t\t\t min_samples_split=30, max_depth=20)\n\t\t\tclf.fit(x,y)\n\t\t\tscore = clf.score(x,y)\n\t\t\tparams = {}#{\"params\" : grid_result.best_params_}\n\n\t\telif self.classific_method==\"NeuralNetwork\":\n\t\t\tseed = 7\n\t\t\tnp.random.seed(seed)\n\t\t\tinput_shape = x.shape[1]\n\n\n\t\t\tclf = build_keras_model(input_shape,optimizer=\"adam\",init=\"glorot_normal\")\n\n\t\t\tn_epochs = 200\n\t\t\tn_sub_epochs = 10\n\t\t\tsub_epoch_size = len(x) // n_sub_epochs\n\t\t\t# for epoch_number in range(50):\n\t\t\t# \tfor sub_epoch in range(n_sub_epochs):\n\t\t\t# \t\tX = x[sub_epoch * sub_epoch_size: (sub_epoch + 1) * sub_epoch_size]\n\t\t\t# \t\tY = y[sub_epoch * sub_epoch_size: (sub_epoch + 1) * sub_epoch_size]\n\t\t\t# \t\thist = clf.fit(X,Y,epochs=1);\n\t\t\thist=clf.fit(x, y, epochs=n_epochs, batch_size=sub_epoch_size, verbose=0)\n\t\t\tacc = hist.history['accuracy']\n\t\t\tloss = hist.history['loss']\n\t\t\tscore = acc[-1]\n\t\t\tparams = {\"acc\" : acc, \"loss\" : 
loss}\n\n\t\treturn clf, score, params", "def test_train(C, gamma):\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n assert isinstance(clf, svm.SVC)\n assert isinstance(score, float)", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def test_text_classifier_train(self):\n pass", "def classify_split(clf, X_train, Y_train, X_test, Y_test):\n\n # Starting timer\n start = time.time()\n\n # Fitting classifier with training data\n clf.fit(X_train, Y_train)\n\n # Ending timer\n end = time.time()\n\n # Predicting test data\n preds = clf.predict(X_test)\n\n # Calculating the desired metrics\n c_matrix = confusion_matrix(Y_test, preds)\n accuracy = accuracy_score(Y_test, preds)\n precision = precision_score(Y_test, preds)\n recall = recall_score(Y_test, preds)\n f1 = f1_score(Y_test, preds)\n _time = end - start\n\n return {\n 'c_matrix': c_matrix,\n 'accuracy': accuracy,\n 'precision': precision,\n 'recall': recall,\n 'f1': f1,\n 'time': _time\n }", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def classify(self, example):\n raise NotImplementedError()", "def test_create_classifier(self):\n body = SimpleClassifier()\n response = self.client.open(\n '/v1/simple/createclassifier',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def score_classifier(\n X,\n y,\n clf,\n nfeats=None,\n scoring=default_scorers,\n score_aggreg=default_score_aggreg,\n scale=None,\n decompose=None,\n select=None,\n decompose_params={},\n nfolds=10,\n shuffle=True,\n random_fold_state=None,\n include_train_stats=False,\n):\n # give scoring and score_aggreg elements some names\n scoring = scoring or default_scorers\n scoring = mk_scoring_dict(scoring)\n score_aggreg = score_aggreg or default_score_aggreg\n score_aggreg = mk_score_aggreg_dict(score_aggreg)\n\n if nfeats is None:\n nfeats = np.shape(X)[1]\n\n # X = X[:, :nfeats]\n\n stratified_k_fold = StratifiedKFold(\n y, n_folds=nfolds, shuffle=shuffle, random_state=random_fold_state\n )\n score_info = list()\n for train, test in stratified_k_fold:\n d = dict()\n\n X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]\n\n if include_train_stats:\n d['train_pts'] = np.shape(X_train)[0]\n d['train_nfeats'] = np.shape(X_train)[1]\n\n pipeline_steps = list()\n if scale: # preprocessing.StandardScaler(), preprocessing.MinMaxScaler()\n pipeline_steps.append(('scale', scale))\n if decompose:\n pipeline_steps.append(('decompose', decompose))\n if select:\n pipeline_steps.append(('select', feature_selection.SelectKBest(k=nfeats)))\n else:\n X = X[:, :nfeats]\n\n pipeline_steps.append(('clf', clf))\n\n pipeline = Pipeline(steps=pipeline_steps)\n\n pipeline.fit(X_train, y_train)\n y_pred = pipeline.predict(X_test)\n\n for score_name, score_fun in scoring.items():\n d[score_name] = score_fun(y_test, y_pred)\n score_info.append(d)\n\n # return score_info\n score_info = pd.DataFrame(score_info)\n score_result = pd.Series()\n for score_aggreg_name, score_aggreg_fun in score_aggreg.items():\n t = score_info.apply(score_aggreg_fun)\n t.set_axis(\n axis=0,\n labels=[\n 
mk_aggreg_score_name(score_aggreg_name, score_name)\n for score_name in t.index.values\n ],\n )\n score_result = score_result.append(t)\n\n return score_result", "def testModel( self, classTest, classPred):", "def _classifyROMs(self, classifier, features, clusterFeatures):\n # the actual classifying algorithms is the unSupervisedEnging of the QDataMining of the PP Model\n ## get the instance\n classifier = classifier.interface.unSupervisedEngine\n # update classifier features\n classifier.updateFeatures(features)\n # make the clustering instance)\n classifier.train(clusterFeatures)\n # label the training data\n labels = classifier.evaluate(clusterFeatures)\n return labels", "def batch_test(classifier,labeledfiles):\r\n\tcorrect_cnt = 0\r\n\tfor filename, label in labeledfiles:\r\n\t\twlist = utils.load(filename)\r\n\t\tif classifier.cl == 'f':\r\n\t\t\tclassifier.test_one_prep(fe.extract_features(wlist,label)[0],fe.extract_features(wlist,label)[1])\r\n\t\t\tcontinue\t\r\n\t\telse:\r\n\t\t\trlabel = classifier.test_one(fe.extract_features(wlist,label)[0])\r\n\t\tif options.verbose == True:\r\n\t\t\tprint filename, '-->', rlabel\r\n\t\tif rlabel == label:\r\n\t\t\tcorrect_cnt += 1 \r\n\tif classifier.cl == 'f':\r\n\t\tclassifier.test_one_p()\r\n\t\t#classifier.test_one()\r\n\telse:\r\n\t\tprint \"accuracy = %f (%d/%d)\" % (float(correct_cnt)/len(labeledfiles), correct_cnt, len(labeledfiles))", "def _classifier(self, test_set):\r\n return self._euclidian_classifier(test_set.features, test_set.targets)", "def test_training(self):\n self.classifier.train(\"test\", self.message)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=Y_test.keys()))", "def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)", "def test(ctx, input_file, model, output_file):\n # parse extra input args\n kwargs = {ctx.args[i][2:]: ctx.args[i+1].strip('\"') for i in range(0, len(ctx.args), 2)}\n if 'use_groups' in kwargs:\n if kwargs['use_groups']:\n no_groups = 0\n else:\n no_groups = 1\n else:\n no_groups = 1\n click.echo('Init model from: ' + model)\n model_class = MDCASClassifier.init(True, None, None)\n click.echo('Make prediction on: ' + input_file)\n pred_df = model_class.test(model_bundle_file = model, test_set_file=input_file, gt_set_file=None, input_format='joblib', verbose=True, prob=1, no_groups=no_groups)\n click.echo('Save predictions to: ' + output_file)\n model_class.export_test(output_file)\n click.echo('Saved')", "def classification(trainData, trainLabels, testData, method):\n\n nClass = 2\n classLabels = [0,1]\n\n trainLabelsUnqArr = np.unique(trainLabels)\n\n if method == 'NaiveBayes':\n classifier = GaussianNB()\n model = classifier.fit(trainData, trainLabels)\n result = model.predict(testData)\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n elif method == 'knnVoting':\n\n classifier = KNeighborsClassifier(5)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'RandomForests':\n\n classifier = RandomForestClassifier(max_depth=10, random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = 
model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'SVM':\n\n classifier = svm.SVC(C=3, gamma=0.003, probability=True)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'AdaBoost':\n\n classifier = AdaBoostClassifier()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n ############################################\n importances = model.feature_importances_\n std = np.std([tree.feature_importances_ for tree in model.estimators_],\n axis=0)\n indices = np.argsort(importances)[::-1]\n # Print the feature ranking\n print(\"Feature ranking:\")\n for f in range(trainData.shape[1]):\n print(\"%d. 
feature %d (%f)\" % (f + 1, indices[f], importances[indices[f]]))\n # Plot the feature importances of the forest\n plt.figure()\n plt.title(\"Feature importances\")\n plt.bar(range(trainData.shape[1]), importances[indices],\n color=\"r\", yerr=std[indices], align=\"center\")\n plt.xticks(range(trainData.shape[1]), indices)\n plt.xlim([-1, trainData.shape[1]])\n plt.show()\n\n elif method == 'NeuralNetwork':\n classifier = MLPClassifier(alpha=1)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LogisticRegression':\n classifier = LogisticRegression()\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n proba = model.predict_proba(testData)\n proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n probaDf = pd.DataFrame(data=proba, columns=classLabels)\n\n elif method == 'LinearSVM':\n classifier = LinearSVC(random_state=0)\n model = classifier.fit(trainData, trainLabels)\n\n result = model.predict(testData)\n\n ############################################\n importances = model.coef_\n # std = np.std([tree.feature_importances_ for tree in model.estimators_],\n plt.plot(importances.shape[1])\n plt.ylabel('some numbers')\n plt.show()\n elif method == 'kNN':\n\n # logger.info(model.coef_)\n # proba = model.predict_proba(testData)\n # proba = fillinMatrix(proba, trainLabelsUnqArr, nClass)\n # probaDf = pd.DataFrame(data=proba, columns=classLabels)\n neigh = KNeighborsClassifier(n_neighbors=3)\n neigh.fit(trainData, trainLabels)\n\n result=neigh.predict(testData)\n probaDf=neigh.predict_proba(testData)\n\n # logger.info(method)\n\n return result, probaDf", "def test_score_with_fitted_estimator(self):\n model = GaussianNB().fit(self.binary.X.train, self.binary.y.train)\n\n # NOTE that the wrapper will pass a call down to `classes_`\n oz = ClassificationScoreVisualizer(model)\n assert_not_fitted(oz, [\"class_counts_\", \"score_\"])\n\n msg = \"could not determine class_counts_\"\n with pytest.warns(YellowbrickWarning, match=msg):\n oz.score(self.binary.X.test, self.binary.y.test)\n assert_fitted(oz, [\"classes_\", \"class_counts_\", \"score_\"])", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n\n ### create classifier\n clf = GaussianNB()#TODO\n clf.fit(features_train,labels_train)\n ### fit the classifier on the training features and labels\n #TODO\n\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)#TODO\n\n\n ### calculate and return the accuracy on the test data\n ### this is slightly different than the example, \n ### where we just print the accuracy\n ### you might need to import an sklearn module\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred,labels_test)#TODO\n return accuracy", "def classification_evaluation(self, test_set, predicted_values, certainty):\r\n\r\n percent_accuracy = self.percent_accuracy(test_set, predicted_values)\r\n one_zero = self.one_zero_loss(test_set, predicted_values)\r\n log_loss = self.log_loss(test_set, predicted_values, certainty)\r\n print(f\"Percent correct:\\t{percent_accuracy * 100:.2f}%\")\r\n print(f\"1/0 Loss:\\t\\t\\t{one_zero:.2f}\")\r\n print(\"Log Loss: \", log_loss)", "def classify(self, 
features):\n\n # TODO: finish this.\n features = np.array(features)\n return self.classifier.classify(features)", "def test_classifiers(train_docs, train_target, test_docs, test_target, min_docs, K, K2, removeStopWords):\n # test_classifiers(train_docs, train_target, test_docs, test_targets, i, 3)\n X_train_counts, X_train_tfidf, X_test_counts, X_test_tfidf = extract_text_features(train_docs, test_docs, min_docs, removeStopWords)\n \n \n num_docs, vocab_size = X_train_counts.shape\n print('Number of (training) documents =',num_docs)\n print('Vocabulary size =',vocab_size)\n \n\n # Now evaluate the classifiers on the test data\n # Print out the accuracy as a percentage for each classifier.\n # np.mean() can be used to calculate the accuracy. Round the accuracy to 2 decimal places.\n\n #predict according to different classifier--evaluate results \n predicted_multNB = fit_and_predict_multinomialNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_bernNB = fit_and_predict_BernoulliNB(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_tfidf, train_target, X_test_tfidf)\n predicted_LR = fit_and_predict_LR(X_train_counts, train_target, X_test_counts)\n predicted_KNN = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K)\n predicted_KNN2 = fit_and_predict_KNN(X_train_tfidf, train_target, X_test_tfidf, K2)\n \n predicted_base = np.array([FreqDist(test_target).most_common(1)[0][0]]*len(test_target))\n\n # count num of correct predictions / total\n np_test_target = np.array(test_target)\n base = np.sum(predicted_base == np_test_target)/len(np_test_target)*100\n multNB = np.sum(predicted_multNB == np_test_target)/len(np_test_target)*100\n bernNB = np.sum(predicted_bernNB == np_test_target)/len(np_test_target)*100\n LR = np.sum(predicted_LR == np_test_target)/len(np_test_target)*100\n KN = np.sum(predicted_KNN == np_test_target)/len(np_test_target)*100\n KN2 = np.sum(predicted_KNN2 == np_test_target)/len(np_test_target)*100\n\n \n print('\\tBase Accuracy: {:.3f}'.format(base))\n print('\\tAccuracy with multinomial naive Bayes: {:.2f}'.format(multNB))\n print('\\tAccuracy with Bernoulli naive Bayes: {:.2f}'.format(bernNB))\n print('\\tAccuracy with logistic regression: {:.2f}'.format(LR))\n print('\\tAccuracy with kNN, k={} classifier: {:2f}'.format(K, KN))\n print('\\tAccuracy with kNN, k={} classifier: {:.2f}'.format(K2, KN2))", "def classify(trait_arg, alpha):\r\n x = df['essay'][1:]\r\n x = x.str.lower()\r\n y = df[trait_arg][1:]\r\n\r\n print(\"Predicting \", trait_arg, \" with alpha = \", alpha)\r\n print(\"Test set, Train Set ratio: 1:3\")\r\n\r\n # Test train split in 25 : 75 ratio\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=11)\r\n\r\n # TF-IDF vectorizer\r\n vectorizer = TfidfVectorizer()\r\n xx_train = vectorizer.fit_transform(x_train)\r\n xx_test = vectorizer.transform(x_test)\r\n\r\n # Multinomial Naive Bayes Classifier\r\n classifier = MultinomialNB(alpha=alpha)\r\n classifier.fit(xx_train, y_train)\r\n\r\n predictions = classifier.predict(xx_test)\r\n print(\"Confusion Matrix:\")\r\n print(classification_report(y_test, predictions))\r\n score = accuracy_score(y_test, predictions)\r\n print(\"Accuracy:\", score)", "def classify(self, X, y):\n\n clf = svm.SVC(kernel='linear', C=1)\n cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)\n\n scores = cross_val_score(clf, X, y, cv=cv, scoring='balanced_accuracy')\n\n return scores", "def make_clf(x_train, y_train, x_test, y_test, clf, clf_name, 
level):\n print('----------{} at {} level ----------'.format(clf_name, level))\n totalTP, totalFP, totalFN, totalTN = 0, 0, 0, 0\n\n # apply SMOTE, train and test the model\n x_train, y_train = SMOTE(sampling_strategy=0.5).fit_resample(x_train, y_train)\n clf.fit(x_train, y_train)\n y_predict = clf.predict(x_test)\n\n for i in range(len(y_predict)):\n if y_test[i] and y_predict[i]:\n totalTP += 1\n if not y_test[i] and y_predict[i]:\n totalFP += 1\n if y_test[i] and not y_predict[i]:\n totalFN += 1\n if not y_test[i] and not y_predict[i]:\n totalTN += 1\n\n recall = totalTP / (totalTP + totalFN)\n return recall", "def classification_score(self, x, y):\t\n\t\tpass", "def _evaluate_classifer(self, classifier: object, X_test: np.ndarray, y_test: np.ndarray, scaler: StandardScaler, optimal_threshold: float, beta: float, calculate_confusion_matrix:bool = False) -> tuple:\n\n # If the data was scaled in the pipeline the scaler will be not none othersie (none) don't scale the data\n if scaler is not None:\n X_test = scaler.transform(X_test)\n\n # get probabilities for positive class\n y_pred = classifier.predict_proba(X_test)[:,1]\n\n # predict based on optimal_threshold\n threshold_predictions = [1 if y > optimal_threshold else 0 for y in y_pred]\n\n # calculate scores\n fb_score = fbeta_score(y_test, threshold_predictions, beta=beta)\n balanced_accurcacy = balanced_accuracy_score(y_test, threshold_predictions)\n\n if calculate_confusion_matrix:\n conf_mat = confusion_matrix(y_test, threshold_predictions)\n return fb_score, balanced_accurcacy, conf_mat\n\n return fb_score, balanced_accurcacy", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for GaussianNB\n from sklearn.naive_bayes import GaussianNB\n from sklearn.metrics import accuracy_score\n\n ### create classifier\n clf = GaussianNB()\n\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n\n ### use the trained classifier to predict labels for the test features\n # method 1\n accuracy = clf.score(features_test, labels_test)\n \n # method 2\n pred = clf.predict(features_test)\n accuracy = accuracy_score(pred, labels_test)\n \n return accuracy", "def test_test_model(self):\n\n dataset = ClassificationTestDataset()\n model = ClassificationTestModel(dataset)\n preds = list(model.predict(dataset.examples))\n self.assertEqual(np.argmax(preds[0]['preds']), 2)\n self.assertEqual(np.argmax(preds[1]['preds']), 1)\n self.assertEqual(np.argmax(preds[2]['preds']), 4)\n self.assertEqual(np.argmax(preds[3]['preds']), 3)", "def train_model(classifier, X_train, y_train, X_test, y_test):\n\n # fit the training dataset on the classifier\n classifier.fit(X_train, y_train)\n \n # predict the labels on test dataset\n predictions = classifier.predict(X_test)\n \n return metrics.accuracy_score(predictions, y_test), metrics.confusion_matrix(predictions, y_test)", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n print(classification_report(Y_test, y_pred, target_names=category_names))\n pass", "def svm_classify(train_image_feats, train_labels, test_image_feats, kernel_type):\r\n\r\n categories = np.unique(train_labels)\r\n # [Desc] make 15 different SVM solver (one(each category) vs. 
the other(14 other category))\r\n svc_list = []\r\n num_categories = len(categories)\r\n for cat_i in tqdm(range(num_categories)):\r\n category = categories[cat_i]\r\n if kernel_type == 'RBF':\r\n svc = svm.SVC(kernel='rbf', probability=True)\r\n elif kernel_type == 'linear':\r\n svc = svm.SVC(kernel='linear', probability=True)\r\n new_label_for_svm = np.where(train_labels == category, 1, 0)\r\n\r\n svc.fit(train_image_feats, new_label_for_svm)\r\n svc_list.append(svc)\r\n\r\n # [Desc] get test images' class using trained svm\r\n probability_list = []\r\n for cat_i in range(num_categories):\r\n svc = svc_list[cat_i]\r\n logit = svc.decision_function(test_image_feats)\r\n probability = logit\r\n probability_list.append(probability)\r\n probability_mat = np.array(probability_list)\r\n probability_mat = np.transpose(probability_mat)\r\n # [Desc] get each class to argmax each logit value.\r\n argmax_class = np.argmax(probability_mat, axis=1)\r\n\r\n return categories[argmax_class]", "def classify (self, text_test):\n test_features = self.vectorizer.transform(text_test)\n return self.nbc.predict(test_features)", "def NBAccuracy(features_train, labels_train, features_test, labels_test):\n ### import the sklearn module for SVM\n from sklearn.svm import SVC\n\n ### create classifier specifying the kernel\n clf = SVC(kernel=\"rbf\", C = 10000)\n\n ### these lines effectively slice the training dataset down \n ### to 1% of its original size, tossing out 99% of the training data.\n #features_train = features_train[:len(features_train)/100] \n #labels_train = labels_train[:len(labels_train)/100]\n\n ### Calculate the Time spent to train our algorithm\n t0 = time()\n ### fit the classifier on the training features and labels\n clf.fit(features_train, labels_train)\n print \"Training time:\", round(time()-t0, 3), \"s\"\n\n ### Calculate the Time spent in the prediction\n t0 = time()\n ### use the trained classifier to predict labels for the test features\n pred = clf.predict(features_test)\n\n print \"Prediction time:\", round(time()-t0, 3), \"s\"\n\n print \"Prediction for element #10:\", pred[10]\n print \"Prediction for element #26:\", pred[26]\n print \"Prediction for element #50:\", pred[50]\n print \"We could predict \", (sum(i == 1 for i in pred)),\"in \", len(features_test),\"test events bilong to Chris\"\n\n ### calculate and return the accuracy on the test data\n from sklearn.metrics import accuracy_score\n accuracy = accuracy_score(pred, labels_test)\n \n ### Another way\n ### accuracy = clf.score(features_test, labels_test)\n return accuracy", "def train_classifier(segments, output_filename, fields=['count', 'orientation',\n 'red_mean', 'green_mean', 'blue_mean'], \n actual='class_id'):\n random_pct = 0.7\n training = segments.loc[(segments.class_id != 0) &\n (segments.random > random_pct), fields]\n\n training_class = segments.loc[(segments.class_id != 0) &\n (segments.random > random_pct), [actual]]\n\n X = training.values\n Y = training_class.values.reshape(-1)\n\n# clf = svm.SVC()\n# clf.fit(X, Y)\n# pprint(vars(clf))\n# pickle.dump(clf, open(output_filename, \"wb\"))\n# svm_pred = clf.predict(X)\n \n# scores = cross_val_score(clf, X, Y, cv=5)\n \n # specify parameters and distributions to sample from\n# param_dist = {'C': expon(scale=100),\n# 'gamma': expon(scale=.1),\n# 'kernel': ['rbf'],\n# 'class_weight':['balanced', None]}\n\n # run randomized search\n# n_iter_search = 20\n# random_search = RandomizedSearchCV(clf, param_distributions=param_dist,\n# n_iter=n_iter_search)\n#\n# 
random_search.fit(X, Y)\n# pprint(vars(random_search))\n# pickle.dump(random_search, open(output_filename, \"wb\"))\n# svm_pred = random_search.predict(X)\n \n # run optimized classifier\n best_clf = svm.SVC(C=14.344592902738631, cache_size=200, class_weight=None,\n coef0=0.0, decision_function_shape='ovr', degree=3,\n gamma=7.694015754766104e-05, kernel='rbf', max_iter=-1,\n probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False)\n best_clf.fit(X, Y)\n pprint(vars(best_clf))\n pickle.dump(best_clf, open(output_filename, \"wb\"))\n svm_pred = best_clf.predict(X)\n \n crosstab = cross_tabulation(Y, svm_pred)\n print(crosstab)\n \n return best_clf", "def evaluate_prediction(classifier, test_data, labels):\n \n predictions = classifier.predict(test_data)\n \n return accuracy_score(labels, predictions)", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def sk_test_suit(X, y):\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)\r\n\r\n classifierDict = {\"Random Forest\": RandomForestClassifier(),\r\n \"Logistic Regression\": LogisticRegression(),\r\n \"Linear Discriminant Analysis\": LinearDiscriminantAnalysis(),\r\n \"Gaussian Naive Bayes\": GaussianNB(),\r\n \"Neural Network\": MLPClassifier()}\r\n\r\n\r\n try:\r\n for k, v in classifierDict.items():\r\n clf = v.fit(X_train, y_train)\r\n training_score = cross_val_score(clf, X_train, y_train)\r\n testing_score = cross_val_score(clf, X_test, y_test)\r\n print(k)\r\n print('Sk-learn {0} training accuracy: {1}'.format(k, training_score.mean()))\r\n print('Sk-learn {0} testing accuracy: {1}'.format(k, testing_score.mean()))\r\n except:\r\n pass\r\n # winsound.PlaySound('sound.wav', winsound.SND_FILENAME)\r", "def test_scores_gatn(atn_model_fn, clf_model_fn, student_model_fn, dataset_name, target_class_id,\n batchsize=128, atn_name=None, clf_name=None, student_name=None, device=None,\n shuffle=True):\n if device is None:\n if tf.test.is_gpu_available():\n device = '/gpu:0'\n else:\n device = '/cpu:0'\n\n # Load the dataset\n (_, _), (X_test, y_test) = generic_utils.load_dataset(dataset_name)\n\n # Split test set to get adversarial train and test split.\n (X_train, y_train), (X_test, y_test) = generic_utils.split_dataset(X_test, y_test)\n\n num_classes = y_train.shape[-1]\n image_shape = X_train.shape[1:]\n\n # cleaning data\n # idx = (np.argmax(y_test, axis=-1) != target_class_id)\n # X_test = X_test[idx]\n # y_test = y_test[idx]\n\n batchsize = min(batchsize, X_test.shape[0])\n\n # num_train_batches = X_train.shape[0] // batchsize + int(X_train.shape[0] % batchsize != 0)\n num_test_batches = X_test.shape[0] // batchsize + int(X_test.shape[0] % batchsize != 0)\n\n # build the datasets\n _, test_dataset = generic_utils.prepare_dataset(X_train, y_train,\n X_test, y_test,\n batch_size=batchsize,\n shuffle=shuffle,\n device=device)\n\n # construct the model on the correct device\n with tf.device(device):\n if clf_name is not None:\n clf_model = clf_model_fn(num_classes, name=clf_name) # type: tf.keras.Model\n else:\n clf_model = clf_model_fn(num_classes) # type: tf.keras.Model\n\n if student_name is not None:\n student_model = student_model_fn(num_classes, name=student_name) # type: tf.keras.Model\n else:\n student_model = 
student_model_fn(num_classes) # type: tf.keras.Model\n\n if atn_name is not None:\n atn_model = atn_model_fn(image_shape, name=atn_name) # type: tf.keras.Model\n else:\n atn_model = atn_model_fn(image_shape) # type: tf.keras.Model\n\n optimizer = tf.train.AdamOptimizer()\n\n atn_checkpoint = tf.train.Checkpoint(model=atn_model, optimizer=optimizer,\n global_step=tf.train.get_or_create_global_step())\n\n student_checkpoint = tf.train.Checkpoint(model=student_model)\n\n clf_model_name = clf_model.name if clf_name is None else clf_name\n basepath = 'weights/%s/%s/' % (dataset_name, clf_model_name)\n\n if not os.path.exists(basepath):\n os.makedirs(basepath, exist_ok=True)\n\n checkpoint_path = basepath + clf_model_name + '.pkl'\n\n # Restore the weights of the classifier\n if os.path.exists(checkpoint_path):\n clf_model = clf_model.restore(checkpoint_path)\n print(\"Classifier model restored !\")\n\n atn_model_name = atn_model.name if atn_name is None else atn_name\n gatn_basepath = 'gatn_weights/%s/%s/' % (dataset_name, atn_model_name + \"_%d\" % (target_class_id))\n\n # Restore student model\n student_model_name = student_model.name if student_name is None else student_name\n basepath = 'gatn_weights/%s/%s/' % (dataset_name, student_model_name)\n\n if not os.path.exists(basepath):\n os.makedirs(basepath, exist_ok=True)\n\n student_checkpoint_path = basepath + student_model_name\n\n student_checkpoint.restore(student_checkpoint_path)\n\n if not os.path.exists(gatn_basepath):\n os.makedirs(gatn_basepath, exist_ok=True)\n\n atn_checkpoint_path = gatn_basepath + atn_model_name + \"_%d\" % (target_class_id)\n\n atn_checkpoint.restore(atn_checkpoint_path)\n\n # Restore the weights of the atn\n print()\n\n # train loop\n test_acc_realistic = tfe.metrics.Mean()\n test_acc_optimistic = tfe.metrics.Mean()\n test_target_rate = tfe.metrics.Mean()\n test_mse = tfe.metrics.Mean()\n\n batch_id = 0\n adversary_ids = []\n\n with tqdm(test_dataset, desc='Evaluating',\n total=num_test_batches, unit=' samples') as iterator:\n\n for test_iter, (x, y) in enumerate(iterator):\n\n if test_iter >= num_test_batches:\n break\n\n _, x_test_grad = compute_target_gradient(x, student_model, target_class_id)\n x_test_adversarial = atn_model(x, x_test_grad, training=False)\n\n y_test_pred = clf_model(x, training=False)\n y_pred_adversarial = clf_model(x_test_adversarial, training=False)\n\n # compute and update the test target_accuracy\n acc_val_white, target_rate = generic_utils.target_accuracy(y, y_pred_adversarial, target_class_id)\n acc_val_black, _ = generic_utils.target_accuracy(y_test_pred, y_pred_adversarial, target_class_id)\n\n x_mse = tf.losses.mean_squared_error(x, x_test_adversarial, reduction=tf.losses.Reduction.NONE)\n\n test_acc_realistic(acc_val_white)\n test_acc_optimistic(acc_val_black)\n test_target_rate(target_rate)\n test_mse(x_mse)\n\n # find the adversary ids\n y_labels = tf.argmax(y, axis=-1).numpy().astype(int)\n y_pred_labels = generic_utils.checked_argmax(y_test_pred, to_numpy=True).astype(int)\n y_adv_labels = generic_utils.checked_argmax(y_pred_adversarial, to_numpy=True).astype(int) # tf.argmax(y_pred_adversarial, axis=-1)\n\n pred_eq_ground = np.equal(y_labels, y_pred_labels) # correct prediction\n pred_neq_adv_labels = np.not_equal(y_pred_labels, y_adv_labels) # correct prediction was harmed by adversary\n\n found_adversary = np.logical_and(pred_eq_ground, pred_neq_adv_labels)\n\n not_same = np.argwhere(found_adversary)[:, 0]\n not_same = batch_id * batchsize + not_same\n batch_id += 1\n\n 
adversary_ids.extend(not_same.tolist())\n\n return (test_mse.result().numpy(),\n test_acc_realistic.result().numpy(), test_acc_optimistic.result().numpy(),\n test_target_rate.result().numpy(), adversary_ids)", "def run_classification_experiment(data_set_path, learner, positive_class_name, data_type=float):\n print(\"Running {0} Experiment with positive class = {1}\".format(data_set_path, positive_class_name))\n\n # Network structure.\n print(\"Number of Hidden Layers: {}\".format(len(learner.weights)-1))\n print(\"Number of Nodes in First Hidden Layer: {}\".format(learner.num_in_hidden_layer_1))\n print(\"Number of Nodes in First Hidden Layer: {}\".format(learner.num_in_hidden_layer_2))\n\n all_data = CustomCSVReader.read_file(data_set_path, data_type)\n\n # Pre-process the data to split into 2 classes, positive and not positive.\n all_data = learner.pre_process(all_data, positive_class_name)\n\n cv = CrossValidation(5, learner)\n average_error_rate = cv.cross_validation_classification(all_data)\n\n print(\"Average Error Rate: {}\".format(average_error_rate[0]))\n print(\"Standard Deviation: {}\".format(average_error_rate[1]))\n\n # if not linearRegression:\n # print(\"Learned Naive Bayes Distribution: \")\n # print(\"Keys are structured as follows: (feature#, possible domain values 0 or 1, 'label', label value)\")\n # print(\"Special Key's that are ('label', possible_class_value) are the percentage of the distribution with \"\n # \"that class label\")\n # else:\n print(\"Learned NN Model (Weights) \")\n\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(average_error_rate[2][4])\n print()\n\n print(\"Last Cross Validation Set Predicted Values: \\n(Predicted Value, Actual Value)\")\n cv_predicted_values = average_error_rate[3]\n cv_actual_values = average_error_rate[4]\n for predicted, actual in zip(cv_predicted_values[4], cv_actual_values[4]):\n # if linearRegression:\n # print(\"{0}, {1}\".format(predicted[0], actual))\n # else:\n print(\"{0}, {1}\".format(predicted, actual))\n\n return average_error_rate[0]", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)", "def apply_classifier(self):\n for detected_object in self.detected_objects:\n detected_object.predict_class(self.original_image)", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def eval_classifier(clf, X, y_correct, classes, plot_cm=True):\n y_pred = clf.predict(X)\n return get_accuracy_and_plot_confusion(y_correct, list(y_pred), classes, plot=plot_cm)", "def evaluate_model(model, X_test, Y_test, category_names):\n \n Y_pred = model.predict(X_test)\n \n print(classification_report(Y_test.values, Y_pred, target_names=category_names))", "def evaluate_model(model, X_test, Y_test, category_names):\n \n y_preds = model.predict(X_test)\n predictions = pd.DataFrame(data=y_preds, columns=Y_test.columns, index=Y_test.index)\n for col in Y_test.columns:\n print(classification_report(predictions[col],Y_test[col]))", "def 
classify(trainX, trainY, testX, testY):\n trainC = getClasses(trainY)\n P = estimatePosterior(trainX, trainC, testX)\n E = fit(testX, P)\n (e_rate, se, interval) = error.confidenceInterval(testY, E)\n return (P, E, e_rate, se, interval)", "def evaluate_classifications(self):\n test_labels = open('./digitdata/testlabels', 'r')\n self.init_confusion_matrix()\n i = 0\n class_stats = {0:[0,0], 1:[0,0], 2:[0,0], 3:[0,0], 4:[0,0], 5:[0,0], 6:[0,0], 7:[0,0], 8:[0,0], 9:[0,0]}\n total_correct = 0\n num_labels = 1000\n for label in test_labels:\n int_label = int(label)\n if int_label == self.solutions[i]:\n class_stats[int_label][0] += 1\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n else:\n self.confusion_matrix[int_label][self.solutions[i]] += 1\n class_stats[int_label][1] += 1\n i += 1\n for k in class_stats:\n print \"Class \" + str(k) + \": \" + str(float(class_stats[k][0])/class_stats[k][1])\n total_correct += float(class_stats[k][0])\n print \"Overall Accuracy: \" + str(total_correct/num_labels) \n for l in range(0,10):\n for w in range(0,10):\n self.confusion_matrix[l][w] = float(self.confusion_matrix[l][w]) / class_stats[l][1]\n \n s = [[str(e) for e in row] for row in self.confusion_matrix]\n lens = [len(max(col, key=len)) for col in zip(*s)]\n fmt = '\\t'.join('{{:{}}}'.format(x) for x in lens)\n table = [fmt.format(*row) for row in s]\n print '\\n'.join(table)\n #self.print_confusion_matrix() ", "def test_classification(dataset: str, model: str,\n outputs_to_probabilities: Optional[Callable[[Any], np.ndarray]],\n sparse_labels: bool, request: FixtureRequest):\n\n # Retrieve values\n dataset = request.getfixturevalue(dataset) # tf.data.Dataset\n model = request.getfixturevalue(model) # tf.keras.Model\n\n # Batch dataset\n dataset = dataset.batch(10)\n\n # Create callback\n datamap = tvl.learning.DataMapCallback(dataset,\n outputs_to_probabilities=outputs_to_probabilities,\n sparse_labels=sparse_labels)\n\n # Train\n N_EPOCHS = 5\n model.fit(dataset, epochs=N_EPOCHS, callbacks=[datamap])\n\n # Assert shape of gathered gold labeles probabilities are (n_samples, n_epochs)\n assert datamap.gold_labels_probabilities.shape == (100, N_EPOCHS)\n\n # Assert all probabilities are bound between 0 and 1\n assert datamap.gold_labels_probabilities.min() >= 0\n assert datamap.gold_labels_probabilities.max() <= 1\n\n # Assert training dynamics shapes\n assert datamap.confidence.shape == datamap.variability.shape == datamap.correctness.shape == (100,)", "def evaluate_classifier(classifier, data_x, data_y, matrix_title='', show=True):\n pred_y = classifier.predict(data_x)\n confusion_matrix = metrics.confusion_matrix(data_y, pred_y)\n f1_score = metrics.f1_score(data_y, pred_y, average='macro')\n print('\\nTest set F1 macro score: %0.4f .\\n' % f1_score)\n if show:\n show_confusion_matrix(confusion_matrix, f1_score, matrix_title)\n return f1_score", "def classify(trainData, testData, nNumFeatures, verbosity = False):\n path = os.path.dirname(trainData)\n trainFile = os.path.basename(trainData)\n testFile = os.path.basename(testData)\n outName = os.path.splitext(testData)[0] + '.out'\n callCommand = ['Timbl']\n callCommand.append('-mO:N1-%d' % nNumFeatures)\n callCommand.append('-o')\n callCommand.append(outName)\n callCommand.append('-P')\n callCommand.append(path)\n callCommand.append('-f')\n callCommand.append(trainFile)\n callCommand.append('-t')\n callCommand.append(testFile)\n if verbosity:\n call(callCommand)\n else:\n with open(os.devnull, 'w') as devnull:\n call(callCommand, 
stdout=devnull, stderr=devnull)\n predictV, predict = importC5(outName)\n os.remove(outName)\n return predict", "def stratifier(self, data, labels, classifiers, cv, output_dir):\n\t\tresults_proba = collections.defaultdict(dict)\n\t\tdict_y_test = collections.defaultdict()\n\t\tsss = StratifiedShuffleSplit(n_splits=cv, test_size=0.2, random_state=3)\n\t\tsss.get_n_splits(data, labels)\n\t\ti = 1\n\t\tself.logger.info('Training processing ...')\n\t\tloop = sss.split(data, labels)\n\t\tt = tqdm(loop)\n\t\tl = collections.defaultdict(dict)\n\t\tfor train_index, test_index in t:\n\t\t\tt.set_description('Cross-validation n°')\n\t\t\tx_train, x_test = data.values[train_index], data.values[test_index]\n\t\t\ty_train, y_test = labels[train_index], labels[test_index]\n\t\t\tdict_y_test[i] = y_test\n\t\t\tresults_proba, tmp_l = \\\n\t\t\t\tself.classification(\n\t\t\t\t\ti, classifiers, results_proba, x_train, x_test, y_train, y_test)\n\t\t\t[l[d].update(tmp_l[d]) for d in tmp_l]\n\t\t\ti += 1\n\t\t[l[clf].update({'Mean': np.mean(np.asarray(list(l[clf].values())))})\n\t\t for clf in l]\n\t\tlog_cv = pd.DataFrame(l)\n\t\tlog_cv.index.names = ['Cross-validation']\n\t\tlog_cv.to_csv(output_dir + '/Cross-validation_accuracy.csv',\n\t\t index=True, sep='\\t')\n\t\tprint('Cross-validation results : \\n')\n\t\tprint(log_cv)\n\n\t\treturn results_proba, dict_y_test, classifiers", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n display_results(Y_test, Y_pred)", "def run(self, X_train, Y_train, X_test, Y_test, **kwargs):\n if self.clf is None:\n self.hidden_layer_sizes = (X_train.shape[1], X_train.shape[1])\n self.initMLPClassifier(**kwargs)\n\n self.clf.fit(X_train, Y_train.astype(int))\n Y_pred = self.clf.predict(X_test)\n return ClassificationResult(Y_test, Y_pred), self", "def evaluate_clf(\n clf, X, y, k=None, test_size=0.5, scoring=\"f1_weighted\", feature_names=None\n):\n X_train, X_test, y_train, y_true = model_selection.train_test_split(\n X, y, test_size=test_size\n )\n\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n\n print(\"Accuracy Score: %f\" % metrics.accuracy_score(y_true, y_pred))\n print()\n\n print(\"Classification report\")\n print(metrics.classification_report(y_true, y_pred))\n print()\n\n print(\"Confussion matrix\")\n print(metrics.confusion_matrix(y_true, y_pred))\n print()\n\n if hasattr(clf, \"feature_importances_\"):\n print(\"Feature importances\")\n if not feature_names:\n feature_names = [\"%d\" % i for i in range(X.shape[1])]\n for f, imp in zip(feature_names, clf.feature_importances_):\n print(\"%20s: %s\" % (f, round(imp * 100, 1)))\n print()\n\n if k:\n print(\"Cross validation\")\n kf = model_selection.KFold(n_splits=k)\n scores = model_selection.cross_val_score(clf, X_train, y_train, cv=kf, scoring=scoring)\n print(scores)\n print(\n \"%d-fold Cross Validation Accuracy: %0.2f (+/- %0.2f)\"\n % (k, scores.mean() * 100, scores.std() * 200)\n )", "def classify(self, expr, train=False):\n return self._classifier.classify(expr, train=train)", "def run_classification_experiment ( feature_matrix, target_array, colmap ):\n np.random.seed ( 7062020 ) # Due date\n\n # Split off validation set and cross-validation set\n X_validation = feature_matrix [ : feature_matrix.shape [ 0 ] // 10 ]\n X_cross_validation = feature_matrix [ feature_matrix.shape [ 0 ] // 10 : ]\n y_validation = target_array [ : feature_matrix.shape [ 0 ] // 10 ]\n y_cross_validation = target_array [ 
feature_matrix.shape [ 0 ] // 10 : ]\n\n experiment_results = {}\n experiment_num = 1\n\n # Use 5-Fold stratified CV\n kfold_strat = KFoldStratifiedCV ( number_of_folds = 5, shuffle = True )\n\n for train, test in kfold_strat.split ( feature_matrix = X_cross_validation, target_array = y_cross_validation ):\n logger.info ( f\"Experiment Number: { experiment_num }\" )\n\n # Get training set\n X_train = X_cross_validation [ train, : ]\n y_train = y_cross_validation [ train ]\n\n # Fit the tree\n d_tree = DecisionTreeClassifier ( evaluate_function = entropy, map_column_node_type = colmap )\n d_tree.fit ( X_train, y_train )\n\n # Prune the tree\n pruned_tree = PostPruner (\n d_tree,\n X_validation = X_validation,\n y_validation = y_validation,\n evaluate_function = accuracy,\n ).prune_tree()\n\n # Get post-pruned predictions\n pruned_preds = pruned_tree.predict ( X_cross_validation [ test, : ] )\n\n # Save the results\n experiment_results [ experiment_num ] = {\n \"actuals\": y_cross_validation [ test ],\n \"preds\": pruned_preds,\n \"model\": pruned_tree,\n }\n experiment_num += 1\n\n return experiment_results\n # End run_classification_experiment", "def classify(self):\n\n if self.classifier is None:\n raise ValueError('self.classifier is None')\n if self.df is None:\n raise ValueError('self.df is None')\n if self.features is None:\n raise ValueError('self.features is None')\n\n train_set = self.df[self.df[self.label_col] != CLASSIFIER_NAN]\n test_set = self.df[self.df[self.label_col] == CLASSIFIER_NAN]\n\n test_set_timestamps = list(test_set.index.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n self.classifier.fit(\n train_set[self.features],\n train_set[self.label_col]\n )\n\n preds = self.classifier.predict(test_set[self.features])\n probs = self.classifier.predict_proba(test_set[self.features])\n\n res = []\n\n for i in range(0, len(preds)):\n probability = max(probs[i])\n res.append([test_set_timestamps[i], preds[i], probability])\n\n return res", "def test_text_classifier_curate(self):\n pass", "def test_nb(x, y, tune):\n # Perform classification without tuning\n nb = GaussianNB()\n pipeline = create_pipeline(nb)\n return accuracy(pipeline, x, y)", "def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results", "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def test_model(model, x_test, y_test, batch_size=100):\n rng = np.random.RandomState(1)\n\n basedir = os.path.dirname(os.path.abspath(__file__))\n classifier.load_weights(os.path.join(basedir, 'mnist_cnn.h5'))\n\n baseline_score = 0\n correct_score = 0\n ssim_score = 0\n\n N = len(x_test)\n assert N % batch_size == 0, 'N should be divisible by batch_size'\n num_batches = N // batch_size\n\n for i in range(num_batches):\n imgs_orig = x_test[batch_size * i:batch_size * (i + 1)].astype(K.floatx())\n labels = y_test[batch_size * i:batch_size * (i + 1)]\n # Create corruption masks\n masks = []\n for _ in 
range(batch_size):\n # Choose square size\n s = rng.randint(7, 15)\n # Choose top-left corner position\n x = rng.randint(0, 29 - s)\n y = rng.randint(0, 29 - s)\n mask = np.zeros(imgs_orig.shape[1:], dtype=np.bool)\n # Set mask area\n mask[y:y + s, x:x + s] = True\n masks.append(mask)\n masks = np.stack(masks)\n\n # Add channel dimension\n channel_dim = 1 if K.image_data_format() == 'channels_first' else -1\n imgs_orig = np.expand_dims(imgs_orig, channel_dim)\n masks = np.expand_dims(masks, channel_dim)\n\n # Generate corrupted versions\n imgs_corrupted = imgs_orig.copy()\n imgs_corrupted[masks] = 1.\n\n # Generate restored images\n imgs_restored = model.predict_on_batch(imgs_corrupted)\n\n predicted_labels_orig = classifier.predict_on_batch(_preprocess_for_classifier(imgs_orig)).argmax(axis=-1).astype(labels.dtype)\n predicted_labels_restored = classifier.predict_on_batch(_preprocess_for_classifier(imgs_restored)).argmax(axis=-1).astype(labels.dtype)\n # Calculate classifier score:\n # baseline corresponds to the original samples which the classifier is able to correctly predict\n baseline = labels == predicted_labels_orig\n # Since the classifier is NOT 100% accurate, we ignore the prediction results\n # from the original samples which were misclassified by masking it using the baseline.\n correct = (labels == predicted_labels_restored) & baseline\n baseline_score += int(baseline.sum())\n correct_score += int(correct.sum())\n\n # Compute SSIM over the uncorrupted pixels\n imgs_orig[masks] = 0.\n imgs_restored[masks] = 0.\n imgs_orig = imgs_orig.squeeze()\n imgs_restored = imgs_restored.squeeze()\n for j in range(batch_size):\n ssim_score += ssim(imgs_orig[j], imgs_restored[j])\n\n classifier_score = correct_score / baseline_score\n ssim_score /= N\n\n print('Classifier score: {:.2f}\\nSSIM score: {:.2f}'.format(100 * classifier_score, 100 * ssim_score))", "def initialize(self):\r\n\r\n loader = ModelLoader()\r\n\r\n input = None\r\n testDataPath = None\r\n while True:\r\n input = utils.menue(\"Majority Vote classifier\", [\"add classifier\", \"save\", \"test + finish\", \"finish\"], False, True)\r\n if input == 2:\r\n self.modelSaver(self, -1)\r\n continue\r\n\r\n if input > 2:\r\n break\r\n\r\n # Display loading menu\r\n model = loader.show_loading_screen()\r\n if not model is None:\r\n if testDataPath is None:\r\n testDataPath = model.testData.get_root_path()\r\n self.modelSaver.datasetPath = testDataPath\r\n else:\r\n # check if the test datasets are the same\r\n if testDataPath != model.testData.get_root_path():\r\n print \"Could not load classifier {0} because the classifier was trained on different test data.\".format(model.name)\r\n continue\r\n self.classifiers.append((1, model)) # Tuple[0] = model weight (1 is default weight) | Tuple[1] = model\r\n\r\n # why did we leave the loop?\r\n if input == 5:\r\n print \"Cancel\"\r\n return False # cancel -> back to start\r\n else:\r\n # initialize test data for the majority classifier\r\n\r\n # check if test data path has changed\r\n if not utils.check_if_dir_exists(testDataPath):\r\n testDataPath = utils.value_question(\"[...]\", \"Root path to new Dataset Path\", \"s\")\r\n self.testData = TestData(testDataPath, 1, False)\r\n self.testData.segment_test_data({\"test\": 1})\r\n self.testData.new_segmentation()\r\n\r\n self.tester = ModelTester(self)\r\n\r\n # test classifier if input == 3\r\n if input == 3:\r\n # Be careful: the results might not reflect the actual accuracy of the classifier.\r\n # if not changed the tester will test on 
the whole test data set. This might include images that the \r\n # classifiers has been trained on. For a real accuracy test the images have to be separated manually.\r\n results = self.tester.test_classifier([\"test\"])\r\n self.tester.save_results(results, exportToCSV=False)\r\n print self.tester.format_results_string(results)\r\n testLoss = results[\"test\"][1]\r\n save = utils.radio_question(\"[?]\", \"Save/Update classifier?\", None, [\"Yes\", \"No\"], [True, False])\r\n if save:\r\n self.modelSaver(self, testLoss)\r\n\r\n\r\n return self.classifiers != [] # finish. Return True if classifiers where loaded, False if not.\r", "def predict_class(clf, X_test, Y_test, labels=None, stats_fname=None):\n expected = Y_test\n if isinstance(clf, KerasModel):\n char_probs = clf.predict(X_test)\n predicted = np.argmax(char_probs, axis=1)\n\n if len(Y_test.shape) > 1:\n expected = np.argmax(Y_test, axis=1)\n else:\n predicted = clf.predict(X_test)\n\n conf_mat = metrics.confusion_matrix(\n expected, predicted, labels=range(len(labels))\n )\n\n stats = {\n 'Accuracy': metrics.accuracy_score(expected, predicted),\n 'F1': metrics.f1_score(expected, predicted, average='weighted'),\n 'Precision': metrics.precision_score(expected, predicted,\n average='weighted'),\n 'Recall': metrics.recall_score(expected, predicted,\n average='weighted')\n }\n print('Accuracy: %f' % stats['Accuracy'])\n print('F1: %f' % stats['F1'])\n print('percision: %f' % stats['Precision'])\n print('recall: %f' % stats['Recall'])\n\n save_conf_mat(conf_mat, stats, labels, stats_fname)\n\n return predicted", "def train_predict_and_results(data, clf):\n tra_x, tst_x, tra_y, tst_y = data\n clf.fit(tra_x, tra_y)\n prd_y = clf.predict(tst_x)\n cnf = confusion_matrix(tst_y, prd_y)\n print (\"Classifier: %s \\tAccuracy score:%7.2f %%\"\n \"\\tTN:%5d FP:%5d FN:%5d TP:%5d\"\n % (clf.name, accuracy_score(tst_y, prd_y) * 100,\n cnf[0][0], cnf[0][1], cnf[1][0], cnf[1][1]))", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred_grid = model.predict(X_test)\n print(\n classification_report(Y_test.values, y_pred_grid, target_names=category_names)\n )", "def train_clf(x_train, y_train, clf_model=\"decision_tree\"):\n clf = classifiers[clf_model]\n clf.fit(x_train, y_train)\n return clf", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "def test_text_classifier_vaporise(self):\n pass", "def make_sklearn_prediction_classification(logger, run_id, df_train_X,\n df_train_Y, df_test_X, kf,\n features=None, params=None,\n model_type=None, is_test=False,\n seed=42, model=None):\n yoof = np.zeros(len(df_train_X))\n yhat = np.zeros(len(df_test_X))\n cv_scores = []\n result_dict = {}\n\n fold = 0\n for in_index, oof_index in kf.split(df_train_X[features], df_train_Y):\n # Start a counter describing number of folds\n fold += 1\n # Number of splits defined as a part of KFold/StratifiedKFold\n n_splits = kf.get_n_splits()\n logger.info(f'fold {fold} of {n_splits}')\n X_in, X_oof = df_train_X.iloc[in_index].values, df_train_X.iloc[oof_index].values\n y_in, y_oof = df_train_Y.iloc[in_index].values, df_train_Y.iloc[oof_index].values\n\n model = model\n model.fit(X_in, y_in)\n\n yoof[oof_index] = model.predict_proba(X_oof)[:, 1]\n if is_test is False:\n yhat += model.predict_proba(df_test_X.values)[:, 1]\n\n cv_oof_score = roc_auc_score(y_oof, yoof[oof_index])\n logger.info(f'CV OOF Score for fold {fold} is {cv_oof_score}')\n cv_scores.append(cv_oof_score)\n\n del oof_index, X_oof, y_oof\n 
gc.collect()\n\n util.update_tracking(run_id, \"metric_fold_{}\".format(fold), cv_oof_score, is_integer=False)\n\n yhat /= n_splits\n\n oof_score = round(roc_auc_score(df_train_Y, yoof), 5)\n avg_cv_scores = round(sum(cv_scores)/len(cv_scores), 5)\n std_cv_scores = round(np.array(cv_scores).std(), 5)\n\n logger.info(f'Combined OOF score : {oof_score}')\n logger.info(f'Average of {fold} folds OOF score {avg_cv_scores}')\n logger.info(f'std of {fold} folds OOF score {std_cv_scores}')\n\n result_dict['yoof'] = yoof\n result_dict['prediction'] = yhat\n result_dict['oof_score'] = oof_score\n result_dict['cv_scores'] = cv_scores\n result_dict['avg_cv_scores'] = avg_cv_scores\n result_dict['std_cv_scores'] = std_cv_scores\n\n util.update_tracking(run_id, \"oof_score\", oof_score, is_integer=False)\n util.update_tracking(run_id, \"cv_avg_score\", avg_cv_scores, is_integer=False)\n util.update_tracking(run_id, \"cv_std_score\", std_cv_scores, is_integer=False)\n\n del yoof, yhat\n gc.collect()\n\n logger.info('Training/Prediction completed!')\n return result_dict", "def test(self, X, y):\n\t\tself.test_X = X\n\t\tself.test_y = y\n\n\t\tclassifier = self.classifier.fit(self.X, self.y)\n\t\ty_pred = classifier.predict(X) \t\t\t# class prediction\n\t\ty_prob = classifier.predict_proba(X)\t# probability of each class\n\t\tself.test_metrics = ModelMetrics(classifier, y, y_pred, y_prob, 'holdout')", "def train_and_test_model(self, X_train, y_train, X_test, y_test):\n\n\t\t# Fit the classification model on the whole training set (as opposed to cross-validation)\n\t\t# print(\"Y TRAIN: \", y_train[:10])\n\t\t# print(\"x TRAIN: \", X_train[:10])\n\t\tself.classifier.fit(X_train, y_train)\n\t\ty_train_predicted = self.classifier.predict(X_train)\n\t\tprint(\"np.mean Accuracy TRAINING: %s\" % np.mean(y_train_predicted == y_train))\n\n\t\t''' Predict the outcome on the test set\n\t\t\tNote that the clf classifier has already been fit on the training data.\n\t\t'''\n\t\ty_predicted = self.classifier.predict(X_test)\n\n\t\tprint(\"%.2f seconds: Finished training the model and predicting class labels for the test set\" % time.process_time())\n\n\t\t# Simple evaluation using numpy.mean\n\t\t# print(\"np.mean Accuracy: %s\" % np.mean(y_predicted == y_test))\n\n\t\t# Log the classification report\n\t\t# print(\"Classification report:\\n%s\" % metrics.classification_report(y_test, y_predicted))\n\n\t\t# The confusion matrix\n\t\t# confusion_matrix = metrics.confusion_matrix(y_test, y_predicted)\n\t\t# print(\"Confusion matrix:\\n%s\" % confusion_matrix)", "def test_xray_classifier():\n model = X_ray_Classifier()\n assert type(model) == X_ray_Classifier" ]
[ "0.71080446", "0.7000874", "0.685494", "0.66270775", "0.66078466", "0.6513761", "0.64321357", "0.6407826", "0.63719994", "0.636399", "0.635611", "0.62839556", "0.6274965", "0.6272943", "0.62652", "0.62624466", "0.6240519", "0.62150085", "0.6208279", "0.6197915", "0.61721116", "0.616227", "0.61596346", "0.6144701", "0.61438376", "0.61407995", "0.6133024", "0.61302173", "0.6117912", "0.6116349", "0.61087763", "0.609173", "0.6084682", "0.60806966", "0.60771286", "0.607679", "0.6072763", "0.60683054", "0.6048154", "0.6042817", "0.6037425", "0.6018778", "0.6009027", "0.60079294", "0.6005226", "0.59977424", "0.5994886", "0.5993921", "0.59920245", "0.59833723", "0.5976285", "0.59747773", "0.5968983", "0.59661", "0.59466225", "0.5933857", "0.5932432", "0.592523", "0.59245825", "0.59164417", "0.5915642", "0.5913956", "0.59122056", "0.5910076", "0.59043574", "0.58871156", "0.58793545", "0.586821", "0.58673435", "0.58654624", "0.5862383", "0.58595467", "0.58585805", "0.58585596", "0.5856527", "0.5838781", "0.58375573", "0.5825362", "0.58217776", "0.5811039", "0.580482", "0.5802032", "0.57987595", "0.5796793", "0.57824117", "0.57821226", "0.5781787", "0.5777218", "0.5765572", "0.5764679", "0.57635957", "0.57609993", "0.57595736", "0.575321", "0.57478833", "0.5745566", "0.57453537", "0.5743023", "0.572736", "0.5726808" ]
0.6559476
5
Reads an English > French text file and filters the lines based on the given filter_fn. If filter_fn is None, the default filter will be used, which keeps only lines whose English side starts with 'i am', 'he is', 'she is', 'they are', 'you are' or 'we are'.
def filter_nmt_file(filename, filter_fn=None):
    if filter_fn is None:
        filter_fn = lambda en: en.lower().startswith('i am') or \
                               en.lower().startswith('he is') or \
                               en.lower().startswith('she is') or \
                               en.lower().startswith('they are') or \
                               en.lower().startswith('you are') or \
                               en.lower().startswith('we are')

    filtered_lines = []
    with open(filename) as file:
        lines = file.readlines()
        for line in lines:
            text = line.split('\t')
            en = text[0]
            fra = text[1]
            if filter_fn(en):
                filtered_lines.append(en.lower() + '\t' + fra.lower())

    return filtered_lines
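A minimal usage sketch for the document above (not part of the dataset record). The file name `fra.txt` and the `keep_questions` predicate are illustrative assumptions; the function only requires a tab-separated English/French text file and, optionally, a predicate over the English sentence.

```python
# Hypothetical custom filter: keep only pairs whose English side is a question.
def keep_questions(en):
    return en.strip().endswith('?')

# Default filter (sentences starting with "i am", "he is", ...):
pairs = filter_nmt_file('fra.txt')

# Custom filter passed explicitly:
question_pairs = filter_nmt_file('fra.txt', filter_fn=keep_questions)

# Each returned line is "<english>\t<french>", both lower-cased.
for line in question_pairs[:5]:
    en, fra = line.split('\t', 1)
    print(en, '->', fra.strip())
```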
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_filter(self, fname, interp=True, lamb=None, *args, **kwargs):\n try:\n fil = UnitFilter.from_ascii(fname, *args, **kwargs)\n except Exception:\n content = self.content\n r = [k for k in content if fname in k]\n\n if len(r) <= 0: # try all lower for filenames (ascii convention)\n r = [k for k in content if fname.lower() in k]\n\n if len(r) > 1:\n print(\"auto correction found multiple choices\")\n print(r)\n raise ValueError('Refine name to one of {0}'.format(r))\n elif len(r) <= 0:\n raise ValueError('Cannot find filter {0}'.format(fname))\n else:\n fil = UnitFilter.from_ascii(r[0], *args, **kwargs)\n if (interp is True) and (lamb is not None):\n return fil.reinterp(lamb)\n else:\n return fil", "def generate_filter(filter_text):\n if ':' in filter_text:\n file_path_filter, _, contract_filter = filter_text.partition(':')\n else:\n file_path_filter = contract_filter = filter_text\n\n return functools.partial(check_if_matches_filter, file_path_filter, contract_filter)", "def test_filter_sff_file(self):\r\n\r\n try:\r\n fh = open(self.tiny_test)\r\n except IOError:\r\n self.fail(\r\n \"Could not open test file %s. Skipping test\" %\r\n self.tiny_test)\r\n\r\n # With no filters all flowgram should be in out file\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = []\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 114)\r\n\r\n # With good filters some should survive\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 100, 300)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n fh.close()\r\n self.assertEqual(l, 112)\r\n\r\n # With strong filters nothing should be in\r\n fh = open(self.tiny_test)\r\n flowgrams, header = lazy_parse_sff_handle(fh)\r\n filter_list = [lambda f:within_length(f, 0, 0)]\r\n fd, out_file_name = mkstemp(\r\n prefix=\"test_filter_sff_file\",\r\n suffix=\".sff.txt\")\r\n close(fd)\r\n out_fh = open(out_file_name, \"w\")\r\n l = filter_sff_file(flowgrams, header, filter_list, out_fh)\r\n remove(out_file_name)\r\n self.assertEqual(l, 0)", "def load_filter_file(self, file_path): \n self._pop_all_self()\n self.filter_list = []\n self.file_path = file_path \n \n with codecs.open(self.file_path, 'r', encoding='cp1252') as fid: \n for k, line in enumerate(fid):\n line = line.lstrip('\\n\\r ')\n if line.startswith('#'):\n continue \n split_line = [item.strip() for item in line.split('\\t')]\n if k==0:\n # Header\n header = split_line\n else:\n line_dict = dict(zip(header, split_line))\n self[line_dict['variable']] = SingleFilter(line_dict, self.parameter)\n\n # Save attributes\n for item in self.keys():\n setattr(self, item, self[item])\n \n self.header = sorted(header)\n \n if self.filter_type == 'data':\n self.year_list = [y for y in range(self['YEAR_INTERVAL'].value[0], \n self['YEAR_INTERVAL'].value[1]+1)]", "def pipeline(file):\n # special processing is performed to avoid sentence boundaries after abbrevs\n doc = nlp(text_processing.preprocess_text_ents(file))\n grid = get_grid(doc)\n distrib = get_distrib(grid, doc)\n return get_feats(distrib)", "def 
fileFiltRecGen(filePath, filt, delim = \",\"):\n\twith open(filePath, \"r\") as fp:\n\t\tfor line in fp:\t\n\t\t\tline = line[:-1]\n\t\t\tif delim is not None:\n\t\t\t\tline = line.split(delim)\n\t\t\tif filt(line):\n\t\t\t\tyield line", "def ascii_to_filter(filename, filter_name=None, detector=None, temperature=None, \n filter_type=None, wcol=0, tcol=None, **kwargs):\n strg = \"Reading a MiriFilter model from an ASCII file \"\n strg += \"is not longer supported.\"\n raise NotImplementedError(strg)", "def LoadSourceFilter(coverable_file_name):\n \n with open(coverable_file_name, \"r\") as cov_file:\n file_list = [line.strip() for line in cov_file.readlines()]\n return SourceFilter(file_list)", "def filter(ctx: click.Context):\n vcf: Reader = vcfpy.Reader.from_path(ctx.obj[\"vcf_file\"])\n filter_settings: Dict[str, Dict] = SV_FILTER_SETTINGS[\"tiddit_tumor_normal\"]\n\n # Update VCF header\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_T_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in tumor, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_info_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"AF_N_MAX\"),\n (\"Number\", \".\"),\n (\"Type\", \"Float\"),\n (\n \"Description\",\n \"Max AF in normal, for rows with merged overlapping variants\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"normal_variant\"),\n (\"Description\", \"AF_T_MAX == 0 and ctg_t == False\"),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_normal_allele_frequency']['filter']}\"),\n (\n \"Description\",\n f\"AF_N_MAX > {filter_settings['max_normal_allele_frequency']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", f\"{filter_settings['max_tin_fraction']['filter']}\"),\n (\n \"Description\",\n f\"(AF_N_MAX / AF_T_MAX) > {filter_settings['max_tin_fraction']['value']}\",\n ),\n ]\n )\n )\n\n vcf.header.add_filter_line(\n vcfpy.OrderedDict(\n [\n (\"ID\", \"in_normal\"),\n (\"Description\", \"ctg_n == True and AF_N_MAX == 0 and AF_T_MAX <= 0.25\"),\n ]\n )\n )\n\n writer = vcfpy.Writer.from_path(\"/dev/stdout\", vcf.header)\n\n # Set soft filters for variants based on presence in the normal sample\n for variant in vcf:\n variant_info: dict = variant.INFO\n\n # Collect evidence of variant in tumor and normal sample\n evidence_dict: dict = get_tumor_normal_evidence(variant_info)\n allele_frequency_tumor: float = evidence_dict[\"tumor_max_af\"]\n allele_frequency_normal: float = evidence_dict[\"normal_max_af\"]\n tumor_has_contig: bool = evidence_dict[\"tumor_has_contig\"]\n normal_has_contig: bool = evidence_dict[\"normal_has_contig\"]\n\n # Add AF_MAX to info field\n variant.INFO[\"AF_T_MAX\"] = [round(allele_frequency_tumor, 4)]\n variant.INFO[\"AF_N_MAX\"] = [round(allele_frequency_normal, 4)]\n\n # Set filter statuses\n if allele_frequency_tumor == 0 and not tumor_has_contig:\n variant.add_filter(\"normal_variant\")\n writer.write_record(variant)\n continue\n\n # Regardless of CTG, set filter if AF_T / AF_N > max_tin_fraction\n normal_tumor_af_ratio = (\n float(allele_frequency_normal / allele_frequency_tumor)\n if allele_frequency_tumor > 0\n else 0\n )\n if normal_tumor_af_ratio > filter_settings[\"max_tin_fraction\"][\"value\"]:\n variant.add_filter(\"high_normal_af_fraction\")\n\n # Set filter if AF_N > 0.25\n if (\n allele_frequency_normal\n > 
filter_settings[\"max_normal_allele_frequency\"][\"value\"]\n ):\n variant.add_filter(\"high_normal_af\")\n\n # Set filter if CTG_N = True, AF_N is 0 and AF_T is below 0.25\n if (\n normal_has_contig\n and allele_frequency_normal == 0\n and allele_frequency_tumor <= 0.25\n ):\n variant.add_filter(\"in_normal\")\n\n writer.write_record(variant)", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])", "def _load_filter(self, fname, interp=True, lamb=None):\n ftab = self.hdf\n if hasattr(fname, 'decode'):\n fnode = ftab.get_node('/filters/' + fname.decode('utf8'))\n else:\n fnode = ftab.get_node('/filters/' + fname)\n flamb = fnode[:]['WAVELENGTH']\n transmit = fnode[:]['THROUGHPUT']\n dtype = 'photon'\n unit = None\n\n attrs = fnode.attrs\n if 'DETECTOR' in attrs:\n dtype = attrs['DETECTOR']\n if 'WAVELENGTH_UNIT' in attrs:\n unit = attrs['WAVELENGTH_UNIT']\n\n fil = UnitFilter(flamb, transmit, name=fnode.name,\n dtype=dtype, unit=unit)\n\n if interp & (lamb is not None):\n fil = fil.reinterp(lamb)\n return fil", "def apply_word_filter(self, fn):\n self._apply_filter(lambda ng, f: any(fn(w) for w in ng))", "def language_filter_func(self, model, iter, data):\n if self.current_filter_language is None or self.current_filter_language == \"None\":\n return True\n else:\n return model[iter][2] == self.current_filter_language", "def ft_filter(fnct, tab):\n res = []\n for i in tab:\n if fnct:\n if fnct(i):\n res.append(i)\n else:\n if i:\n res.append(i)\n return res", "def readAndRewrite(self):\n try:\n with open(self.dataFile, 'r') as source:\n self.initDictionnary()\n lineCount = 0\n filteredData = [] # array of filtered flights\n for line in source:\n lineCount += 1\n line = line.strip()\n if line != \"\" and line[0] != \"#\":\n f = Flight(line, self.vocabulary)\n if self.filter:\n f.rewrite(self.summaryDict)\n f.filter(filteredData, self.listOfTerms, self.threshold)\n print(\"End : Displaying general summary\")\n self.displaySummary(self.summaryDict, lineCount)\n print(\"-------------- End of general summary ---------------\")\n if len(filteredData) != 0:\n print(\"Beginning summary on filtered data (\" + str(len(filteredData)) + \" entries)\")\n for data in filteredData:\n data.rewrite(self.summaryFilteredDict)\n print(\"End of summary for filtered data\")\n self.displaySummary(self.summaryFilteredDict, len(filteredData))\n print(\"Finding correlations\")\n self.findLinkedTerms()\n print(\"Printing correlations with \" + str(self.listOfTerms) + \" and threshold : \" + str(self.threshold))\n #for key in self.correlationDict.keys():\n #print(str(key) + \" : \" + str(self.correlationDict[key]))\n self.findAtypicalTerms()\n print(\"Printing atypical terms with \" + str(self.listOfTerms) + \" and threshold : \" + str(self.threshold))\n #for term in self.atypicalTermsDict.keys():\n #print(str(term) + \" : \" + str(self.atypicalTermsDict[term]))\n display = Display(self.vocabulary)\n display.displayPieChartSummary(self.summaryDict, \"General Summary for 2008 flights in the USA\")\n display.displayPieChartSummary(self.summaryFilteredDict, \"General Summary for 2008 flights with \"+str(self.listOfTerms)+\" and threshold : \" + str(self.threshold))\n display.displayBubbleChart(self.correlationDict,\"Linked terms in 2008 flights with \" + str(self.listOfTerms) + \" and threshold = \" + str(self.threshold))\n display.displayBubbleChart(self.atypicalTermsDict,\"Atypical terms in 2008 flights with \" + str(self.listOfTerms) + \" and threshold = \" + str(self.threshold))\n else:\n 
print(\"Filter returned no entry\")\n except:\n raise Exception(\"Error while loading the dataFile %s\" % self.dataFile)", "def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END", "def filter(ctx, fil, filter_host, filter_port):\n if not fil:\n raise ValueError(\"Must specify at least one filtering operaion (of the form '<filter>=<value>'\")\n client = aceclient.FilterClient(host=filter_host, port=filter_port)\n filters = {}\n for f in fil:\n filters.update(parse_tag(f))\n client.update(**filters)", "def filter_it(self, _filter):\n with open(self.path) as _file:\n for line in _file:\n tokens = self._tokenize(line)\n if tokens:\n _ip = tokens.group('ip')\n if _filter.match(_ip):\n yield line", "def get_special_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n function = row[\"Function\"]\n filters.setdefault(function, {})\n filters[function][\"description\"] = row[\"Description\"]\n filters[function][\"parameters\"] = row[\"Parameters\"].split(\",\")\n filters[function][\"example\"] = row[\"Example\"]\n return filters", "def filter(self, fn):\n self.__filter_chain.append(fn)", "def filter_sff_file(flowgrams, header, filter_list, out_fh):\r\n\r\n write_sff_header(header, out_fh)\r\n\r\n l = 0\r\n for f in flowgrams:\r\n passed = True\r\n for filter in filter_list:\r\n passed = passed and filter(f)\r\n if not passed:\r\n # bail out\r\n break\r\n if (passed):\r\n out_fh.write(f.createFlowHeader() + \"\\n\")\r\n l += 1\r\n return l", "def add_filter(self, f):\n raise NotImplementedError", "def load_filter(filename):\n # parse config file\n if not os.path.isfile(filename):\n raise IOError('File \"%s\" does not exist' % filename)\n try:\n f = open(filename)\n except IOError:\n raise IOError('Could not open file \"%s\"' % filename)\n\n cfg_items = []\n for (i, line) in enumerate(f):\n try:\n # remove all comments and unnecessary whitespace\n normalizer = shlex.shlex(line)\n normalizer.wordchars += '.-'\n normal_line = ' '.join([t for t in normalizer])\n if normal_line:\n # split up normalized line and build dictionary\n cfg_item = {}\n for part in normal_line.split(','):\n cfg_split = shlex.split(part)\n key = cfg_split.pop(0)\n value = cfg_split\n cfg_item[key] = value\n cfg_items.append(cfg_item)\n except (IndexError, ValueError):\n raise RuntimeError( \\\n 'Could not parse line %i of file \"%s\"' % (i, filename))\n\n # look for global bit settings\n bits_global = None\n factor_bits_global = None\n norm_bits_global = None\n for cfg_item in cfg_items:\n if 'bits_global' in cfg_item:\n if bits_global is None:\n [bits_global] = cfg_item.pop('bits_global')\n bits_global = int(bits_global)\n else:\n raise RuntimeError( \\\n 'bits_global must not be specified more than once')\n if 'factor_bits_global' in cfg_item:\n if factor_bits_global is None:\n [factor_bits_global] = cfg_item.pop('factor_bits_global')\n factor_bits_global = int(factor_bits_global)\n else:\n raise RuntimeError( \\\n 'factor_bits_global must not be specified more than once')\n if 'norm_bits_global' in cfg_item:\n if norm_bits_global is None:\n [norm_bits_global] = cfg_item.pop('norm_bits_global')\n norm_bits_global = int(norm_bits_global)\n else:\n raise RuntimeError( \\\n 'norm_bits_global must not be specified more than once')\n\n # remove empty items from cfg_items, only node definitions should 
be left\n cfg_items = filter(None, cfg_items)\n\n # look for filter nodes\n filter_nodes = {}\n adjacency = {}\n input_node = None\n output_node = None\n for cfg_item in cfg_items:\n # mandatory settings\n try:\n [node] = cfg_item['node']\n except KeyError:\n raise RuntimeError('Node type not specified')\n try:\n [name] = cfg_item['name']\n except KeyError:\n raise RuntimeError('Name not specified')\n # optional settings\n if 'bits' in cfg_item:\n [bits] = map(int, cfg_item['bits'])\n else:\n bits = bits_global\n if 'connect' in cfg_item:\n connect = cfg_item['connect']\n else:\n connect = []\n if 'input' in cfg_item:\n if input_node is None:\n input_node = name\n else:\n raise RuntimeError('More than one input node specified')\n if 'output' in cfg_item:\n if output_node is None:\n output_node = name\n else:\n raise RuntimeError('More than one output node specified')\n\n # make filter node\n if name not in filter_nodes:\n if bits is not None:\n if node == 'Const':\n filter_nodes[name] = Const(bits)\n elif node == 'Add':\n filter_nodes[name] = Add(bits)\n elif node == 'Delay':\n filter_nodes[name] = Delay(bits)\n elif node == 'Multiply':\n if 'factor_bits' in cfg_item:\n [factor_bits] = cfg_item['factor_bits']\n factor_bits = int(factor_bits)\n else:\n factor_bits = factor_bits_global\n if 'norm_bits' in cfg_item:\n [norm_bits] = cfg_item['norm_bits']\n norm_bits = int(norm_bits)\n else:\n norm_bits = norm_bits_global\n if (factor_bits is not None and norm_bits is not None):\n filter_nodes[name] = Multiply(\n bits, factor_bits, norm_bits)\n if 'factor' in cfg_item:\n [factor] = cfg_item['factor']\n factor = float(factor)\n filter_nodes[name].set_factor(factor, norm=True)\n else:\n raise ValueError('Unknown node type: %s' % node)\n else:\n raise RuntimeError('Number of bits for node \"%s\" not specified' \\\n % name)\n adjacency[name] = connect\n else:\n raise RuntimeError('Node \"%s\" already present' % name)\n\n # make filter\n if input_node is None:\n raise RuntimeError('No input node specified')\n elif output_node is None:\n raise RuntimeError('No output node specified')\n else:\n return Filter(filter_nodes, adjacency, input_node, output_node)", "def readFiltered(f):\n line = f.readline()\n while line:\n line = line.strip()\n if len(line) != 0:\n if line == \"### NEW EXPERIMENT ###\":\n # print (\"readFiltered: ''\")\n yield \"\"\n elif line[0] != \"#\":\n # print (\"readFiltered: '\",line,\"'\")\n yield line\n line = f.readline()\n # print (\"readFiltered: '\",line,\"'\")\n return line", "def prefilter(json_arg, initial_prefilter):\n\n if not initial_prefilter:\n logging.info(\"prefilter not found!\")\n # whether it is filtered or not, return as json so it can be handled uniformly from now on\n return json.loads(json_arg)\n\n with open(initial_prefilter) as f:\n lines = f.read().splitlines()\n logging.info(\"prefilter:lines in prefilter file: %d \", len(lines))\n lines = filter(lambda k: not k.startswith(\"#\"), lines)\n logging.info(\"prefilter:lines after removing comments: %d \", len(lines))\n json_args_as_json = json.loads(json_arg)\n for filtering_line in lines:\n json_args_as_json = apply_filter(json_args_as_json, filtering_line)\n\n return json_args_as_json", "def get_filter_word_list(self):\n self.filter_words = self.read_word_file(self.filter_word_file)", "def filterRansac():\n pass", "def _load_filter(self, fname, **kwargs):\n with self as current_lib:\n return UnitLickIndex(fname, current_lib._content[fname])", "def fromfile(cls, f):\n raise 
NotImplementedError(\"ScalableRedisLocalBloomFilter not support fromfile\")", "def read_filter(filter_file):\n\n fd = open(filter_file, \"r\")\n lines = fd.readlines()\n fd.close()\n\n wavelengths = []\n weights = []\n for line in lines:\n line = line.strip()\n words = line.split()\n wavelengths.append(float(words[0]))\n weights.append(float(words[1]))\n\n return (wavelengths, weights)", "def ParseFilterFile(input_lines):\n # Strip comments and whitespace from each line and filter non-empty lines.\n stripped_lines = (l.split('#', 1)[0].strip() for l in input_lines)\n filter_lines = [l for l in stripped_lines if l]\n\n # Split the tests into positive and negative patterns (gtest treats\n # every pattern after the first '-' sign as an exclusion).\n positive_patterns = [l for l in filter_lines if l[0] != '-']\n negative_patterns = [l[1:] for l in filter_lines if l[0] == '-']\n return positive_patterns, negative_patterns", "def filterfn(read):\n return (read.is_proper_pair and read.is_paired and read.tlen > 0 and not read.is_supplementary and not read.is_duplicate and not read.is_unmapped and not read.mate_is_unmapped)", "def __call__(self, text, **kwargs):\n if 'filter_name' in kwargs:\n filter_name = kwargs['filter_name']\n del kwargs['filter_name']\n filter_kwargs = {}\n else:\n from django.conf import settings\n filter_name, filter_kwargs = settings.MARKUP_FILTER\n if filter_name is None:\n return text\n if filter_name not in self._filters:\n raise ValueError(\"'%s' is not a registered markup filter. Registered filters are: %s.\" % (filter_name,\n ', '.join(self._filters.iterkeys())))\n filter_func = self._filters[filter_name]\n filter_kwargs.update(**kwargs)\n return filter_func(text, **filter_kwargs)", "def get_filters():\n \n \"\"\"\"\"\"\"\"\n \n \"\"\"Messeges to genrate filters\"\"\"\n\tnote_messege = 'In this project, we make use of Python to explore data related to bike share systems for three major cities in the United States\\n'\n welcome_messege = 'Hello! Let\\'s explore some US bikeshare data!\\n'\n enter_city_name_messege = 'Which city would you like to filter by? Chicago, New York City or Washington? '\n filter_definition_messege = '\\nWould you like to filter the data by - \\n1. Month\\n2. Day\\n3. Both\\n4. No Filter\\n\\nPlease choose the appropriate filter name.\\nNote: Incorrect filter name will result as \\'no filter selected\\' by the user.\\n'\n enter_filter_messege = 'Desired filter (e.g: Month, Day, Both or No Filter): '\n enter_month_name_messege = 'Enter month name (e.g: january, february, march, april, may or june): '\n enter_day_name_messege = 'Enter day of the week (e.g: monday, tuesday, wednesday, thursday, friday, saturday, sunday): '\n exception_messege = '\\nWarning! That is not a valid input.\\n'\n warning_city_name_messege = '\\nWarning! Invalid city name. Select city name from the following cities only - Chicago, New York City or Washington.' \n warning_month_name_messege = '\\nWarning! Invalid month name. Select month name from the following months only - january, february, march, april, may or june'\n warning_day_name_messege = '\\nWarning! Invalid day name. 
Select day name from the following days only - monday, tuesday, wednesday, thursday, friday, saturday, sunday'\n \"\"\"\"\"\"\"\"\n \n \"\"\"City, Month and Day List\"\"\"\n city_list = ['chicago', 'new york city', 'washington']\n month_list = ['january', 'february', 'march', 'april', 'may', 'june']\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n \"\"\"\"\"\"\"\"\n \n\tprint(note_messege)\n print(welcome_messege)\n \n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs \n while True:\n try:\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n while city.lower() not in city_list:\n while True:\n try: \n print(warning_city_name_messege)\n city = input(enter_city_name_messege)\n break\n except:\n print(exception_messege)\n \n print(filter_definition_messege)\n while True:\n try:\n filter_choice = input(enter_filter_messege)\n break\n except:\n print(exception_messege)\n while True: \n if filter_choice.lower() == 'month':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n day = 'all'\n break\n \n elif filter_choice.lower() == 'day':\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday) \n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n month = 'all'\n break\n \n elif filter_choice.lower() == 'both':\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = input(enter_month_name_messege)\n break\n except:\n print(exception_messege)\n while month.lower() not in month_list:\n while True:\n try: \n print(warning_month_name_messege)\n month = input(enter_month_name_messege) \n break\n except:\n print(exception_messege)\n # TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\n while True:\n try:\n day = input(enter_day_name_messege)\n break\n except:\n print(exception_messege)\n while day.lower() not in day_list:\n while True:\n try: \n print(warning_day_name_messege)\n day = input(enter_day_name_messege) \n break\n except:\n print(exception_messege)\n break\n \n else:\n month = 'all'\n day = 'all'\n break\n \n\n print('-'*40)\n return city.lower(), month.lower(), day.lower()", "def __filter( self, text ):\n return text", "def get_filters(filepath):\n filters = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n filter_id = row[\"Filter Column\"]\n filters.setdefault(filter_id, {})\n filters[filter_id][\"results\"] = row[\"Result\"].split(\", \")\n filters[filter_id][\"type\"] = row[\"Type\"]\n filters[filter_id][\"description\"] = ''.join(row[\"Description\"])\n return filters", "def _read_filter_data(filename):\n gains = []\n freqs = []\n freq_scale = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('Freq'):\n _, scale = words[0].split(\"(\")\n scale = scale.rstrip(\")\")\n if scale==\"Hz\":\n freq_scale = 1\n elif scale==\"kHz\":\n freq_scale = 1e3\n elif scale==\"MHz\":\n freq_scale = 1e6\n elif scale==\"GHz\":\n freq_scale = 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n elif len(words)==3 and words[0]!=\"Total\":\n f, g, p = line.split(\",\")\n freq = float(f) * freq_scale\n gain = float(g)\n phase = float(p)\n freqs.append(freq)\n gains.append(gain * np.exp(1j*phase))\n\n return np.array(gains), np.array(freqs)", "def _apply_filter(self, fn=lambda ngram, freq: False):\n tmp_ngram = FreqDist()\n for ngram, freq in self.ngram_fd.items():\n if not fn(ngram, freq):\n tmp_ngram[ngram] = freq\n self.ngram_fd = tmp_ngram", "def filter_lines(in_filename, in_filename2,out_filename):\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r') as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(\",\")\n fips = vals[0]\n if(fips not in fourteen_set):\n fourteen_set.add(fips)\n \n for line in in_f2:\n vals = line.strip().split(\",\")\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if(fips not in fourteen_set):\n new_line = str(fips)+\",\"+str(count)+\"\\n\"\n out_f.write(new_line)\n missing_convert += 1\n\n return (proper_convert, missing_convert)", "def loadFilterFromString(spec):\n return _loadPluginFromString(spec, \"ufo2ft.filters\", isValidFilter)", "def _filterfunc(self,*args,**kwargs):\n self._filterfunc = self.f\n return self.f(*args,**kwargs)", "def test_filter_function_settings(self):\n def foo():\n \"\"\"Dummy function.\"\"\"\n return True\n\n self.es.register_filter(foo)\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'], [])\n\n self.es.register_filter(foo, ftype='none')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'], [])\n self.assertEqual(self.es.filter['none'][0], foo)\n\n self.es.register_filter(foo, ftype='any')\n self.assertEqual(self.es.filter['all'][0], foo)\n self.assertEqual(self.es.filter['any'][0], foo)\n self.assertEqual(self.es.filter['none'][0], foo)", "def apply_filter(input_file, output_file, features):\n lines = input_file.readlines()\n lines = list(map(clean, lines))\n\n for i in range(0, len(lines)):\n line = lines[i]\n feat = extract(line[\"features\"], 
features)\n output_line = line[\"rank\"] + \" \" + line[\"qid\"]\n for key in features:\n output_line += \" \" + str(key) + \":\" + str(feat[key])\n output_line += \" #\" + line[\"comment\"]\n output_file.write(output_line)", "def filter(self, filters):", "def _readfile_with(file_name, fn) :\n with open(file_name) as f:\n for line in f:\n yield fn(line) if not fn is None else line", "def filter_files(files, lang=EXTES):\n\n lang_specific = []\n\n for f in files:\n if f.endswith(lang):\n lang_specific.append(f)\n return lang_specific", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def set_raw_ftrace_entry_filter(self, flt):\n self._raw_ftrace_entry_filter = flt", "def _load_filter(self, fname, **kwargs):\n with self as s:\n return LickIndex(fname, s._content[fname])", "def parse_filtering_file(file_path):\n\n # Initialize variables\n filtering_options = []\n\n # Read in the default filtering options\n with open(file_path, 'r') as input_fh:\n # Iterate through every line of the file\n for line in input_fh.readlines():\n\n # Check to make sure the line is formatted correctly\n if len(line.strip()) > 2 and line.startswith(('-', '+')):\n # Append the filtering option\n if line[1] == ' ':\n filtering_options.append((line[0], line.strip()[2:]))\n elif not line.startswith('#'):\n logging.warning('\\tInvalid regex filtering line: {}'.format(line.strip()))\n\n return filtering_options", "def test_filter_function_any(self):\n self.es.register_filter(lambda x: True, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))", "def string_chain(text, filters):\n if filters is None:\n return text\n\n for filter_function in filters:\n text = filter_function(text)\n\n return text", "def filter_files(self, pattern, filter_fn=None):\n def filter_function(f):\n return re.search(pattern, f) != None\n if not filter_fn:\n filter_fn = filter_function\n return filter(filter_fn, self.files)", "def test_filter_linear(self):\n input_image = np.array([\n 0.01, 0.1, 0.2, 0.5, 0.75, 0.99\n ])\n expected_image_lower = np.array([\n 0.240, 0.329, 0.427, 0.725, 0.974, 1.213\n ])\n expected_image_upper = np.array([\n 0.241, 0.330, 0.428, 0.726, 0.975, 1.214\n ])\n output_config = FilterImageConfig()\n output_config.blur.linear = True\n output_config.blur.sigma = 5\n output_config.effect.mode = \"global\"\n output_config.effect.lum_scale = 5\n output_config.effect.chrom_scale = .1\n output_config.effect.level = 5\n output = localHDR.filter_image(input_image, output_config)\n self.assertTrue(np.allclose(output, expected_image_lower, atol=6e-03))\n self.assertTrue(np.allclose(output, expected_image_upper, atol=6e-03))", "def add_filters(fnames):\n with Database(writable=True) as base:\n for fname in fnames:\n with open(fname, 'r') as f_fname:\n filter_name = f_fname.readline().strip('# \\n\\t')\n filter_type = f_fname.readline().strip('# \\n\\t')\n filter_description = f_fname.readline().strip('# \\n\\t')\n filter_table = np.genfromtxt(fname)\n # The table is transposed to have table[0] containing the\n # wavelength and table[1] containing the transmission.\n filter_table = filter_table.transpose()\n # We convert the wavelength from Å to nm.\n filter_table[0] *= 0.1\n\n print(\"Importing {}... 
({} points)\".format(filter_name,\n filter_table.shape[1]))\n\n new_filter = Filter(filter_name, filter_description, filter_type,\n filter_table)\n\n # We normalise the filter and compute the effective wavelength.\n # If the filter is a pseudo-filter used to compute line fluxes, it\n # should not be normalised.\n if not filter_name.startswith('PSEUDO'):\n new_filter.normalise()\n else:\n new_filter.effective_wavelength = np.mean(\n filter_table[0][filter_table[1] > 0]\n )\n\n base.add_filter(new_filter)", "def on_filter_changed(self, text):\n\n self._filter.setFilterWildcard(text)\n self.update_label()", "def search_translation(file_name,line_original_language,line_new_language,to_translate):\n csv_file = open(file_name, 'rb')\n rd = csv.reader(csv_file, delimiter=';',quoting=csv.QUOTE_ALL)\n nb_row = 0\n for row in rd:\n nb_row += 1\n if line_original_language >= 0 and line_new_language >= 0:\n if row[line_original_language] == to_translate:\n to_translate = row[line_new_language]\n break\n return to_translate", "def filter_local_edges(self, filterfn, transaction_id):\n assert transaction_id >= self._transaction_id, \\\n \"Transactions arrived out of order.\"\n\n return self.copy(local_edges=_apply_filter.remote(\n filterfn, self.local_edges), transaction_id=transaction_id)", "def filter_matches(filename, e):\n transcription_factors = []\n with open(filename) as f:\n for record in NCBIXML.parse(f):\n best = e\n if record.alignments:\n for alignment in record.alignments:\n for hsp in alignment.hsps:\n if hsp.expect < best:\n best = hsp.expect\n\n if best < e:\n iden = record.query.split(\" \")[0]\n locus = iden.split(\"|\")[1]\n transcription_factors.append(locus)\n\n return transcription_factors", "def filter(self, name, filterfn) :\n\n ct = list(zip(self.get_cols(), self.get_types()))\n new_rows = [row for row in self if filterfn(row.as_dict())]\n new_table = self.factory.new_table(name, ct)\n new_table.add_rows(new_rows)\n return new_table", "def fromfile(cls, f, n=-1):\n raise NotImplementedError(\"RedisLocalBloomFilter not support fromfile\")", "def test_filter_function_all(self):\n self.es.register_filter(lambda x: True)\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False)\n self.assertFalse(self.es.streamfilter(self.data))", "def get_filtered_file(filename: Text,\n stop_words: Optional[Container[Text]] = None\n ) -> Tokens_str:\n from re import compile as regex\n\n ws_filter = regex(r\"\\s+\")\n with open(filename, 'rb') as f:\n decoded_str = f.read().decode(errors=\"ignore\").strip().lower()\n return filter_tokens(ws_filter.split(decoded_str), stop_words)\n\n raise ValueError(\"Invalid File name!\")", "def language_text_sources(lang):\n return [\n DATA + \"/tokenized/{source}/{lang}.txt\".format(source=source, lang=lang)\n for source in LANGUAGE_SOURCES[lang]\n if source in FULL_TEXT_SOURCES\n ]", "def get_filter_stringlist(self):\n return text_filter", "def filter_line(self, line):\n if line.startswith(\"<\"):\n # Simply filter out all lines beginning with '<', which are metadata\n return None\n\n # Some metadata-like text is also included at the start of lines, followed by \". - \"\n if u\". - \" in line:\n __, __, line = line.partition(u\". - \")\n\n # Remove -s and spaces from the start of lines\n # Not sure why they're often there, but it's just how the transcripts were formatted\n line = line.lstrip(u\"- \")\n\n # Skip lines that are fully surrounded by brackets: they're typically descriptions of what happened\n # E.g. 
(Applause)\n if line.startswith(u\"(\") and line.endswith(u\")\"):\n return None\n\n # It's common for a speaker's first utterance to start with a marker indicating the original language\n line = language_indicator_re.sub(u\"\", line)\n return line", "def readNormalizer(language):\n\n encoding = None\n\n fname = os.path.join(nm_dir, '%s.txt' % language) \n if not os.path.exists(fname):\n return []\n\n lst = []\n for l in open(fname): \n if not l.strip(): continue\n\n mo = enc_reg.match(l)\n if mo:\n encoding= mo.group(1)\n continue\n\n if l.startswith('#'): continue\n\n fields = l.split()\n if len(fields) == 1:\n fields = (fields[0], '') # replace XX with ''\n\n k = unicode(fields[0], encoding) \n v = unicode(fields[1], encoding) \n\n lst.append((k, v))\n\n return lst", "def __init__(self, source, parameter='', file_path=None):\n super().__init__() \n self.filter_type = 'data'\n self.source = source\n self.parameter = parameter\n self._initate_filter_items()\n if file_path:\n self.load_filter_file(file_path)", "def filter_or_link_transition(\n source_file, dest_file, filter_file, transitions, filter_dict,\n):\n if filter_file is not None:\n filter_dict[dest_file] = {\"all_reads\": source_file, \"filter\": filter_file}\n else:\n transitions[dest_file] = source_file", "def read_file(filename, tokenizer, is_cased):\n sents = []\n with open(filename) as f:\n for line in f:\n sents.append(tokenizer(line, is_cased))\n return sents", "def filter_profanities(text,replace_handler=profanity_word_handler):\n profanities=CensoredWord.objects.get_profanities_wordlist()\n return word_filter(text,profanities,replace_handler)", "def load_filter_file(self, filter_path):\n logger.debug(\"Adding filter file {}\", filter_path)\n try:\n with open(filter_path, \"r\") as filter_file:\n try:\n json_filter_data = json.load(filter_file)\n except Exception as err:\n msg = \"Unable to parse filter file {} as a json file. {!r}\".format(\n filter_path, err)\n logger.debug(msg)\n raise errors.ParserError(msg)\n except IOError:\n raise errors.ParserError(\n \"Unable to access filter path '{}'\".format(filter_path))\n\n if \"version\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'version' key.\".format(\n filter_path))\n\n if \"filters\" not in json_filter_data:\n raise errors.ParserError(\n \"Loading filter-file {} failed. Missing 'filters' key.\".format(\n filter_path))\n\n if not isinstance(json_filter_data[\"version\"], dict):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting value of 'version' entry to be a dictionary \"\n \"but instead its a {}.\".format(filter_path,\n type(json_filter_data[\"version\"])))\n\n version_info = json_filter_data[\"version\"]\n\n if \"major\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'major' key in 'version' value.\".format(filter_path))\n\n if \"minor\" not in version_info:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Missing 'minor' key in 'version' value.\".format(filter_path))\n\n if not isinstance(version_info[\"major\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Expecting int for major version found {} instead.\".format(\n filter_path, type(version_info[\"major\"])))\n\n if not isinstance(version_info[\"minor\"], int):\n raise errors.ParserError(\n \"Loading filter-file {} failed. 
\"\n \"Expecting int for minor version found {} instead.\".format(\n filter_path, type(version_info[\"minor\"])))\n\n if version_info[\"major\"] != FILTER_JSON_FORMAT_MAJOR_VERSION:\n raise errors.ParserError(\n \"Loading filter-file {} failed. \"\n \"Found unexpected major version in JSON filter file.\".format(\n filter_path))\n\n self._add_filters(json_filter_data[\"filters\"], filter_path)", "def test_filter_function_none(self):\n self.es.register_filter(lambda x: False, ftype='none')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: True, ftype='none')\n self.assertFalse(self.es.streamfilter(self.data))", "def apply_ngram_filter(self, fn):\n self._apply_filter(lambda ng, f: fn(*ng))", "def filter_text_by_locale(column, term, locale=None):\n\n mapping = {'de_CH': 'german', 'fr_CH': 'french', 'it_CH': 'italian',\n 'rm_CH': 'english'}\n return SearchableArchivedResultCollection.match_term(\n column, mapping.get(locale, 'english'), term\n )", "def load_filter():\n if not os.path.isfile(FILTER):\n print('no filter found, creating square grid')\n return []\n with open(FILTER, 'r') as ff:\n reader = csv.reader(ff)\n l = list(reader)\n ar = numpy.asarray(l)\n # ar = numpy.transpose(ar, (0, 1))\n # ar = numpy.flip(ar, 1)\n # ar = numpy.rot90(ar, k=3, axes=(0, 1))\n # ar = numpy.swapaxes(ar, 0, 1)\n f = list(map(list, ar))\n return f", "def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return", "def set_filter():\n try:\n #=======================================================================\n # isofilter=[arg.partition('=')[-1] for arg in argv if 'atomfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isofilter = config.arg('atomfilter')[1:-1].split(',')\n isofilter = [f.split(':') for f in isofilter]\n for f in isofilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isofilter = [['element', 'H', 'True', 'None']]\n try:\n #=======================================================================\n # isopartnerfilter=[arg.partition('=')[-1] for arg in argv if 'partnerfilter=' in arg][0][1:-1].split(',')\n #=======================================================================\n isopartnerfilter = config.arg('partnerfilter')[1:-1].split(',')\n isopartnerfilter = [f.split(':') for f in isopartnerfilter]\n for f in isopartnerfilter:\n if len(f) < 2:\n f.append('True')\n if len(f) < 3:\n f.append('True')\n if len(f) < 4:\n f.append('None')\n except:\n isopartnerfilter = 
[['None', 'None', 'None', 'None']]\n return isofilter, isopartnerfilter\n isofilterlist = []\n isopartnerfilterlist = []\n for i in xrange(len(isofilter) / 2):\n isofilterlist.append(tuple(isofilter[2 * i:2 * i + 2]))\n for i in xrange(len(isopartnerfilter) / 2):\n isopartnerfilterlist.append(tuple(isopartnerfilter[2 * i:2 * i + 2]))\n\n return [isofilterlist, isopartnerfilterlist]", "def FS_filter(self, at_data, *args, **kwargs) -> dict:\n\n b_status : bool = True\n l_file : list = []\n l_dirHits : list = []\n l_dir : list = []\n str_path : str = at_data[0]\n al_file : list = at_data[1]\n\n if len(self.args['fileFilter']):\n if self.args['fileFilterLogic'].upper() == 'OR':\n al_file = [x \\\n for y in self.args['fileFilter'].split(',') \\\n for x in al_file if y in x]\n else:\n for y in self.args['fileFilter'].split(','):\n al_file = [x for x in al_file if y in x]\n\n if len(self.args['dirFilter']):\n l_dirHits = [str_path \\\n for y in self.args['dirFilter'].split(',') \\\n if y in str_path]\n if self.args['dirFilterLogic'].upper() == 'AND':\n if len(l_dirHits) == len(self.args['dirFilter'].split(',')):\n for y in self.args['dirFilter'].split(','):\n l_dirHits = [x for x in l_dirHits if y in x]\n else:\n l_dirHits = []\n if len(l_dirHits):\n # Remove any duplicates in the l_dirHits: duplicates can occur\n # if the tokens in the filter expression map more than once\n # into the leaf node in the <str_path>, as a path that is\n #\n # /some/dir/in/the/space/1234567\n #\n # and a search filter on the dirspace of \"123,567\"\n [l_dir.append(x) for x in l_dirHits if x not in l_dir]\n else:\n # If no dir hits for this dir, then we zero out the\n # file filter\n al_file = []\n\n if len(al_file):\n al_file.sort()\n l_file = al_file\n b_status = True\n else:\n self.dp.qprint( \"No valid files to analyze found in path %s!\" %\n str_path, comms = 'warn', level = 5)\n l_file = None\n b_status = False\n return {\n 'status': b_status,\n 'l_file': l_file\n }", "def add_filter(self, label):\n if label not in self.FILTER:\n if \"PASS\" in self.FILTER:\n self.FILTER = [f for f in self.FILTER if f != \"PASS\"]\n self.FILTER.append(label)", "def FilterESLintForChangedLines(self, es_output, lines_to_filter):\n filter_output = [es_line for es_line in es_output.split('\\n') if any(\n line in es_line for line in lines_to_filter)]\n return '\\n'.join(filter_output)", "def test_filter_translate(tr_arg, tr_src, tr_dest):\n args = parser.parse_args([\"-tr\", *tr_arg])\n filters = renamer.initfilters(args)\n dest = renamer.get_renames(tr_src, filters, args.extension, args.raw)\n assert dest == tr_dest", "def extract_data_from_txt(filename: str) -> any:\r\n # List[Dict[str, List[int]]]\r\n ce_file = open(filename, 'r')\r\n ce_text = ce_file.read()\r\n ce_structure_pattern = re.compile(r\"\"\"\r\n \\n(\\w+)\\n # the line with the country name \r\n \\s+Total\\s.*\\n # the line start with at least 1 spaces and \"Total Fossil-Fuel\"\r\n Year\\s+.*\\n\\n # the line start with \"Year Emissions\"\r\n ((\\d+\\s+\\d+\\s+.*\\n)+) # the lines with actual data\r\n \"\"\", re.VERBOSE | re.MULTILINE)\r\n ce_line_pattern = re.compile(r\"\"\"\r\n (\\d+) # year number\r\n \\s+ # the space between year number and Total Fossil-Fuel Emissions data \r\n (\\d+) # the Total Fossil-Fuel Emissions data\r\n \\s+ # the spaces after Total Fossil-Fuel Emissions\r\n .*\\n # the rest of the line\r\n \"\"\", re.VERBOSE)\r\n countries = ce_structure_pattern.findall(ce_text)\r\n\r\n data = [process_row_c(country_data, ce_line_pattern) for country_data 
in countries]\r\n return data", "def filter(self, f, include_directories=False):\n return self._filter(f=f, include_directories=include_directories)", "def filterby(self, filterval, valueoffilter):\n if valueoffilter == '':\n fatal([\n 'Invalid flag \"value\"',\n 'value is required to flag \"filter\"'\n ])\n\n ok = self.validate_filterval(filterval)\n\n if ok is False:\n fatal([\n 'Invalid flag \"filter\"',\n 'The available filter values are:',\n 'description (name)|fulldescription (description)|completed',\n 'Use instead:',\n '$ tasks-app show --filter=description|fulldescription|completed --value={}'.format(valueoffilter)\n ])\n\n if filterval == 'completed':\n if valueoffilter != 'True' and valueoffilter != 'False':\n fatal([\n 'Invalid flag \"value\"',\n 'the available values for completed filter flag are:',\n 'True|False',\n 'Use instead:',\n '$ tasks-app show --filter={filterval} --value=True|False',\n ])\n\n if filterval == 'completed':\n if valueoffilter == 'True':\n valueoffilter = 1\n elif valueoffilter == 'False':\n valueoffilter = 0\n\n if not filterval == 'completed':\n sql = 'SELECT * FROM Tasks WHERE {} LIKE \"{}%\"'.format(filterval, valueoffilter)\n else:\n sql = 'SELECT * FROM Tasks WHERE {} LIKE \"{}\"'.format(filterval, valueoffilter)\n\n conn = sqlite3.connect(DATABASE['file'])\n cur = conn.cursor()\n cur.execute(sql)\n\n if not len(list(cur)) == 0:\n print('Tasks found')\n\n cur.execute(sql)\n\n for description, fulldescription, completed in cur:\n if completed == 0:\n completed = 'Incompleted'\n else:\n completed = 'Completed'\n\n print(' > {} - {} ({})'.format(description, fulldescription, completed))\n\n cur.execute(sql)\n\n if len(list(cur)) == 0:\n print('No tasks found with search {}={}'.format(filterval, valueoffilter))\n\n conn.close()", "def get_filters(available_cities, all_flag):\n # Make a string from the city key list for available data files.\n city_string = ', '.join(available_cities)\n\n # Get user input for city.\n # Note that \"All\" is not an option here - cities cannot be combined.\n city = unique_selection('\\nWhich city (available are: '\n + city_string + ')? ', available_cities + ['Quit'])\n print('You selected: ', city)\n if city == 'Quit': # Quit at city, so skip getting month and day.\n month = 'Quit'\n day = 'Quit'\n else:\n # Check command line option to switch off optional filtering.\n if all_flag:\n month = 'All'\n day = 'All'\n else:\n # Get user input for month.\n # There is a dummy entry at the start of the month list for\n # later mapping, so start slicing at 1 and end at 12.\n # TODO perhaps this dummy is an unnecessary complication.\n # In addition to the months, \"All\" and \"Quit\" are allowed.\n month = unique_selection('\\nWhich month (or \"all\")? ',\n list(MONTHS[1:13]) + ['All','Quit'])\n print('You selected: ', month)\n if month == 'Quit': # Quit at month, so skip getting day.\n day = 'Quit'\n else:\n # Get user input on day of week.\n # In addition to the days, \"All\" and \"Quit\" are allowed.\n day = unique_selection('\\nWhich day of the week (or \"all\")? 
',\n list(WEEKDAYS) + ['All','Quit'])\n print('You selected: ', day)\n return city, month, day", "async def wordfilter(self, ctx):\n pass", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def loadFilters(ufo):\n preFilters, postFilters = [], []\n for filterDict in ufo.lib.get(FILTERS_KEY, []):\n namespace = filterDict.get(\"namespace\", \"ufo2ft.filters\")\n try:\n filterClass = getFilterClass(filterDict[\"name\"], namespace)\n except (ImportError, AttributeError):\n from pprint import pformat\n\n logger.exception(\"Failed to load filter: %s\", pformat(filterDict))\n continue\n filterObj = filterClass(\n *filterDict.get(\"args\", []),\n include=filterDict.get(\"include\"),\n exclude=filterDict.get(\"exclude\"),\n pre=filterDict.get(\"pre\", False),\n **filterDict.get(\"kwargs\", {}),\n )\n if filterObj.pre:\n preFilters.append(filterObj)\n else:\n postFilters.append(filterObj)\n return preFilters, postFilters", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')", "def filter(self, function):\n return FunctionalWrapper(filter(function, self.data))", "def doFiltering(self, searchfunc, filters=None):\n F=[]\n for f in self.filters:\n F.append(f.getFilter())\n #print F\n sets = []\n for f in F:\n col, val, op, boolean = f\n names = searchfunc(col, val, op)\n sets.append((set(names), boolean))\n names = sets[0][0]\n for s in sets[1:]:\n b=s[1]\n if b == 'AND':\n names = names & s[0]\n elif b == 'OR':\n names = names | s[0]\n elif b == 'NOT':\n names = names - s[0]\n names = list(names)\n self.updateResults(len(names))\n return names", "def use_filter(filter_func, url, input):\n output = filter_func(url, input)\n\n if output is None:\n # If the filter does not return a value, it is\n # assumed that the input does not need filtering.\n # In this case, we simply return the input.\n return input\n\n return output", "def parse_csv_files(self, filter_fn=None):\n def filter_function(f):\n return f is not None and f.endswith(\".csv\")\n if not filter_fn:\n filter_fn = filter_function\n files = self.filter_files(None,filter_fn)\n dicts = {}\n for f in files:\n with open(f) as fh:\n dicts[f] = [r for r in csv.DictReader(fh)]\n return dicts", "def read_file(inp_fn):\n lines = [line.strip().split(\",\")\n for line in open(inp_fn)\n if not (line.startswith(\"#\"))]\n return [(int(line[0]), year_record({\"male\": int(line[-3]),\n \"female\": int(line[-2]),\n \"unknown\": int(line[-1])},\n None, None))\n for line in lines[1:]]", "def autoSaveFilter(filename):", "def read_data(self, filePath):\n with open(filePath, 'r', encoding='iso-8859-1') as f:\n for sentence in f.readlines():\n sentence = sentence.replace('\\n', '')\\\n .replace('\"', '')\\\n .replace('\\'', '')\\\n .replace('.', '')\\\n .replace(',', '')\\\n .replace('[', '')\\\n .replace(']', '')\\\n .replace('(', '')\\\n .replace(')', '')\\\n .replace(':', '')\\\n .replace('--', '')\\\n .replace('-', '')\\\n .replace('\\\\', '')\\\n .replace('0', '')\\\n .replace('1', '')\\\n .replace('2', '')\\\n .replace('3', '')\\\n .replace('4', '')\\\n .replace('5', '')\\\n .replace('6', '')\\\n .replace('7', '')\\\n .replace('8', '')\\\n .replace('9', '')\\\n .replace('`', '')\\\n .replace('=', '')\\\n .replace('$', '')\\\n .replace('/', '')\\\n .replace('*', '')\\\n .replace(';', '')\\\n .replace('<b>', '')\\\n .replace('%', '')\n sentence = sentence.split(' ')\n sentence = list(filter(lambda x: x, sentence))\n if sentence:\n self.word_num += len(sentence)\n self.maxlen = self.maxlen if self.maxlen >= len(\n sentence) else 
len(sentence)\n self.minlen = self.minlen if self.minlen <= len(\n sentence) else len(sentence)\n if 'pos' in filePath:\n self.Pos.append([sentence, self.feelMap['pos']])\n else:\n self.Neg.append([sentence, self.feelMap['neg']])", "def from_ascii(cls, fname, dtype='csv', **kwargs):\n lamb = kwargs.pop('lamb', None)\n name = kwargs.pop('name', None)\n detector = kwargs.pop('detector', 'photon')\n unit_ = kwargs.pop('unit', None)\n\n if not isinstance(fname, SimpleTable):\n t = SimpleTable(fname, dtype=dtype, **kwargs)\n else:\n t = fname\n w = t['WAVELENGTH'].astype(float)\n r = t['THROUGHPUT'].astype(float)\n keys = [k for k in t.keys() if 'THROUGHPUT_' in k]\n\n # update properties from file header\n detector = t.header.get('DETECTOR', detector)\n unit_ = t.header.get('WAVELENGTH_UNIT', unit_)\n\n # try from the comments in the header first\n if name in (None, 'None', 'none', ''):\n name = [k.split()[1]\n for k in t.header.get('COMMENT', '').split('\\n')\n if 'COMPNAME' in k]\n name = ''.join(name).replace('\"', '').replace(\"'\", '')\n # if that did not work try the table header directly\n if name in (None, 'None', 'none', ''):\n name = t.header['NAME']\n\n if len(keys) > 0:\n samp = np.array([t[key] for key in keys])\n _filter = cls(w, r, samp, name=name, dtype=detector, unit=unit_)\n else:\n _filter = UnitFilter(w, r, name=name, dtype=detector, unit=unit_)\n\n # reinterpolate if requested\n if lamb is not None:\n _filter = _filter.reinterp(lamb)\n\n return _filter", "def filter_rows(filename, filters):\n with open(filename, \"rU\") as renderfile:\n renders = csv.reader(renderfile)\n for row in renders:\n # Create RenderRow object\n render = RenderRow(*row)\n\n # Check whether to skip this render\n # (i.e. if the render application and/or renderer don't match given values)\n skip_render = any((\n filters['app'] and filters['renderer'] and\n (filters['app'] != render.app or filters['renderer'] != render.renderer),\n filters['app'] and filters['app'] != render.app,\n filters['renderer'] and filters['renderer'] != render.renderer\n ))\n\n if skip_render:\n continue\n\n # yield only if render was successful or 'failed' flag is set to true\n if render.success or filters['failed']:\n yield render", "def translator(*args, defaultFileRule: bool=True, defaultOptions: Union[AnyStr, bool]=\"\",\n extension: bool=True, fileCompression: Union[AnyStr, bool]=\"\", filter: bool=True,\n list: bool=True, loaded: bool=True, objectType: bool=True, optionsScript:\n bool=True, readSupport: bool=True, writeSupport: bool=True, q=True, query=True,\n **kwargs)->Union[bool, Any]:\n pass" ]
[ "0.594911", "0.5432073", "0.5307598", "0.5264026", "0.5253835", "0.52506196", "0.5130263", "0.49939936", "0.49880716", "0.49808767", "0.4957064", "0.494303", "0.49124965", "0.49102247", "0.49089", "0.48704535", "0.48687115", "0.48383683", "0.48080763", "0.47974768", "0.47960532", "0.47910362", "0.4785495", "0.47619236", "0.47166225", "0.47120875", "0.47058204", "0.4693129", "0.4689234", "0.46866822", "0.46797755", "0.4675416", "0.46747994", "0.4665005", "0.46643648", "0.46416485", "0.4634322", "0.46280658", "0.46276087", "0.4622686", "0.4616952", "0.46077666", "0.46009362", "0.45973897", "0.45914787", "0.45872027", "0.4580887", "0.45802432", "0.4572109", "0.45704803", "0.45608008", "0.45602077", "0.45569143", "0.45458537", "0.45455977", "0.45319393", "0.45189753", "0.45072094", "0.4488364", "0.44813725", "0.4477762", "0.447689", "0.4464415", "0.44564345", "0.44537833", "0.44532812", "0.44375095", "0.44370502", "0.44321862", "0.44074574", "0.44001043", "0.43994248", "0.43952814", "0.43946722", "0.43888596", "0.43886352", "0.43837002", "0.4379782", "0.43644154", "0.43601492", "0.4354758", "0.4353081", "0.43529972", "0.43445814", "0.4338507", "0.433664", "0.43333548", "0.43274945", "0.43264842", "0.43235964", "0.43221632", "0.43195203", "0.43189442", "0.4317105", "0.43121365", "0.4305365", "0.43052158", "0.42993656", "0.42967162", "0.42963746" ]
0.73677015
0
Given a list of lines of English/French text, creates a DataFrame with train/val/test split labels.
def create_nmt_data(text,train_pct=0.7,val_pct=0.15): if train_pct + val_pct >= 1: raise Exception("train_pct + val_pct must be < 1.0") source = [] target = [] for line in text: text = line.split('\t') source.append(text[0]) target.append(text[1]) text_df = pd.DataFrame({'source_language':source,'target_language':target}) text_df['split'] = 'train' text_df = text_df.sample(frac=1).reset_index(drop=True) idx = int(len(text_df)*train_pct) text_df.loc[:idx,'split'] = 'train' idx2 = idx + int(len(text_df)*val_pct) text_df.loc[idx:idx2,'split'] = 'val' text_df.loc[idx2:,'split'] = 'test' return text_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_training_data_file(list_of_word_lines, language):\r\n # To store each feature vector\r\n feature_vector = []\r\n\r\n # To store the entire dataset\r\n data = []\r\n\r\n for sentence in list_of_word_lines:\r\n\r\n # Contains Q\r\n CONTAINS_Q = 'N'\r\n\r\n # Contains Q\r\n CONTAINS_X = 'N'\r\n\r\n # Contains more than 1 vowel\r\n VOWELS = 'N'\r\n\r\n # Contains common dutch substrings\r\n DUTCH_SUBSTRING = 'N'\r\n\r\n # Contains is-was\r\n ISWAS = 'N'\r\n\r\n # Contains come\r\n COME = 'N'\r\n\r\n # Contains common english words\r\n COMMON_ENGLISH_WORDS = 'N'\r\n\r\n # Contains common dutch words\r\n DUTCH_WORDS = 'N'\r\n\r\n # Contains dutch ij\r\n IJ = 'N'\r\n\r\n # Contains and\r\n AND = 'N'\r\n\r\n # Contains they, he, she\r\n COLLECTIVES = 'N'\r\n\r\n for word in sentence:\r\n\r\n if re.match('[0-9]*', word):\r\n word = re.sub('[0-9]*', '', word)\r\n\r\n if re.match('[!?~`@#$%&)(_=+/.,\"»;«-]', word):\r\n word = re.sub('[!?~`@#$%&)(_=+/.,\"»;«-]', '', word)\r\n\r\n word = word.lower()\r\n if \"de\" == word or \"het\" == word or \"dat\" == word or \"en\" == word or \"een\" == word or \"voor\" == word or \"van\" == word or \"welke\" == word \\\r\n or \"te\" == word or \"hij\" == word or \"zij\" == word or \"op\" == word or \"ik\" == word or \"bij\" == word:\r\n DUTCH_WORDS = 'Y'\r\n\r\n if \"ij\" in word:\r\n IJ = 'Y'\r\n\r\n if \"the\" == word or \"but\" == word or \"for\" == word or \"which\" == word or \"that\" == word or \"and\" == word or \"not\" == word \\\r\n or \"to\" == word or \"in\" == word:\r\n COMMON_ENGLISH_WORDS = 'Y'\r\n\r\n if \"q\" in word:\r\n CONTAINS_Q = 'Y'\r\n\r\n if \"x\" in word:\r\n CONTAINS_X = 'Y'\r\n\r\n if \"aa\" in word or \"ee\" in word or \"ii\" in word or \"uu\" in word:\r\n VOWELS = 'Y'\r\n\r\n if \"ijk\" in word or \"sch\" in word or \"ijn\" in word:\r\n DUTCH_SUBSTRING = 'Y'\r\n\r\n if \"is\" == word or \"of\" == word or \"was\" == word or \"all\" in word:\r\n ISWAS = 'Y'\r\n\r\n if \"come\" == word or \"a\" == word:\r\n COME = 'Y'\r\n\r\n if \"and\" == word:\r\n AND = 'Y'\r\n\r\n if \"he\" == word or \"she\" == word or \"it\" == word or \"they\" == word:\r\n COLLECTIVES = 'Y'\r\n\r\n feature_vector.append([DUTCH_WORDS, IJ, COMMON_ENGLISH_WORDS, CONTAINS_Q, CONTAINS_X,\r\n VOWELS, DUTCH_SUBSTRING, ISWAS,\r\n COME, AND, COLLECTIVES, language])\r\n\r\n data.append(feature_vector)\r\n feature_vector = []\r\n return data", "def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row", "def splits(cls, text_field, label_field, root='.data',\n train='training.1600000.processed.noemoticon.csv', \n test='testdata.manual.2009.06.14.csv', \n neutral = None, **kwargs):\n \n path_train = root + train\n path_test = root + test\n \n if not os.path.exists(root):\n os.mkdir(root)\n \n if not os.path.exists(path_train) or not os.path.exists(path_test):\n path = cls.download(root)\n path_train = path + train\n path_test = path + test\n \n train_dataset = Sentiment140(path_train, text_field, label_field, neutral=neutral, **kwargs)\n test_dataset = Sentiment140(path_test, 
text_field, label_field, **kwargs)\n \n return train_dataset, test_dataset", "def feature_extraction(inputFile, text, label):\r\n df = pd.read_csv(inputFile, encoding=\"utf8\")\r\n df[text].replace(np.nan, '', inplace=True)\r\n for idx, line in df.iterrows():\r\n try:\r\n words = line[text]\r\n newWords = ''.join(words.split())\r\n df.set_value(idx, text, newWords)\r\n except:\r\n pass\r\n tf = TfidfVectorizer(analyzer='char', encoding=\"utf8\", min_df=10)\r\n\r\n x = tf.fit_transform(df[text])\r\n x = x.toarray()\r\n print(x.shape)\r\n y = df[label]\r\n\r\n return x, y", "def create_train_test(dataframe_all):\n label_encoder=LabelEncoder()\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)\n for train_index, test_index in split.split(dataframe_all['word_values'], dataframe_all['document_label']):\n strat_train_set = dataframe_all.loc[train_index]\n strat_test_set = dataframe_all.loc[test_index]\n\n strat_train_set = strat_train_set.dropna(subset=['word_values'])\n strat_test_set = strat_test_set.dropna(subset=['word_values'])\n pipe=su.pipe()\n x_train, y_train = pipe.fit_transform(strat_train_set), label_encoder.fit_transform(\n strat_train_set['document_label'])\n x_test, y_test = pipe.transform(strat_test_set), label_encoder.fit_transform(\n strat_test_set['document_label'])\n\n return x_train,x_test,y_train,y_test", "def split_data(name, is_train = True):\r\n data = pd.read_csv(name, header = 0, encoding = 'ISO-8859-1')\r\n X = data['text']\r\n if is_train:\r\n Y = data['polarity']\r\n return X, Y\r\n return X", "def textFeature(mode):\r\n \r\n classlist = ['negative', 'positive']\r\n data = pd.DataFrame()\r\n\r\n for label in classlist:\r\n path = 'C:\\\\Users\\\\Tom\\\\Documents\\\\Informatiekunde\\\\Thesis\\\\data\\\\' + mode + '\\\\' + label + '\\\\'\r\n allFiles = glob.glob(path + \"*.txt\")\r\n df1 = pd.DataFrame()\r\n for review in allFiles:\r\n title = review.strip('.txt').split('\\\\')[-1]\r\n text = open(review, 'r', encoding='utf8').read()\r\n df = pd.DataFrame({'File': [title], 'Text': [text], 'Label': [label]}).set_index('File')\r\n df1 = df1.append(df)\r\n data = data.append(df1)\r\n \r\n return data", "def _read(self, documents):\n data = []\n X,Y = [], []\n for document in documents:\n d_ata = pd.read_csv(document, sep='\\t', names=['review','label'])\n data.append(d_ata)\n data = pd.concat(data)\n self.data = data\n Y = data.label\n self.vec.fit(data.review)\n X = self.preprocess(data)\n \n return train_test_split(X,Y)", "def build_dataframe(textline):\n column_names = []\n records = [line.split(u',') for line in textline]\n records = [pd.np.nan if token in (u'\\\\N', 'NULL') else token for token in records]\n # df_line = pd.read_csv(textline, header=None, names=column_names)\n df = pd.DataFrame(records, columns=column_names)\n df = df.convert_objects(convert_numeric=True)\n df.set_index('msisdn', inplace=True)\n print('-----', df.dtypes)\n return df", "def load_dataset(train_path, test_path, tokenizer):\n train_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=train_path,\n block_size=128)\n\n test_dataset = TextDataset(\n tokenizer=tokenizer,\n file_path=test_path,\n block_size=128)\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer, mlm=False,\n )\n return train_dataset, test_dataset, data_collator", "def load_text_and_label(data_file):\n # load data from file\n\n # splite by word\n dfRaw = pd.read_csv(data_file)\n dfRec = dfRaw[['Review Text', 'Recommended IND']].dropna()\n pos_examples = 
dfRec[dfRec['Recommended IND'] == 1]['Review Text'].tolist()\n neg_examples = dfRec[dfRec['Recommended IND'] == 0]['Review Text'].tolist()\n\n x_text = pos_examples + neg_examples\n x_text = np.array([clean_str(sentence) for sentence in x_text])\n # generate label (y)\n pos_labels = [[0,1] for _ in pos_examples]\n neg_labels = [[1,0] for _ in neg_examples]\n y = np.array(pos_labels + neg_labels)\n return [x_text, y]", "def convert_text_to_df(text):\n new_list = [i.strip() for i in text.splitlines() if i.strip() != \"\"]\n new_dict = {}\n col_name = new_list[0].strip().split()\n index_name = new_list[1].strip()\n for item in new_list[2:]:\n index, *others = item.split()\n others = [float(i) for i in others]\n new_dict[index] = others\n new_df = pd.DataFrame(new_dict).transpose()\n new_df.index.name = index_name\n new_df.columns = col_name\n return new_df", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def convert_examples_to_features_for_train(examples, label_list, max_seq_length, tokenizer):\r\n label_map = {label : i for i, label in enumerate(label_list)} #label -> i index dictionary\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n label_list = example.label.split(' ')\r\n\r\n tokens = []\r\n labels = []\r\n for i, word in enumerate(example.text_a.split(' ')): #textlist\r\n token_wordpiece = tokenizer.tokenize(word)\r\n tokens.extend(token_wordpiece)\r\n label_current = label_list[i]\r\n for m in range(len(token_wordpiece)):\r\n if m == 0:\r\n labels.append(label_current)\r\n else:\r\n labels.append('X')\r\n\r\n # max_seq_length-1\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels = labels[0:(max_seq_length - 2)]\r\n\r\n ntokens = []\r\n segment_ids = []\r\n label_ids = []\r\n\r\n ntokens.append('[CLS]')\r\n segment_ids.append(0)\r\n label_ids.append(label_map['[CLS]'])\r\n # print(tokens, labels)\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n segment_ids.append(0)\r\n label_ids.append(label_map[labels[i]])\r\n\r\n ntokens.append('[SEP]')\r\n segment_ids.append(0)\r\n label_ids.append(label_map['[SEP]'])\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n\r\n #if the length is short, tianbu 0\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n #we do not concerned about it\r\n label_ids.append(0)\r\n ntokens.append('NULL')\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n assert len(label_ids) == max_seq_length\r\n\r\n features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids))\r\n return features", "def create_lm_dataset(opt, logger=None):\n # Using spacy to tokenize text\n spacy_en = spacy.load('en')\n # Add <unk> special case is due to wiki text which has raw <unk>\n spacy_en.tokenizer.add_special_case(\"<unk>\", [{ORTH: 
\"<unk>\"}])\n\n def tokenize(text):\n \"\"\"tokenize sentence\"\"\"\n return [item.text for item in spacy_en.tokenizer(text)]\n\n is_lower = True\n if opt.data_type == \"ptb\":\n is_lower = False\n TEXT = torchtext.data.Field(\n sequential=True,\n tokenize=tokenize,\n lower=is_lower\n )\n\n resources_dir = os.path.expanduser(opt.resources_dir)\n if opt.data_type == \"wiki3\":\n train, valid, test = torchtext.datasets.WikiText103.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"wiki2\":\n train, valid, test = torchtext.datasets.WikiText2.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"ptb\":\n train, valid, test = torchtext.datasets.PennTreebank.splits(\n text_field=TEXT,\n root=resources_dir\n )\n\n if logger:\n logger.info(f\"train token: {len(train.examples[0].text)}\")\n logger.info(f\"test token: {len(test.examples[0].text)}\")\n logger.info(f\"valid token: {len(valid.examples[0].text)}\")\n\n device = torch.device(opt.device)\n if opt.input_vector is not None:\n opt.input_vector = os.path.expanduser(opt.input_vector)\n head, tail = os.path.split(opt.input_vector)\n torchtext_vectors = torchtext.vocab.Vectors(name=tail, cache=head)\n torchtext_vectors.vectors.to(device)\n # print(f\"len: {len(torchtext_vectors.stoi)}\")\n # print(f\"size: {torchtext_vectors.vectors.size()}\")\n # Here the list of list is to simulate the real dataset\n # where first dim is sentence and second is word.\n limited_train = [[word] for word in torchtext_vectors.stoi.keys()]\n TEXT.build_vocab(limited_train, vectors=torchtext_vectors)\n else:\n TEXT.build_vocab(train)\n\n train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(\n (train, valid, test),\n batch_size=opt.batch_size,\n bptt_len=opt.bptt_len,\n device=device,\n repeat=False\n )\n return (TEXT, train_iter, test_iter, val_iter)", "def train_validation_test_split(col_stratify='Kind of offensive language',\n train_percent=0.6,\n validate_percent=0.2,\n test_percent=0.2,\n random_state=101):\n\n data = pd.read_csv('cleaned_data.csv', header=0)\n\n if train_percent + validate_percent + test_percent != 1.0:\n raise ValueError(f'Sum of train, validate and test is not 1.0')\n\n if col_stratify not in data.columns:\n raise ValueError(f'{col_stratify} is not a column in the dataframe')\n\n X = data\n y = data[[col_stratify]]\n\n # Split original dataframe into train and temp dataframes.\n data_train, data_temp, y_train, y_temp = train_test_split(X,\n y,\n stratify=y,\n test_size=(\n 1.0 - train_percent),\n random_state=random_state)\n # Split the temp dataframe into val and test dataframes.\n test_to_split = test_percent / (validate_percent + test_percent)\n data_val, data_test, y_val, y_val = train_test_split(data_temp,\n y_temp,\n stratify=y_temp,\n test_size=test_to_split,\n random_state=random_state)\n\n assert len(data) == len(data_train) + len(data_val) + len(data_test)\n\n return data_train, data_val, data_test, y_train, y_val, y_val", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list, 1)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n textlist = example.text_a.split(' ')\n labellist = example.label\n tokens = []\n labels = []\n valid = []\n label_mask = []\n for i, word in enumerate(textlist):\n token = tokenizer.tokenize(word)\n tokens.extend(token)\n label_1 = labellist[i]\n for m in range(len(token)):\n if m == 0:\n labels.append(label_1)\n valid.append(1)\n 
label_mask.append(True)\n else:\n valid.append(0)\n if len(tokens) >= max_seq_length - 1:\n tokens = tokens[0:(max_seq_length - 2)]\n labels = labels[0:(max_seq_length - 2)]\n valid = valid[0:(max_seq_length - 2)]\n label_mask = label_mask[0:(max_seq_length - 2)]\n ntokens = []\n segment_ids = []\n label_ids = []\n ntokens.append(\"[CLS]\")\n segment_ids.append(0)\n valid.insert(0, 1)\n label_mask.insert(0, True)\n label_ids.append(label_map[\"[CLS]\"])\n for i, token in enumerate(tokens):\n ntokens.append(token)\n segment_ids.append(0)\n if len(labels) > i:\n label_ids.append(label_map[labels[i]])\n ntokens.append(\"[SEP]\")\n segment_ids.append(0)\n valid.append(1)\n label_mask.append(True)\n label_ids.append(label_map[\"[SEP]\"])\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\n input_mask = [1] * len(input_ids)\n label_mask = [True] * len(label_ids)\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n label_ids.append(0)\n valid.append(1)\n label_mask.append(False)\n while len(label_ids) < max_seq_length:\n label_ids.append(0)\n label_mask.append(False)\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(valid) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_ids,\n valid_ids=valid,\n label_mask=label_mask))\n return features", "def build_data_cv(file, split_dict, label_dict, clean_string=False):\n revs = []\n f = open(file)\n vocab = defaultdict(float)\n \n for index, line in enumerate(f.readlines()): \n rev = []\n rev.append(line.strip())\n if clean_string:\n orig_rev = clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev)\n words = set(orig_rev.split())\n for word in words:\n vocab[word] += 1\n datum = {\"y\":label_dict[index], \n \"text\": orig_rev, \n \"num_words\": len(orig_rev.split()),\n \"split\": split_dict[index]}#1 or 2\n revs.append(datum)\n\n return revs, vocab", "def _create_examples(self, lines: List[str], mode: Split):\n examples = []\n text_index = 1 if mode == Split.test else 0\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, i)\n text_a = line[text_index]\n if len(line) > text_index + 1:\n label = line[text_index + 1]\n else:\n label = None\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))\n return examples", "def _create_examples(self, lines: List[str], mode: Split):\n # id,title,content,label\n test_mode = mode == Split.test\n title_index = 1\n content_index = 2\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[title_index]\n text_b = line[content_index]\n if test_mode:\n label = None\n else:\n label = line[3]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def split_train_test(df_train, labels):\n n_train = np.shape(df_train)[0]\n X = {'train': [], 'holdout': []} # features\n Y = {'train': [], 'holdout': []} # labels\n p10 = int(0.1 * n_train)\n X['holdout'] = df_train.iloc[-p10:]\n Y['holdout'] = labels[-p10:]\n X['train'] = df_train.iloc[:(n_train - p10)]\n Y['train'] = labels[:(n_train - p10)]\n return X, Y", "def load_dataset(self, fn):\n df = 
pandas.read_csv(fn,\n sep = self.sep,\n header = 0,\n keep_default_na = False)\n\n # Encode one-hot representation of the labels\n if self.classes_() is None:\n self.encoder.fit(df.label.values)\n\n # Split according to sentences and encode\n sents = self.get_sents_from_df(df)\n return (self.encode_inputs(sents),\n self.encode_outputs(sents))", "def load_data_and_labels(data_file=train_file):\n \"\"\"\n There are 7 categories - \n 1. DEMO\n 2. DISE\n 3. TRMT\n 4. GOAL\n 5. PREG\n 6. FMLY\n 7. SOCL\n \"\"\"\n d = {}\n d['DEMO'] = [1, 0, 0, 0, 0, 0, 0]\n d['DISE'] = [0, 1, 0, 0, 0, 0, 0]\n d['TRMT'] = [0, 0, 1, 0, 0, 0, 0]\n d['GOAL'] = [0, 0, 0, 1, 0, 0, 0]\n d['PREG'] = [0, 0, 0, 0, 1, 0, 0]\n d['FAML'] = [0, 0, 0, 0, 0, 1, 0]\n d['SOCL'] = [0, 0, 0, 0, 0, 0, 1]\n\n max_len = -1\n\n #Load data from files\n samples = []\n with open(data_file, 'rb') as csvfile:\n spamreader = csv.reader(csvfile, delimiter='\\t', quotechar='|')\n for i, row in enumerate(spamreader):\n if (row[0] == \"Category\"):\n continue\n print (i, row[1])\n #samples.append([row[0], row[2]])\n #getting class and title = row[0] and row[1] respectively\n samples.append([row[1], row[2], row[0]])\n #split by words\n\n return samples", "def load_data(trainfile, testfile):\n raw_train = pd.read_csv(trainfile, header=None)\n raw_test = pd.read_csv(testfile, header=None)\n train = raw_train.values\n test = raw_test.values\n train_features = train[0::, 1::]\n train_label = train[::, 0]\n test_features = test[0::, 1::]\n test_label = test[::, 0]\n train, cv , train_label, cv_label = train_test_split(train_features,train_label, test_size=0.33, random_state=42)\n return train, train_label, \\\n cv, cv_label, \\\n test_features, test_label", "def _convert_loops_to_df(text_loops):\n \n # Convert the list to a table\n df_loop = DataFrame(text_loops, columns=[u'text'])\n \n # Append columns which classify each row as a loop tag,\n # stop tag, label tab, or data values\n df_loop = _set_loops(df_loop)\n df_loop = _set_labels(df_loop)\n df_loop = _set_stops(df_loop)\n df_loop = _set_values(df_loop)\n \n # Extract the data into a table\n df_list = _extract_loop_data(df_loop)\n \n return df_list", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n # label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n exindex = {}\n passagelens = []\n\n sum_of_labels = 0\n\n for (ex_index, example) in tqdm(enumerate(examples), desc=\"Tokenizing:\"):\n if example.text_a not in tokenmap.keys():\n tokens_a = tokenizer.tokenize(example.text_a)\n tokenmap[example.text_a] = tokens_a\n else:\n tokens_a = tokenmap[example.text_a]\n\n tokens_b = None\n if example.text_b:\n if example.text_b not in tokenmap.keys():\n tokens_b = tokenizer.tokenize(example.text_b)\n tokenmap[example.text_b] = tokens_b\n else:\n tokens_b = tokenmap[example.text_b]\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n\n passagelens.append(len(tokens_a) + len(tokens_b) + 3)\n\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . 
[SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n # label_id = label_map[example.label]\n label_id = example.label\n\n sum_of_labels += label_id\n\n if ex_index < 5:\n logger.info(\"*** Example ***\")\n logger.info(\"guid: %s\" % (example.guid))\n logger.info(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n logger.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.info(\"label: %s (id = %d)\" % (str(example.label), 0))\n\n exindex[ex_index] = example.guid\n features.append(\n InputFeatures(uuid=ex_index,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n\n print(\"Passage Token Lengths Distribution\", passagelens[-1], np.percentile(passagelens, 50),\n np.percentile(passagelens, 90), np.percentile(passagelens, 95), np.percentile(passagelens, 99))\n return features, exindex", "def pre_process_df(train_data, test_data):\n train_data[\"text\"] = train_data[\"sentence1\"] + \", \" + train_data[\"sentence2\"] # noqa\n test_data[\"text\"] = test_data[\"sentence1\"] + \", \" + test_data[\"sentence2\"]\n train_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n test_data.drop([\"sentence1\", \"sentence2\"], axis=1, inplace=True)\n train_data = train_data[[\"text\", \"label\"]]\n test_data = test_data[[\"text\", \"label\"]]\n simple_pre_process_text_df(train_data)\n simple_pre_process_text_df(test_data)\n return train_data, test_data", "def convert_examples_to_features(tokens_set, labels_set, max_seq_length, tokenizer):\r\n\r\n #label_map = {label: i for i, label in enumerate(label_list, 1)}\r\n\r\n input_ids, input_masks, segment_ids, labels = [], [], [], []\r\n for index in tqdm_notebook(range(len(tokens_set)),desc=\"Converting examples to features\"):\r\n textlist = tokens_set[index] #example.text_a.split(' ')\r\n labellist = labels_set[index]\r\n input_id, input_mask, segment_id,label = convert_single_example(\r\n textlist, labellist,max_seq_length,tokenizer\r\n )\r\n input_ids.append(input_id)\r\n input_masks.append(input_mask)\r\n segment_ids.append(segment_id)\r\n 
labels.append(label)\r\n return (\r\n np.array(input_ids),\r\n np.array(input_masks),\r\n np.array(segment_ids),\r\n np.array(labels)\r\n )", "def get_data(train_path,\n test_path,\n tokenize='spacy',\n max_vocab_size=25000,\n train_valid_split=0.8,\n toy=False):\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n if toy:\n train_data = train_data.head(100)\n test_data = test_data.head(100)\n\n train_data, test_data = pre_process_df(train_data, test_data)\n\n train_data_path = \"train_processed.csv\"\n test_data_path = \"test_processed.csv\"\n\n train_data.to_csv(train_data_path, header=False, index=False)\n test_data.to_csv(test_data_path, header=False, index=False)\n\n if tokenize == 'spacy':\n TEXT = data.Field(tokenize=tokenize)\n else:\n TEXT = data.Field()\n\n LABEL = data.LabelField(dtype=torch.float)\n train = data.TabularDataset(path=train_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n test = data.TabularDataset(path=test_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n\n os.remove(train_data_path)\n os.remove(test_data_path)\n\n train, valid = train.split(train_valid_split)\n\n TEXT.build_vocab(train, max_size=max_vocab_size)\n LABEL.build_vocab(train)\n\n return TEXT, LABEL, train, valid, test", "def _create_examples(self, df, mode):\n idx_tr, idx_te = next(ShuffleSplit(test_size=0.3, random_state=1234).split(df.title, df.totalViews))\n\n examples = []\n\n iterind = idx_tr if mode == \"train\" else idx_te\n\n for i in iterind:\n examples.append(\n InputExample(guid=i, text_a=df.title.values[i], label=df.totalViews.values[i]))\n\n return examples", "def _preprocess_wiki_tokens(split: str) -> lmp.dataset.LanguageModelDataset:\n if not isinstance(split, str):\n raise TypeError('`split` must be an instance of `str`.')\n\n file_path = os.path.join(f'{lmp.path.DATA_PATH}', f'wiki.{split}.tokens')\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f'file {file_path} does not exist.')\n\n with open(file_path, 'r', encoding='utf8') as input_file:\n data = input_file.read()\n\n # Split based on section pattern.\n data = re.split(r' \\n( =){1,3} .+ (= ){1,3}\\n ', data)\n data = list(filter(\n lambda sample: sample.strip()\n and not re.match(r'( =){1,3}', sample)\n and not re.match(r'(= ){1,3}', sample),\n data\n ))\n\n # Normalized by unicode NFKC.\n data = [unicodedata.normalize('NFKC', sample) for sample in data]\n\n # Convert all new lines and consecutive whitespace into single whitespace.\n data = [re.sub(r'\\s+', ' ', sample) for sample in data]\n\n # Strip leading and trailing whitespaces.\n data = [sample.strip() for sample in data]\n\n return lmp.dataset.LanguageModelDataset(batch_sequences=data)", "def read(train_path, test_path, label_name):\n train_dataset = pd.read_csv(train_path)\n test_dataset = pd.read_csv(test_path)\n\n train_labels = train_dataset.pop(label_name)\n\n imputer = DataFrameImputer().fit(train_dataset)\n train_dataset = imputer.transform(train_dataset)\n test_dataset = imputer.transform(test_dataset)\n\n train_dataset = pd.get_dummies(train_dataset)\n test_dataset = pd.get_dummies(test_dataset)\n\n train_dataset = train_dataset.drop(train_dataset.columns.difference(test_dataset.columns), axis=1)\n test_dataset = test_dataset.drop(test_dataset.columns.difference(train_dataset.columns), axis=1)\n\n scaler = StandardScaler().fit(train_dataset)\n train_dataset = scaler.transform(train_dataset)\n test_dataset = scaler.transform(test_dataset)\n\n return 
train_dataset, train_labels, test_dataset", "def prepare_text_data(descriptions):\n text_data = []\n for line in descriptions:\n tokens = prepare_text_for_lda(line)\n text_data.append(tokens)\n return text_data", "def create_article_dataset(record_list, dataset_dir, sess,\n validation_size=10,\n eval_every=100,\n input_feature='text',\n max_input_sequence_length=Article.max_text+2,\n target_feature='short_description',\n max_target_sequence_length=Article.max_short_description+2,\n hparams=None):\n input_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=os.path.join(dataset_dir, '{}_vocab.txt'.format(input_feature)),\n num_oov_buckets=1,\n default_value=3)\n target_table = tf.contrib.lookup.index_table_from_file(vocabulary_file=os.path.join(dataset_dir, '{}_vocab.txt'.format(target_feature)),\n num_oov_buckets=1,\n default_value=3)\n lookup_table = tf.contrib.lookup.index_to_string_table_from_file(\n os.path.join(dataset_dir, '{}_vocab.txt'.format(target_feature)),\n default_value='<U>')\n\n input_lookup_table = tf.contrib.lookup.index_to_string_table_from_file(\n os.path.join(dataset_dir, '{}_vocab.txt'.format(input_feature)),\n default_value='<U>')\n\n tf.tables_initializer().run(session=sess)\n\n dataset = tf.data.Dataset.from_tensor_slices(record_list)\n\n def _read_npy(filename):\n parsed = np.load(filename.decode('utf-8'))\n data = parsed.item()\n i = list(map(lambda x: x.decode('utf-8'), data[input_feature].tolist()))\n i_l = data['{}_length'.format(input_feature)]\n t = list(map(lambda x: x.decode('utf-8'), data[target_feature].tolist()))\n t_l = data['{}_length'.format(target_feature)]\n return i, i_l, t, t_l\n\n def next_example(input_feature, input_sequence_length, target_feature, target_sequence_length):\n input_sequence_length.set_shape([1])\n input_feature.set_shape([max_input_sequence_length])\n target_sequence_length.set_shape([1])\n target_feature.set_shape([max_target_sequence_length])\n\n feature_input_sequences = {\n 'input_sequence_length': tf.cast([max_input_sequence_length], tf.int32),\n }\n feature_target_sequences = {\n 'target_sequence_length': tf.cast([max_target_sequence_length], tf.int32),\n }\n\n feature_target_sequences['target'] = target_table.lookup(target_feature)\n feature_input_sequences['input'] = input_table.lookup(input_feature)\n return feature_input_sequences, feature_target_sequences\n\n dataset = dataset.map(lambda filename: tf.py_func(_read_npy, [filename], [tf.string, tf.int64, tf.string, tf.int64]))\n dataset = dataset.map(next_example)\n dataset = dataset.shuffle(buffer_size=hparams.shuffle_buffer_size)\n\n def training_set(dataset):\n iterator = dataset.make_initializable_iterator()\n iterator.initializer.run(session=sess)\n def train():\n return iterator.get_next()\n return train\n\n def validation_set(dataset):\n iterator = dataset.make_initializable_iterator()\n iterator.initializer.run(session=sess)\n def validate():\n return iterator.get_next()\n return validate\n\n dataset = dataset.batch(hparams.batch_size)\n validation_dataset = dataset.take(validation_size)\n training_dataset = dataset.repeat(hparams.epochs)\n training_dataset = training_dataset.shuffle(buffer_size=hparams.shuffle_buffer_size)\n train = training_set(training_dataset)\n valid = validation_set(validation_dataset)\n input_vocab_size = input_table.size().eval(session=sess)\n target_vocab_size = target_table.size().eval(session=sess)\n\n return train, valid, (input_vocab_size, target_vocab_size), lookup_table, input_lookup_table", "def 
prepare_data(labeled_data_file, sep=\"|\", char_level=False):\n\n x_raw = []\n y_raw = []\n\n for line in open(labeled_data_file, encoding=\"utf8\"):\n address, country = line.strip().split(sep)\n x_raw.append(address)\n y_raw.append(country)\n\n print(\"Number of samples:\", len(x_raw))\n\n tokenizer_x = Tokenizer(MAXWORDS, char_level=char_level)\n\n tokenizer_x.fit_on_texts(x_raw)\n print(\"tokenizer_x.word_index:\", tokenizer_x.word_index)\n # reverse_word_index = {v:k for k, v in tokenizer_x.word_index.items()}\n\n x_tts_raw = [tokenizer_x.texts_to_sequences(a) for a in x_raw]\n x_tts = []\n for row in x_tts_raw:\n x_tts.append([c[0] for c in row])\n\n del x_raw\n del x_tts_raw\n\n tokenizer_y = Tokenizer(MAXWORDS, char_level=False)\n tokenizer_y.fit_on_texts(y_raw)\n print(\"tokenizer_y.word_index:\", tokenizer_y.word_index)\n\n num_classes = len(tokenizer_y.word_index.values())\n print(\"Number of classes:\", num_classes)\n\n y_tts = [tokenizer_y.word_index[a.lower()] for a in y_raw]\n\n del y_raw\n\n x_train, x_test, y_train, y_test = train_test_split(\n x_tts, y_tts, test_size=TEST_SPLIT\n )\n\n y_train_one_hot_labels = to_categorical(y_train, num_classes=num_classes + 1)\n y_test_one_hot_labels = to_categorical(y_test, num_classes=num_classes + 1)\n\n x_train_pad = sequence.pad_sequences(\n x_train, maxlen=MAXLEN, padding=\"post\", truncating=\"post\"\n )\n x_test_pad = sequence.pad_sequences(\n x_test, maxlen=MAXLEN, padding=\"post\", truncating=\"post\"\n )\n\n return (\n num_classes,\n x_train_pad,\n y_train_one_hot_labels,\n x_test_pad,\n y_test_one_hot_labels,\n tokenizer_x,\n tokenizer_y,\n )", "def read_data(filename,label=None,preprocessor=space_tokenizer):\n df = pd.read_csv(filename)\n return [preprocessor(string) for string in df['sentences'].values]", "def splits(cls, text_field, label_field, root='../data',\n train='train.txt', validation='valid.txt', test='test.txt'):\n print(\"root path for relation dataset: {}\".format(root))\n path = cls.download_or_unzip(root)\n prefix_fname = 'annotated_fb_data_'\n return super(SimpleQaRelationDataset, cls).splits(\n os.path.join(path, prefix_fname), train, validation, test,\n format='TSV', fields=[('subject', None), ('relation', label_field), (object, None), ('question', text_field)]\n )", "def read_dataset_from_list(self, lineLst):\n data = []\n for line in lineLst:\n if self.sos != '':\n data.append(self.sos)\n for word in line:\n word = self.replace_special_chars(word)\n _word = word\n if self.unit == \"oracle\":\n if \"+\" in word:\n # double check\n if word.startswith(\"word\") and len(word.split('+'))>1 \\\n and len(word.split('+')[0].split(\":\"))>1:\n _word = word.split('+')[0].split(\":\")[1]\n else:\n continue\n if self.unit == \"morpheme\":\n _word = re.sub(\"@@\", \"\", word)\n if not self.is_hyperlink(_word.lower()) and len(_word) <= 100:\n data.append(word)\n if self.eos != '':\n data.append(self.eos)\n return data", "def preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0):\n\ttrain_data = pd.DataFrame(columns=['text', 'response'])\n\n\tprep_0 = [strip_non_alphanum(line) for line in text]\n\tprep_1 = [line for line in prep_0 if line.rstrip()]\n\tprep_2 = [strip_multiple_whitespaces(line) for line in prep_1]\n\tprep_3 = [line.lower() for line in prep_2]\n\n\tif to_tfidf == 1:\n\t\t#when using tf_idf, removes single character words given that they are ignored by sklearn's TfidfVectorizer\n\t\tprep_3 = [' '.join([word for word in line.split() if len(word) > 1]) for line in 
prep_3]\n\n\tif tokenization == 1:\n\t\tprep_3 = [line.split(' ') for line in prep_3]\n\t\t#removes whitespaces from the list\n\t\tprep_3 = [list(filter(None, line)) for line in prep_3]\n\telse:\n\t\tprep_3 = [line[:-1] if line[-1] == \" \" else line for line in prep_3]\n\n\tif numbers_to_text == 1 and tokenization == 1:\n\t\t#convert all numbers to integers and convert these numbers to its written form\n\t\ttemp_prep = []\n\t\tfor sentence in prep_3:\n\t\t\ttemporary_sentence = []\n\t\t\tfor word in sentence:\n\t\t\t\tif str(word).isdigit():\n\t\t\t\t\tconverted_words = num2words(int(word), to='cardinal', lang='pt').split(' ')\n\t\t\t\t\tif to_tfidf == 1 and rm_stopwords == 0:\n\t\t\t\t\t\tconverted_words = [word for word in converted_words if word != 'e']\n\t\t\t\t\ttemporary_sentence.extend(converted_words)\n\t\t\t\telse:\n\t\t\t\t\ttemporary_sentence.append(word)\n\t\t\ttemp_prep.append(temporary_sentence)\n\n\t\tprep_3 = temp_prep\n\telif numbers_to_text == 1 and tokenization == 0:\n\t\t#convert all numbers to integers and convert these numbers to its written form\n\t\ttemp_prep = []\n\t\tfor sentence in prep_3:\n\t\t\ttemporary_sentence = []\n\t\t\tfor word in sentence.split(' '):\n\t\t\t\tif str(word).isdigit():\n\t\t\t\t\tconverted_words = num2words(int(word), to='cardinal', lang='pt').split(' ')\n\t\t\t\t\tif to_tfidf == 1 and rm_stopwords == 0:\n\t\t\t\t\t\tconverted_words = [word for word in converted_words if word != 'e']\n\t\t\t\t\ttemporary_sentence.extend(converted_words)\n\t\t\t\telse:\n\t\t\t\t\ttemporary_sentence.append(word)\n\t\t\ttemporary_sentence = ' '.join(temporary_sentence)\n\t\t\ttemp_prep.append(temporary_sentence)\n\t\tprep_3 = temp_prep\n\n\tif rm_stopwords == 1:\n\t\tstp = set(stopwords.words('portuguese') + list(punctuation))\n\t\tif tokenization == 1:\n\t\t\tprep_3 = [[word for word in sentence if word not in stp] for sentence in prep_3]\n\t\telif tokenization == 0:\n\t\t\tprep_3 = [' '.join([word for word in sentence.split(' ') if word not in stp]) for sentence in prep_3]\n\n\ttmp = pd.DataFrame({'text':prep_3[::2], 'response':prep_3[1::2]})\n\ttrain_data = train_data.append(tmp[['text', 'response']], ignore_index=True)\n\n\treturn train_data", "def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = [x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) 
# padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n\n test_number=int((number_examples-training_number)/2)\n\n\n test = (X[training_number+ 1:training_number+test_number], Y[training_number + 1:training_number+test_number])\n valid = (X[training_number + test_number + 1:],\n Y[training_number + test_number + 1:])\n\n return train,test,valid", "def split_line(line):\n arr = tf.strings.split(line, \"\\t\")\n label = tf.expand_dims(tf.cast(tf.strings.to_number(arr[0]), tf.int32), axis=0)\n text = tf.expand_dims(arr[1], axis=0)\n return text, label", "def _create_examples(self, lines: List[str], mode: Split):\n test_mode = mode == Split.test\n q1_index = 1 if test_mode else 3\n q2_index = 2 if test_mode else 4\n examples = []\n for (i, line) in enumerate(lines):\n if i == 0:\n continue\n guid = \"%s-%s\" % (mode.value, line[0])\n try:\n text_a = line[q1_index]\n text_b = line[q2_index]\n label = None if test_mode else line[5]\n except IndexError:\n continue\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n return examples", "def load_data_multilabel(traning_data_path,vocab_word2index, vocab_label2index,sentence_len,training_portion=0.95):\n file_object = codecs.open(traning_data_path, mode='r', encoding='utf-8')\n lines = file_object.readlines()\n random.shuffle(lines)\n label_size=len(vocab_label2index)\n X = []\n Y = []\n for i,line in enumerate(lines):\n raw_list = line.strip().split(\"__label__\")\n input_list = raw_list[0].strip().split(\" \")\n input_list = [x.strip().replace(\" \", \"\") for x in input_list if x != '']\n x=[vocab_word2index.get(x,UNK_ID) for x in input_list]\n label_list = raw_list[1:]\n label_list=[l.strip().replace(\" \", \"\") for l in label_list if l != '']\n label_list=[vocab_label2index[label] for label in label_list]\n y=transform_multilabel_as_multihot(label_list,label_size)\n X.append(x)\n Y.append(y)\n if i<10:print(i,\"line:\",line)\n\n X = pad_sequences(X, maxlen=sentence_len, value=0.) 
# padding to max length\n number_examples = len(lines)\n training_number=int(training_portion* number_examples)\n train = (X[0:training_number], Y[0:training_number])\n valid_number=min(1000,number_examples-training_number)\n test = (X[training_number+ 1:training_number+valid_number+1], Y[training_number + 1:training_number+valid_number+1])\n return train,test", "def parse_test_data(best_result, vocab):\n tDocs, labels = read_data(os.path.join('data', 'test'))\n min_Freq = best_result['min_freq']\n punctuation = best_result['punct']\n features = best_result['features']\n X, vocab = vectorize([tokenize(d, punctuation) for d in tDocs], features, min_Freq, vocab)\n return tDocs, labels, X", "def dataset_preparation():\r\n with open('../data/patterns_num.txt', 'r') as f:\r\n data = f.readlines()\r\n X, Y = [], []\r\n for line in data:\r\n x, y = line.split('\\t')\r\n if len(x) > 5 and x not in X: # better results are achieved excluding short query patterns\r\n X.append(x.replace(\"X\", \"\").replace(\"Y\", \"\").lower())\r\n Y.append(int(y.replace('\\n', '')))\r\n test_size = 0.2\r\n # print('Test size:', test_size, '\\nWrong classifications:\\n')\r\n\r\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=test_size, random_state=42, stratify=Y)\r\n return X_train, y_train, X_test, y_test", "def load_dataset(path, test_or_train):\n senta_batch, sentb_batch, scores_batch = [], [], []\n with open(path, encoding='utf-8') as f:\n for i, line in enumerate(f):\n items = line.strip().split('\\t')\n if test_or_train == 'train':\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n elif test_or_train in ['dev', 'test']:\n senta, sentb, score = items[-2], items[-1], float(items[-3])\n else:\n raise Exception(\"{} error\".format(test_or_train))\n senta_batch.append(senta)\n sentb_batch.append(sentb)\n scores_batch.append(score)\n return senta_batch, sentb_batch, scores_batch", "def train(df: pd.DataFrame) -> Tuple[MultiLabelBinarizer, Pipeline]:\n print(df)\n\n # Data preprocessing\n df.genres = df.genres.apply(lambda x: x.split(\" \")) # Convert string of genres to list of genres per movie\n df[\"synopsis\"] = clean_data(df[\"synopsis\"])\n\n # Extract input and output\n X = df[[\"synopsis\", \"year\"]].to_numpy()\n y = df.genres.to_list()\n\n # Transforms genres of movie to list of 1's and 0's:\n # For each genre, 1 if movie has it, 0 if not.\n multilabel_binarizer = MultiLabelBinarizer()\n y_learner = multilabel_binarizer.fit_transform(y)\n\n # Pipeline to fit, transform and predict on input data\n pipe = Pipeline((\n # Transform text to numerical features and concatenate one-hot encoding of year\n (\"transformer\", ColumnTransformer([(\"text\", TfidfVectorizer(sublinear_tf=True,\n ngram_range=(1, 2)), 0),\n (\"year\", OneHotEncoder(handle_unknown=\"ignore\"), [1])])),\n # Multi-label Logistic Regression classifier\n (\"clf\", OneVsRestClassifier(LogisticRegression(C=20, solver=\"sag\", max_iter=300),\n n_jobs=-1))))\n\n pipe.fit(X, y_learner) # Learn model\n return multilabel_binarizer, pipe", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n\tlabel_map = {label : i for i, label in enumerate(label_list,1)}\n\t\n\tfeatures = []\n\tfor (ex_index,example) in enumerate(examples):\n\n\t\ttext_a, entity_a, entity_b = example.text_a.split('[RE]')\n\n\t\ttokens_a = tokenizer.tokenize(text_a)\n\t\ttokens_b = None\n\t\ttokens_ea = tokenizer.tokenize(entity_a)\n\t\ttokens_eb = tokenizer.tokenize(entity_b)\n\n\t\t# Account for [CLS] and [SEP] with 
\"- 2\"\n\t\tif (len(tokens_a) + len(tokens_ea) + len(tokens_eb)) > (max_seq_length - 4) :\n\t\t\ttokens_a = tokens_a[0:(max_seq_length - 4 - len(tokens_ea) - len(tokens_eb))]\n\n\t\ttokens = []\n\t\tsegment_ids = []\n\t\ttokens.append(\"[CLS]\")\n\t\tsegment_ids.append(0)\n\t\tfor token in tokens_a:\n\t\t\ttokens.append(token)\n\t\t\tsegment_ids.append(0)\n\t\ttokens.append(\"[SEP]\")\n\t\tsegment_ids.append(0)\n\t\tfor token in tokens_ea:\n\t\t\ttokens.append(token)\n\t\t\tsegment_ids.append(0)\n\n\t\ttokens.append(\"[SEP]\")\n\t\tsegment_ids.append(0)\n\n\t\tfor token in tokens_eb:\n\t\t\ttokens.append(token)\n\t\t\tsegment_ids.append(0)\n\n\t\ttokens.append(\"[SEP]\")\n\t\tsegment_ids.append(0)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\twhile len(input_ids) < max_seq_length:\n\t\t\tinput_ids.append(0)\n\t\t\tinput_mask.append(0)\n\t\t\tsegment_ids.append(0)\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\n\t\tlabel_id = label_map[example.label]\n\t\tif ex_index < 2:\n\t\t\tlogger.info(\"*** Example ***\")\n\t\t\tlogger.info(\"guid: %s\" % (example.guid))\n\t\t\tlogger.info(\"tokens: %s\" % \" \".join(\n\t\t\t\t\t[str(x) for x in tokens]))\n\t\t\tlogger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n\t\t\tlogger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n\t\t\tlogger.info(\n\t\t\t\t\t\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\t\t\tlogger.info(\"label: %s (id = %d)\" % (example.label, label_id))\n\n\t\tfeatures.append(InputFeatures(\n\t\t\tinput_ids=input_ids,\n\t\t\tinput_mask=input_mask,\n\t\t\tsegment_ids=segment_ids,\n\t\t\tlabel_id=label_id))\n\treturn features", "def train_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',')", "def train_test_split(df):\n training_size = int(len(df) * .67)\n test_size = int(len(df) - training_size)\n train, test = df[0:training_size], df[training_size:len(df)]\n return train, test", "def prepareSentimentDataset(df,sentcol='sentiment',listcol='token',labelthreshold_pos = 1,labelthreshold_neg = -1,\\\n keep_neutral = False,train_ratio = 1):\n if not keep_neutral:\n data = df[np.logical_or(df.loc[:,sentcol]>=labelthreshold_pos,df.loc[:,sentcol]<=labelthreshold_neg)]\n else:\n data = df.copy()\n trainindex = len(data.index)*train_ratio\n if train_ratio<1:\n return (data.loc[data.index[:trainindex],listcol],data.loc[data.index[:trainindex],sentcol],\\\n data.loc[data.index[trainindex:],listcol],data.loc[data.index[trainindex:],sentcol])\n else:\n return (data.loc[:,listcol],data.loc[:,sentcol])", "def load_data_and_labels(data_source, remove_stopword=False, run_with_keras=False):\n # Read the CSV file and get its contents\n with open(data_source, 'r', encoding='utf-8', errors='ignore') as f:\n csv_reader = csv.reader(f)\n # get the header\n header = next(csv_reader)\n label_idx = header.index('label')\n content_idx = header.index('content')\n print(f'The label index is : {label_idx} and the content index is : {content_idx}')\n\n y_text = list()\n x_text = list()\n\n for line in csv_reader:\n # get the sentence from the line\n sentence = line[content_idx].strip()\n x_text.append(sentence)\n y_text.append(int(line[label_idx]))\n\n # preprocess 
input text\n if run_with_keras:\n x_text = [clean_str(sent, remove_stopword) for sent in x_text]\n else:\n x_text = [clean_str(sent, remove_stopword).split(' ') for sent in x_text]\n\n # get the lengths for every line\n lengths = np.array(list(map(len, [sent for sent in x_text])))\n\n return [x_text, y_text, lengths]", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n\tlabel_map = {label : i for i, label in enumerate(label_list)}\n\n\tfeatures = []\n\tfor (ex_index, example) in enumerate(examples):\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = None\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\t_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0\t0\t0\t 0\t 0 0\t1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0\t 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\n\t\t\n\t\tlabels_ids = [label_map[example.label]]\n\n#\t\t label_id = label_map[example.label]\n\t\tif ex_index < 0:\n\t\t\tlogger.info(\"*** Example ***\")\n\t\t\tlogger.info(\"guid: %s\" % (example.guid))\n\t\t\tlogger.info(\"tokens: %s\" % \" \".join(\n\t\t\t\t\t[str(x) for x in tokens]))\n\t\t\tlogger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n\t\t\tlogger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n\t\t\tlogger.info(\n\t\t\t\t\t\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\t\t\tlogger.info(\"label: %s (id = %s)\" % (example.labels, labels_ids))\n\n\t\tfeatures.append(\n\t\t\t\tInputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_ids=labels_ids))\n\treturn features", "def sentences_to_features(self, sentences, labels):\n\n input_examples = [run_classifier.InputExample(guid=\"\", text_a=s, text_b=None, label=l) for s, l in\n zip(sentences, labels)] # here, \"\" is just a dummy label\n input_features = run_classifier.convert_examples_to_features(input_examples, self.label_list,\n self.params[\"MAX_SEQ_LENGTH\"],\n self.tokenizer)\n return input_features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label : i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens = example.text\n\n# # Account for [CLS] and [SEP] with \"- 2\"\n# if len(tokens) > max_seq_length - 2:\n# tokens = tokens[:(max_seq_length - 2)]\n\n bert_tokens = []\n orig_to_tok_map = []\n\n bert_tokens.append(\"[CLS]\")\n for token in tokens:\n new_tokens = tokenizer.tokenize(token)\n if len(bert_tokens) + len(new_tokens) > max_seq_length - 1:\n # print(\"You shouldn't see this since the test set is already pre-separated.\")\n break\n else:\n orig_to_tok_map.append(len(bert_tokens))\n bert_tokens.extend(new_tokens)\n bert_tokens.append(\"[SEP]\")\n\n if len(bert_tokens) == 2: # edge case\n continue\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n\n input_ids = tokenizer.convert_tokens_to_ids(bert_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n\n segment_ids = [0] * max_seq_length # no use for our problem\n\n labels = example.label\n label_ids = [0] * max_seq_length\n label_mask = [0] * max_seq_length\n\n for label, target_index in zip(labels, orig_to_tok_map):\n label_ids[target_index] = label_map[label]\n label_mask[target_index] = 1\n\n assert len(segment_ids) == max_seq_length\n assert len(label_ids) == max_seq_length\n assert len(label_mask) == max_seq_length\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_ids=label_ids,\n label_mask=label_mask))\n return features", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def train_dataset_reversed():\n return TabularDataset.from_path('tests/data/dummy_tabular/train.csv', sep=',',\n columns=['label', 'text'])", "def surface_labelled_data_preparation_pipeline(word_list: [str]):\n X = []\n\n for word in word_list:\n segments = word.split('-')\n segment_features = []\n for i in range(len(segments)):\n features = {}\n\n segment_length = len(segments[i])\n features['length'] = segment_length\n\n features['segment.lower()'] = segments[i].lower()\n features['pos_in_word'] = i\n\n if segment_length % 2 == 0:\n features['even'] = 1\n else:\n features['odd'] = 1\n\n features['begin'] = segments[i][0]\n features['end'] = segments[i][len(segments[i]) - 1]\n\n try:\n features['prev_segment'] = segments[i - 1]\n except IndexError:\n features['prev_segment'] = ''\n # continue\n\n try:\n features['next_segment'] = segments[i + 1]\n except IndexError:\n features['next_segment'] = ''\n\n if segments[0].isupper():\n features['start_upper'] = 1\n else:\n features['start_lower'] = 1\n\n if segments[0] in 'aeiou':\n features['first_vowel'] = 1\n else:\n features['first_const'] = 1\n\n segment_features.append(features)\n\n X.append(segment_features)\n\n return X", "def split_text_file(data_file, model_dir, eval_fraction):\n with io.open(data_file, 'r', encoding='utf-8') as fp:\n data = fp.readlines()\n\n random.shuffle(data)\n\n root, ext = os.path.splitext(data_file)\n train_file = os.path.join(model_dir, \"{}-train{}\".format(root, ext))\n eval_file = os.path.join(model_dir,\"{}-eval{}\".format(root, ext))\n train_offset = int(len(data)*(1-eval_fraction))\n\n if not os.path.exists(train_file) or not os.path.exists(eval_file):\n tf.logging.info('Splitting into train and test datasets..')\n with 
io.open(train_file, 'w', encoding='utf-8') as tfp,\\\n io.open(eval_file, 'w', encoding='utf-8') as efp:\n\n for i, line in enumerate(data):\n if i < train_offset:\n tfp.write(line)\n else:\n efp.write(line)\n\n return train_file, eval_file", "def split_test_train(df, num_months_test = 1):\n train_set = df.iloc[0:len(df)-num_months_test]\n test_set = df.iloc[len(df)-num_months_test:len(df)]\n \n return train_set, test_set", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\r\n\r\n label_map = {label : i for i, label in enumerate(label_list)}\r\n\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[:(max_seq_length - 2)]\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\r\n segment_ids = [0] * len(tokens)\r\n\r\n if tokens_b:\r\n tokens += tokens_b + [\"[SEP]\"]\r\n segment_ids += [1] * (len(tokens_b) + 1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\r\n # tokens are attended to.\r\n input_mask = [1] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n padding = [0] * (max_seq_length - len(input_ids))\r\n input_ids += padding\r\n input_mask += padding\r\n segment_ids += padding\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = example.label\r\n if ex_index < 5:\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"guid: %s\" % (example.guid))\r\n logger.info(\"tokens: %s\" % \" \".join(\r\n [str(x) for x in tokens]))\r\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n logger.info(\r\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n # logger.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features.append(\r\n InputFeatures(input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_id=label_id))\r\n return features", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\r\n\r\n label_map = {label : i for i, label in enumerate(label_list)}\r\n\r\n features = []\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[:(max_seq_length - 2)]\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\r\n segment_ids = [0] * len(tokens)\r\n\r\n if tokens_b:\r\n tokens += tokens_b + [\"[SEP]\"]\r\n segment_ids += [1] * (len(tokens_b) + 1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\r\n # tokens are attended to.\r\n input_mask = [1] * len(input_ids)\r\n\r\n # Zero-pad up to the sequence length.\r\n padding = [0] * (max_seq_length - len(input_ids))\r\n input_ids += padding\r\n input_mask += padding\r\n segment_ids += padding\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n # label_id = label_map[example.label]\r\n label_id = example.label\r\n label_id = float(label_id)\r\n if ex_index < 5:\r\n logger.info(\"*** Example ***\")\r\n logger.info(\"guid: %s\" % (example.guid))\r\n logger.info(\"tokens: %s\" % \" \".join(\r\n [str(x) for x in tokens]))\r\n logger.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n logger.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n logger.info(\r\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n logger.info(\"label: %s (id = %s)\" % (example.label, label_id))\r\n\r\n features.append(\r\n InputFeatures(input_ids=input_ids,\r\n input_mask=input_mask,\r\n segment_ids=segment_ids,\r\n label_id=label_id))\r\n return features", "def divide_train_test(self, sentences, tags):\n logging.info('Dividindo dataset em 10 folds')\n kf = KFold(n_splits=10)\n train, test = [], []\n for train_index, test_index in kf.split(sentences):\n train.append(train_index)\n test.append(test_index)\n return train, test", "def load_data(self):\n with open(self.file_name) as f:\n lines = f.readlines()\n\n labels = list()\n all_dat = list()\n for i, l in enumerate(lines):\n\n labels.append(int(l[0]))\n\n l = gensim.utils.any2unicode(l)\n all_dat.append(LabeledSentence(l.split(\"\\t\")[-1], [i]))\n\n return all_dat, np.asarray(labels)", "def convert_examples_to_features(examples,label_list, max_seq_length,tokenizer):\r\n label_map = {}\r\n for (i, label) in enumerate(label_list):\r\n label_map[label] = i\r\n\r\n input_data=[]\r\n for (ex_index, example) in enumerate(examples):\r\n tokens_a = tokenizer.tokenize(example.text_a)\r\n tokens_b = None\r\n if example.text_b:\r\n tokens_b = tokenizer.tokenize(example.text_b)\r\n if tokens_b:\r\n # Modifies `tokens_a` and `tokens_b` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\r\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\r\n else:\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > max_seq_length - 2:\r\n tokens_a = tokens_a[0:(max_seq_length - 2)]\r\n\r\n if ex_index % 10000 == 0:\r\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\r\n\r\n # The convention in BERT is:\r\n # (a) For sequence pairs:\r\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\r\n # (b) For single sequences:\r\n # tokens: [CLS] the dog is hairy . [SEP]\r\n # type_ids: 0 0 0 0 0 0 0\r\n #\r\n # Where \"type_ids\" are used to indicate whether this is the first\r\n # sequence or the second sequence. The embedding vectors for `type=0` and\r\n # `type=1` were learned during pre-training and are added to the wordpiece\r\n # embedding vector (and position vector). This is not *strictly* necessary\r\n # since the [SEP] token unambigiously separates the sequences, but it makes\r\n # it easier for the model to learn the concept of sequences.\r\n #\r\n # For classification tasks, the first vector (corresponding to [CLS]) is\r\n # used as as the \"sentence vector\". 
Note that this only makes sense because\r\n # the entire model is fine-tuned.\r\n tokens = []\r\n segment_ids = []\r\n tokens.append(\"[CLS]\")\r\n segment_ids.append(0)\r\n for token in tokens_a:\r\n tokens.append(token)\r\n segment_ids.append(0)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(0)\r\n\r\n if tokens_b:\r\n for token in tokens_b:\r\n tokens.append(token)\r\n segment_ids.append(1)\r\n tokens.append(\"[SEP]\")\r\n segment_ids.append(1)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n\r\n input_mask = [1] * len(input_ids)\r\n\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n label_id = label_map[example.label]\r\n if ex_index < 3:\r\n tf.logging.info(\"*** Example ***\")\r\n tf.logging.info(\"guid: %s\" % (example.guid))\r\n tf.logging.info(\"tokens: %s\" % \" \".join([tokenization.printable_text(x) for x in tokens]))\r\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\r\n tf.logging.info(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\r\n tf.logging.info(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\r\n tf.logging.info(\"label: %s (id = %d)\" % (example.label, label_id))\r\n\r\n features = collections.OrderedDict()\r\n features[\"input_ids\"] = input_ids\r\n features[\"input_mask\"] = input_mask\r\n features[\"segment_ids\"] = segment_ids\r\n features[\"label_ids\"] =label_id\r\n input_data.append(features)\r\n\r\n return input_data", "def train_test_split(df, frac):\n frac = round(len(df)*frac)\n train = df[:frac]\n test = df[frac:]\n\n return train, test", "def transform_train_data(df):\n return df.rdd.map(\n lambda x: (\n Vectors.dense([x.amount, x.split, x.maintain4, x.maintain12]),\n x.intime\n )\n ).toDF([\"features\", \"label\"])", "def load_data_and_labels_without_shuffled():\n # Load data from files\n with codecs.open('./data/train_pos.txt', 'r+', 'utf-8') as f:\n train_pos = f.readlines()\n with codecs.open('./data/dev_pos.txt', 'r+', 'utf-8') as f:\n dev_pos = f.readlines()\n with codecs.open('./data/train_neg.txt', 'r+', 'utf-8') as f:\n train_neg = f.readlines()\n with codecs.open('./data/dev_neg.txt', 'r+', 'utf-8') as f:\n dev_neg = f.readlines()\n\n positive_examples1 = []\n positive_examples2 = []\n negative_examples1 = []\n negative_examples2 = []\n\n for i in train_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_examples1.append(item1)\n positive_examples2.append(item2)\n\n for i in train_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_examples1.append(item1)\n negative_examples2.append(item2)\n\n # Split by words\n x_text_train1 = positive_examples1 + negative_examples1\n x_text_train2 = positive_examples2 + negative_examples2\n\n positive_dev1 = []\n positive_dev2 = []\n negative_dev1 = []\n negative_dev2 = []\n\n for i in dev_pos:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n positive_dev1.append(item1)\n positive_dev2.append(item2)\n\n for i in dev_neg:\n item1, item2 = i.split('\\t')\n item1 = remove_stop_word(item1)\n item2 = remove_stop_word(item2)\n negative_dev1.append(item1)\n negative_dev2.append(item2)\n\n x_text_dev1 = positive_dev1 + negative_dev1\n x_text_dev2 = 
positive_dev2 + negative_dev2\n\n # Generate labels\n train_positive_labels = [[0, 1] for _ in train_pos]\n dev_positive_labels = [[0, 1] for _ in dev_pos]\n train_negative_labels = [[1, 0] for _ in train_neg]\n dev_negative_labels = [[1, 0] for _ in dev_neg]\n y_train = np.concatenate([train_positive_labels, train_negative_labels], 0)\n y_dev = np.concatenate([dev_positive_labels, dev_negative_labels], 0)\n\n return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev]", "def convert_example_to_features_for_test(example, max_seq_length, tokenizer):\r\n features = []\r\n tokens = []\r\n labels_temp = []\r\n for i, word in enumerate(example.text_a.split(' ')):\r\n token_wordpiece = tokenizer.tokenize(word) #04/26/1882 ['04', '/', '26', '/', '1882']\r\n tokens.extend(token_wordpiece)\r\n for m in range(len(token_wordpiece)):\r\n if m == 0:\r\n labels_temp.append(0)\r\n else:\r\n labels_temp.append('X')\r\n # max_seq_length-1\r\n if len(tokens) >= max_seq_length - 1:\r\n tokens = tokens[0:(max_seq_length - 2)]\r\n labels_temp = labels_temp[0:(max_seq_length - 2)]\r\n\r\n ntokens = []\r\n new_labels_temp = []\r\n segment_ids = []\r\n\r\n ntokens.append('[CLS]')\r\n new_labels_temp.append('[CLS]')\r\n segment_ids.append(0)\r\n for i, token in enumerate(tokens):\r\n ntokens.append(token)\r\n new_labels_temp.append(labels_temp[i])\r\n segment_ids.append(0)\r\n ntokens.append('[SEP]')\r\n new_labels_temp.append('[SEP]')\r\n segment_ids.append(0)\r\n input_ids = tokenizer.convert_tokens_to_ids(ntokens)\r\n input_mask = [1] * len(input_ids)\r\n\r\n #if the length is short, tianbu 0\r\n while len(input_ids) < max_seq_length:\r\n input_ids.append(0)\r\n input_mask.append(0)\r\n segment_ids.append(0)\r\n #we do not concerned about it\r\n ntokens.append('NULL')\r\n\r\n assert len(input_ids) == max_seq_length\r\n assert len(input_mask) == max_seq_length\r\n assert len(segment_ids) == max_seq_length\r\n\r\n features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=None))\r\n return features, new_labels_temp", "def splitData(groupList, trainSize):\r\n from sklearn.model_selection import StratifiedShuffleSplit\r\n\r\n groupList[0]['text'] = cleanRealTexts(list(groupList[0]['text']))\r\n\r\n classLabels = np.array([])\r\n for i, group in enumerate(groupList):\r\n classLabels = np.append(classLabels, np.repeat(i, len(group)))\r\n\r\n classData = pd.concat(groupList).reset_index(drop=True)\r\n\r\n splits = list(StratifiedShuffleSplit(n_splits=i,\r\n test_size=1-trainSize,\r\n train_size=trainSize,\r\n random_state=0).split(X=classData, y=classLabels))[0]\r\n trainIdx, testIdx = splits\r\n\r\n trainData = classData.iloc[trainIdx]\r\n testData = classData.iloc[testIdx]\r\n trainLabels = classLabels[trainIdx]\r\n testLabels = classLabels[testIdx]\r\n\r\n return [[trainData, trainLabels], [testData, testLabels]]", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {} # label\n for (i, label) in enumerate(label_list): # ['0', '1']\n label_map[label] = i\n\n features = [] # feature\n for (ex_index, example) in enumerate(examples):\n text_a_id = int(example.text_a_id)\n text_b_id = int(example.text_b_id)\n\n text_a_fields = example.text_a.split(\" _eop_ \")\n \n tokens_a = []\n text_a_subtype = []\n for text_a_field_idx, text_a_field in enumerate(text_a_fields):\n text_a_field_token = tokenizer.tokenize(text_a_field)\n tokens_a.extend(text_a_field_token)\n 
text_a_subtype.extend([text_a_field_idx]*len(text_a_field_token))\n assert len(tokens_a) == len(text_a_subtype)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b) # text_b tokenize\n\n if tokens_b: # if has b\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) # truncate\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[0:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because # (?)\n # the entire model is fine-tuned.\n tokens = []\n segment_ids = []\n subtype_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n subtype_ids.append(0)\n for token_idx, token in enumerate(tokens_a):\n tokens.append(token)\n segment_ids.append(0)\n subtype_ids.append(text_a_subtype[token_idx])\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n subtype_ids.append(1)\n\n if tokens_b:\n for token_idx, token in enumerate(tokens_b):\n tokens.append(token)\n segment_ids.append(1)\n subtype_ids.append(2)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n subtype_ids.append(2)\n\n input_sents = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_sents) # mask\n\n # Zero-pad up to the sequence length.\n while len(input_sents) < max_seq_length:\n input_sents.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n subtype_ids.append(0)\n\n assert len(input_sents) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(subtype_ids) == max_seq_length\n\n label_id = label_map[example.label]\n\n if ex_index%2000 == 0:\n print('convert_{}_examples_to_features'.format(ex_index))\n\n features.append(\n InputFeatures( # object\n text_a_id=text_a_id,\n text_b_id=text_b_id,\n input_sents=input_sents,\n input_mask=input_mask,\n segment_ids=segment_ids,\n subtype_ids=subtype_ids,\n label_id=label_id))\n\n return features", "def get_labels_df():\n labels_df = pd.read_csv('data/train/truth_train.csv', header=None)\n return labels_df", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n if label_list:\n label_map = {label: i for i, label in enumerate(label_list)}\n else:\n label_map = None\n\n features = []\n tokenslist = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n base_tokens = [\"[UNK]\"] + [\"[UNK]\"]*len(tokens_a) + [\"[UNK]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n base_tokens += [\"[UNK]\"]*len(tokens_b) + [\"[UNK]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n baseline_ids = tokenizer.convert_tokens_to_ids(base_tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n baseline_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(baseline_ids) == max_seq_length\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n if label_map:\n label_id = label_map[example.label]\n else:\n label_id = float(example.label)\n if ex_index < 2:\n logger.debug(\"*** Example ***\")\n logger.debug(\"guid: %s\" % (example.guid))\n logger.debug(\"tokens: %s\" % \" \".join(\n [str(x) for x in tokens]))\n logger.debug(\"input_ids: %s\" %\n \" \".join([str(x) for x in input_ids]))\n logger.debug(\"input_mask: %s\" %\n \" \".join([str(x) for x in input_mask]))\n logger.debug(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n logger.debug(\"label: %s (id = %d)\" % (example.label, label_id))\n\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id,\n baseline_ids=baseline_ids))\n tokenslist.append({\"token\":tokens, \"golden_label\":example.label, \"pred_label\":None})\n return features, tokenslist", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n\n label_map = {label: i for i, label in enumerate(label_list)}\n\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n # Modifies `tokens_a` and `tokens_b` in place so that the total\n # length is less than the specified length.\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambigiously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n label_id = label_map[example.label]\n features.append(\n InputFeatures(input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n label_id=label_id))\n return features", "def dataloader_vatexEnglish_test(args, tokenizer, subset=\"test\"):\n\n vatexEnglish_dataset = VATEXENGLISH_multi_sentence_dataLoader(\n subset=subset,\n data_path=args.data_path,\n features_path=args.features_path,\n max_words=args.max_words,\n feature_framerate=args.feature_framerate,\n tokenizer=tokenizer,\n max_frames=args.max_frames,\n )\n\n dataloader = DataLoader(\n vatexEnglish_dataset,\n batch_size=args.batch_size_val,\n num_workers=args.num_thread_reader,\n shuffle=False,\n drop_last=False,\n )\n return dataloader, len(vatexEnglish_dataset)", "def splits(cls, text_field, label_field, shuffle=True ,root='.',path=\"../../../datasets/HateSPic/MMHS50K/lstm_data_noOtherHard/\", **kwargs):\n\n # LOAD image texts\n img_txt_path = '../../../datasets/HateSPic/MMHS50K/lstm_data/tweets.img_txt'\n img_txts = {}\n for line in open(img_txt_path,'r'):\n id = int(line.strip(',')[0])\n text = line.strip(',')[1].replace('\\n', '').replace('\\r', '')\n img_txts[id] = text\n\n # LOAD img_ids vocab to have them in vocab\n all_img_ids_examples = cls(text_field, label_field, img_txts = img_txts, path=path, split='all_img_ids', **kwargs).examples\n\n train_examples = cls(text_field, label_field, img_txts = img_txts, path=path, split='train', **kwargs).examples\n if shuffle: random.shuffle(train_examples)\n\n dev_examples = cls(text_field, label_field, img_txts = img_txts, path=path, split='val', **kwargs).examples\n if shuffle: random.shuffle(dev_examples)\n\n # dev_index = int(len(examples) - 0.05 * len(examples))\n # train_examples = examples[0:dev_index]\n # dev_examples = examples[dev_index:]\n # random.shuffle(train_examples)\n # random.shuffle(dev_examples)\n\n print('train:',len(train_examples),'dev:',len(dev_examples))\n return cls(text_field, label_field, examples=train_examples), cls(text_field, label_field, examples=dev_examples), cls(text_field, label_field, examples=all_img_ids_examples)", "def process_wiki_tokenized() -> pd.DataFrame:\n text_ids = []\n text_string = []\n articles = []\n text_ids_intro = []\n\n with open(WIKI_ARTICLES_TOKENIZED_PATH, \"r\") as json_file:\n json_list = list(json_file)\n\n for json_str in tqdm(json_list):\n result = json.loads(json_str)\n sections = result[\"tokenized_text\"]\n raw_text = result[\"raw_text\"]\n\n if not sections:\n continue\n\n # The original structure of a Wikipedia article is article <- sections <- paragraphs <- sentences <- words\n # This removes the `sections` dimension\n article_text_ids = list(itertools.chain.from_iterable(sections))\n article_raw_text = list(itertools.chain.from_iterable(raw_text))\n\n if not article_text_ids:\n continue\n\n if sections[0]:\n article_text_ids_intro = sections[0]\n else:\n article_text_ids_intro = [article_text_ids[0]]\n # Workaround for the cases where the introduction is null\n\n text_ids.append(article_text_ids)\n text_string.append(article_raw_text)\n articles.append(clean_title(result[\"title\"]))\n 
text_ids_intro.append(article_text_ids_intro)\n\n return pd.DataFrame(\n list(zip(articles, text_ids, text_string, text_ids_intro)),\n columns=[\"article\", \"text_ids\", \"raw_text\", \"text_ids_intro\"],\n )", "def train_transpose(string):\r\n \r\n data = []\r\n linedata = []\r\n worddata = []\r\n for letter in string:\r\n if letter == \"\\n\":\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n linedata = []\r\n worddata = []\r\n elif letter == \" \" or letter == \":\":\r\n linedata.append(worddata)\r\n worddata = []\r\n else:\r\n worddata.append(letter)\r\n linedata.append(worddata)\r\n data.append(linedata)\r\n return data", "def load_data():\r\n train = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'train.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n val = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'valid.txt', ['words', 'pos', 'ignore', 'chunk'])) # testa will be our val set\r\n test = convert_corpus_to_lists(ConllCorpusReader('CoNLL-2003', 'test.txt', ['words', 'pos', 'ignore', 'chunk']))\r\n\r\n return train, val, test", "def get_train_data(data, lang):\n train_files = open(os.path.join('traintestsplit', lang+'.trainlist')).read().split()\n return [data[lang][filename+'.npytxt'] for filename in train_files]", "def load_unpacker_dataset(sentences):\n return TFRecordDataset([path.join(TFRUDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'l': tf.FixedLenFeature([NL], tf.float32),\n 't': tf.FixedLenFeature([NT], tf.float32)\n }\n )\n )\\\n .map(\n lambda feature: (feature['l'], feature['s'], feature['t'])\n )", "def create_tokenizer(dataset):\n lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True)\n lang_tokenizer.fit_on_texts([x['input'] for x in dataset])\n return lang_tokenizer", "def get_features(self, para, label_list, tokenizer, max_seq_length):\n\t\tlabel_map = {label : i for i, label in enumerate(label_list)}\n# self.reverse_label_map = {v: k for k, v in label_map.items()}\n\t\tguid = \"%s-%s\" % (\"test\", 1)\n\t\ttext_a = para[\"model_answer\"]\n\t\ttext_b = para[\"candidate_answer\"]\n\t\tlabel = label_list[0]\n\t\texample = InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)\n\t\t\n\t\ttokens_a = tokenizer.tokenize(example.text_a)\n\n\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\tif example.text_b:\n\t\t\ttokens_b = tokenizer.tokenize(example.text_b)\n\t\t\t# Modifies `tokens_a` and `tokens_b` in place so that the total\n\t\t\t# length is less than the specified length.\n\t\t\t# Account for [CLS], [SEP], [SEP] with \"- 3\"\n\t\t\tself._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n\t\telse:\n\t\t\t# Account for [CLS] and [SEP] with \"- 2\"\n\t\t\tif len(tokens_a) > max_seq_length - 2:\n\t\t\t\ttokens_a = tokens_a[:(max_seq_length - 2)]\n\n\t\t# The convention in BERT is:\n\t\t# (a) For sequence pairs:\n\t\t# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n\t\t# (b) For single sequences:\n\t\t# tokens: [CLS] the dog is hairy . [SEP]\n\t\t# type_ids: 0 0 0 0 0 0 0\n\t\t#\n\t\t# Where \"type_ids\" are used to indicate whether this is the first\n\t\t# sequence or the second sequence. The embedding vectors for `type=0` and\n\t\t# `type=1` were learned during pre-training and are added to the wordpiece\n\t\t# embedding vector (and position vector). 
This is not *strictly* necessary\n\t\t# since the [SEP] token unambigiously separates the sequences, but it makes\n\t\t# it easier for the model to learn the concept of sequences.\n\t\t#\n\t\t# For classification tasks, the first vector (corresponding to [CLS]) is\n\t\t# used as as the \"sentence vector\". Note that this only makes sense because\n\t\t# the entire model is fine-tuned.\n\t\ttokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n\t\tsegment_ids = [0] * len(tokens)\n\n\t\tif tokens_b:\n\t\t\ttokens += tokens_b + [\"[SEP]\"]\n\t\t\tsegment_ids += [1] * (len(tokens_b) + 1)\n\n\t\tinput_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n\t\t# The mask has 1 for real tokens and 0 for padding tokens. Only real\n\t\t# tokens are attended to.\n\t\tinput_mask = [1] * len(input_ids)\n\n\t\t# Zero-pad up to the sequence length.\n\t\tpadding = [0] * (max_seq_length - len(input_ids))\n\t\tinput_ids += padding\n\t\tinput_mask += padding\n\t\tsegment_ids += padding\n\n\t\tassert len(input_ids) == max_seq_length\n\t\tassert len(input_mask) == max_seq_length\n\t\tassert len(segment_ids) == max_seq_length\n\t\tlabel_id = label_map[example.label]\n# print(\"*** Example ***\")\n# print(\"guid: %s\" % (example.guid))\n# print(\"tokens: %s\" % \" \".join(\n# [str(x) for x in tokens]))\n# print(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n# print(\"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n# print(\"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n\n\t\t\n\t\treturn InputFeatures(input_ids=input_ids,\n\t\t\t\t\t\t\t input_mask=input_mask,\n\t\t\t\t\t\t\t segment_ids=segment_ids,\n\t\t\t\t\t\t\t label_id=label_id)", "def load_testset(self, fn):\n w = codecs.open(fn, 'r', 'utf-8')\n data = w.read().split('\\n')[:-1]\n w.close()\n\n # split labels and sentences\n data = [i.split(':') for i in data]\n # reverse elements and connect subsentences in case of additional colons\n self.test_set = [(':'.join(z[1:]), z[0]) for z in data]\n return self.test_set", "def pretrain(texts_list: List[List[str]]) -> Any:\n \n return None", "def load_data(args) -> pd.DataFrame:\n\n df = pd.read_csv(os.path.join(args.data_dir, args.training_file), delimiter=\"\\t\").sample(frac=1, random_state=args.random_seed).reset_index(drop=True)\n df_test = pd.read_csv(os.path.join(args.data_dir, args.testing_file), delimiter=\"\\t\")\n\n # startified validation split\n if not args.use_custom_split:\n train_df, valid_df = train_test_split(\n df, stratify=df[args.label_col], test_size=args.split_size\n )\n # add is_valid column\n train_df[args.validation_col] = False\n valid_df[args.validation_col] = True\n df = pd.concat([train_df, valid_df]).reset_index(drop=True)\n # free up memory\n del train_df, valid_df\n\n return df, df_test", "def load_word_samples_and_labels(data_path, header=True, train=True):\n if header:\n start_index = 1\n else:\n start_index = 0\n\n with open(data_path, 'r', encoding='utf-8') as f:\n lines = f.read().splitlines()[start_index:]\n word_samples = [line.split(',')[2] for line in lines]\n word_samples = [word_sample.split() for word_sample in word_samples]\n\n if train:\n labels = [int(line.split(',')[3]) for line in lines]\n else:\n labels = []\n\n return word_samples, labels", "def transform_texts(model, texts_series):\n\n # falls ein unbekanntes wort vorkommt, dieses ignorieren, d.h. 
auch\n # nicht zur gesamtzahl der worte zählen, durch die am Ende geteilt wird\n \n n_dims = model.layer1_size # 100 by default\n text_vectors = []\n n_broken_rows = 0\n n_unknown_words = 0\n \n for j, text in enumerate(texts_series):\n tokens = text.split()\n \n known_words = [w for w in tokens if w in model.wv] # only consider words in the dictionary of the w2v model\n if len(tokens) != len(known_words):\n # print(f\"Text: {text} ({len(tokens)} total, {len(known_words)} known)\")\n n_unknown_words += len(tokens) - len(known_words)\n if len(known_words) == 0:\n print(colored(f'\"{text}\" enthält 0 bekannte Wörter!', 'red'))\n \n text_vectors.append([np.nan for i in range(n_dims)])\n n_broken_rows += 1\n continue # to next text\n \n text_vector = [\n np.mean([model.wv[word][i] for word in known_words])\n for i in range(n_dims)\n ]\n text_vectors.append(text_vector)\n \n df = pd.DataFrame(text_vectors)\n #df.to_csv(f'data/text_features_{n_dims}dim.csv', index=False)\n\n #print(f\"{len(texts_series)} texts vectorized in total ({n_unknown_words/len(texts_series):.3f} mean unknown words per text.)\")\n c = 'green' if n_broken_rows == 0 else 'red'\n #print(colored(f\"{n_broken_rows} invalid input texts!\", c))\n\n return df", "def set_lm_labels(dataset, vocab, stm_lex, stm_win=3):\n n_records = len(dataset)\n for i in range(n_records):\n words = dataset[i]['words']\n # labels of language modeling and sentiment aware language modeling\n lm_labels_f, lm_labels_b = [], []\n n_w = len(words)\n # language modeling in forward direction\n for j in range(n_w):\n if j == n_w - 1:\n next_word = -1\n else:\n if words[j+1] in stm_lex:\n next_word = stm_lex[words[j+1]]\n else:\n next_word = -1\n next_word = -1* next_word + 1\n lm_labels_f.append(next_word)\n for j in range(n_w-1, -1, -1):\n if j == 0:\n next_word = -1\n else:\n if words[j-1] in stm_lex:\n next_word = stm_lex[words[j-1]]\n else:\n next_word = -1\n next_word = -1* next_word + 1\n lm_labels_b.append(next_word)\n dataset[i]['lm_labels_f'] = list(lm_labels_f)\n dataset[i]['lm_labels_b'] = list(lm_labels_b)[::-1]\n # sentiment aware language modeling\n stm_lm_labels = []\n opn_labels = []\n for j in range(n_w):\n if words[j] in stm_lex:\n opn_labels.append(1)\n else:\n opn_labels.append(0)\n # left boundary of sentimental context\n stm_ctx_lb = j - stm_win\n if stm_ctx_lb < 0:\n stm_ctx_lb = 0\n stm_ctx_rb = j + stm_win + 1\n left_ctx = words[stm_ctx_lb:j]\n right_ctx = words[j+1:stm_ctx_rb]\n stm_ctx = left_ctx + right_ctx\n flag = False\n for w in stm_ctx:\n if w in stm_lex:\n flag = True\n break\n if flag:\n stm_lm_labels.append(1)\n else:\n stm_lm_labels.append(0)\n dataset[i]['stm_lm_labels'] = list(stm_lm_labels)\n dataset[i]['opn_labels'] = list(opn_labels)\n return dataset", "def load_data(file_fake, file_real):\r\n # load the data\r\n fake_dt = [line.strip() for line in open(file_fake, 'r')]\r\n real_dt = [line.strip() for line in open(file_real, 'r')]\r\n dataset = fake_dt + real_dt\r\n label = [1] * len(fake_dt) + [0] * len(real_dt)\r\n with_label = [(item, 1)for item in fake_dt] + [(item, 0)for item in real_dt]\r\n\r\n # vectorize the text file into a sparse matrix\r\n vec = CountVectorizer()\r\n headlines = vec.fit_transform(dataset)\r\n hl_names = vec.get_feature_names()\r\n\r\n # randomize dataset order and split into training, test, and validation sets\r\n hl_train, hl_temp, label_train, label_temp = train_test_split(headlines, label, test_size=0.3, random_state=30)\r\n hl_test, hl_val, label_test, label_val = 
train_test_split(hl_temp, label_temp, test_size=0.5, random_state=30)\r\n\r\n return hl_train, hl_test, hl_val, label_train, label_test, label_val", "def run():\r\n \r\n LABEL = data.LabelField(use_vocab=True)\r\n TEXT = data.Field(sequential=True, tokenize=lambda x:x.split(), lower=True, fix_length=config.MAX_LENGTH)\r\n\r\n### 1/5\r\n dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n # split the dataset, 8:2\r\n train_dataset, valid_dataset = dataset.split(split_ratio=[0.8,0.2], random_state=random.getstate())\r\n \r\n test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n format='csv', \r\n fields=[('text', TEXT),('label', LABEL)], \r\n skip_header=True)\r\n \r\n### 2\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n# valid_dataset = data.TabularDataset(path=config.VAL_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# test_data = data.TabularDataset(path=config.TEST_DATASET_FNAME,\r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n \r\n### 3/4\r\n# train_dataset = data.TabularDataset(path=config.TRAIN_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True) \r\n \r\n# dataset = data.TabularDataset(path=config.TEST_DATASET_FNAME, \r\n# format='csv', \r\n# fields=[('text', TEXT),('label', LABEL)], \r\n# skip_header=True)\r\n# # split the dataset, 5:5\r\n# valid_dataset, test_data = dataset.split(split_ratio=[0.5,0.5], random_state=random.getstate())\r\n\r\n### 5\r\n\r\n\r\n\r\n # load embeddings\r\n vectors_data = load_vectors(config.EMBEDDING_FNAME)\r\n\r\n TEXT.build_vocab(train_dataset, vectors=vectors_data)\r\n LABEL.build_vocab(train_dataset)\r\n print ('vector size:',TEXT.vocab.vectors.size())\r\n embedding_pretrained_matrix = TEXT.vocab.vectors\r\n \r\n # create torch device\r\n print(\"To device...\")\r\n USE_CUDA = torch.cuda.is_available()\r\n device = torch.device(\"cuda\" if USE_CUDA else \"cpu\")\r\n\r\n train_it, valid_it = data.BucketIterator.splits((train_dataset, valid_dataset),\r\n batch_sizes=(config.TRAIN_BATCH_SIZE,config.VAL_BATCH_SIZE), \r\n device=device, \r\n sort_key=lambda x: len(x.text),\r\n sort_within_batch=False,\r\n shuffle=True,\r\n repeat=False)\r\n test_it = data.BucketIterator(test_data, \r\n batch_size=config.TEST_BATCH_SIZE, \r\n sort_key=lambda x: len(x.text), \r\n shuffle=False,\r\n device=device)\r\n \r\n \r\n # fetch model\r\n vocab_size = len(TEXT.vocab) # TEXT.vocab.vectors.size()\r\n# pretrained_vec = TEXT.vocab.vectors\r\n \r\n # selecte network \r\n x = import_module('networks.'+config.NETWORK)\r\n model = x.Model(vocab_size,embedding_pretrained=embedding_pretrained_matrix)\r\n \r\n # send model to device\r\n model.to(device)\r\n\r\n # initialize Adam optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=config.LEARNING_RATE)\r\n\r\n # if you have multiple GPUs, model model to DataParallel to use multiple GPUs\r\n if torch.cuda.device_count() > 1:\r\n model = nn.DataParallel(model)\r\n \r\n params_list = []\r\n # train and validate for all epochs\r\n for epoch in range(config.EPOCHS):\r\n epoch_start_time = time.time()\r\n\r\n ###----Train--------\r\n train_outputs, train_labels, train_loss = engine.train_fn(train_it, model, optimizer, 
device)\r\n train_outputs = torch.Tensor(train_outputs)\r\n _, train_predicted = torch.max(train_outputs, dim=1)\r\n train_parameters_dict = metrics_func.performance_evaluation_func(train_predicted,train_labels,epoch=str(epoch))\r\n # save train paremeters\r\n params_list.append(train_parameters_dict)\r\n train_f1 = train_parameters_dict['f1_score_macro']\r\n train_prec = train_parameters_dict['precision_macro']\r\n train_recall = train_parameters_dict['precision_macro']\r\n print('\\n')\r\n print(f\" Train Epoch: {epoch}, F1 = {train_f1},precision = {train_prec},recall = {train_recall}\")\r\n ###------------\r\n \r\n # validate\r\n val_outputs, val_labels, valid_loss = engine.evaluate_fn(valid_it, model, device)\r\n val_outputs = torch.Tensor(val_outputs)\r\n _, val_predicted = torch.max(val_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n val_parameters_dict = metrics_func.performance_evaluation_func(val_predicted, val_labels, epoch=str(epoch),flag='val')\r\n # save evaluation paremeters\r\n params_list.append(val_parameters_dict)\r\n \r\n val_f1 = val_parameters_dict['f1_score_macro']\r\n val_prec = val_parameters_dict['precision_macro']\r\n val_recall = val_parameters_dict['recall_macro']\r\n print(f\"Val Epoch: {epoch},F1 = {val_f1},precision = {val_prec}, recall = {val_recall}\")\r\n \r\n ###-------Test-----------------------\r\n test_outputs, test_labels, test_loss = engine.evaluate_fn(test_it, model, device)\r\n test_outputs = torch.Tensor(test_outputs)\r\n _, test_predicted = torch.max(test_outputs, dim=1) \r\n # calculate evaluation paremeters\r\n test_parameters_dict = metrics_func.performance_evaluation_func(test_predicted, test_labels, epoch=str(epoch),flag='test')\r\n # save evaluation paremeters\r\n params_list.append(test_parameters_dict)\r\n \r\n test_f1 = test_parameters_dict['f1_score_macro']\r\n test_prec = test_parameters_dict['precision_macro']\r\n test_recall = test_parameters_dict['recall_macro']\r\n print(f\"test Epoch: {epoch},F1 = {test_f1},precision = {test_prec}, recall = {test_recall}\")\r\n \r\n lr_scheduler = LRScheduler(optimizer)\r\n lr_scheduler(valid_loss)\r\n \r\n \r\n # simple early stopping\r\n# val_f1 = float(val_f1)\r\n #f1 = (float(train_f1) + float(val_f1)) / 2\r\n val_loss = float(valid_loss)\r\n early_stopping(val_loss, model)\r\n if early_stopping.early_stop:\r\n print(\"Early stopping\")\r\n break\r\n # 获得 early stopping 时的模型参数\r\n# model.load_state_dict(torch.load('checkpoint.pt'))\r\n\r\n# save_model_func(model, epoch, path='outputs')\r\n \r\n metrics_func.save_parameters_txt(params_list)", "def _train_val_split(self, df, val_split):\n # Compute the number of validation examples\n val_size = round(df.shape[0] * val_split)\n\n # Compute validation examples by keeping all questions related\n # to the same context within the same split\n val_actual_size = 0\n val_keys = []\n for t, n in df[\"title\"].value_counts().to_dict().items():\n if val_actual_size + n > val_size:\n break\n val_keys.append(t)\n val_actual_size += n\n\n # Build the train and validation DataFrames\n train_df = df[~df[\"title\"].isin(val_keys)].reset_index(drop=True)\n val_df = df[df[\"title\"].isin(val_keys)].reset_index(drop=True)\n return train_df, val_df", "def load_data():\r\n from sklearn.feature_extraction.text import CountVectorizer\r\n\r\n # Load the data\r\n\r\n with open(\"clean_real.txt\", 'r') as RealNews:\r\n RealStrAr = RealNews.read().split('\\n')\r\n\r\n with open(\"clean_fake.txt\", 'r') as FakeNews:\r\n FakeStrAr = 
FakeNews.read().split('\\n')\r\n\r\n # Preprocess it using a vectorizer\r\n\r\n MyCoolVectorizer = CountVectorizer()\r\n X = MyCoolVectorizer.fit_transform(RealStrAr + FakeStrAr)\r\n\r\n RealLabels = np.ones((len(RealStrAr), 1)) # means real\r\n FakeLabels = np.zeros((len(FakeStrAr), 1)) # means fake\r\n AllLabels = np.append(RealLabels, FakeLabels, axis=0)\r\n\r\n FinalTensor = np.append(X.toarray(), AllLabels, axis=1)\r\n\r\n # Randomize it and split it\r\n\r\n np.random.shuffle(FinalTensor)\r\n\r\n # divide and multiply by 2 just to make sure it's even\r\n ROUGHLY70 = 2 * ((FinalTensor.shape[0] * 70 / 100) / 2)\r\n ROUGHLY15 = (FinalTensor.shape[0] - ROUGHLY70) / 2\r\n\r\n # TEST SET VALIDATION SET TRAINING SET DICTIONARY\r\n return (FinalTensor[:ROUGHLY15], FinalTensor[ROUGHLY15 : 2 * ROUGHLY15], FinalTensor[-ROUGHLY70:], MyCoolVectorizer.get_feature_names())", "def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):\n label_map = {label: i for i, label in enumerate(label_list)}\n features = []\n for (ex_index, example) in enumerate(examples):\n tokens_a = tokenizer.tokenize(example.text_a)\n tokens_b = None\n if example.text_b:\n tokens_b = tokenizer.tokenize(example.text_b)\n _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)\n else:\n if len(tokens_a) > max_seq_length - 2:\n tokens_a = tokens_a[:(max_seq_length - 2)]\n ####\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n segment_ids = [0] * len(tokens)\n ####\n if tokens_b:\n tokens += tokens_b + [\"[SEP]\"]\n segment_ids += [1] * (len(tokens_b) + 1)\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n ####\n input_mask = [1] * len(input_ids)\n ####\n padding = [0] * (max_seq_length - len(input_ids))\n input_ids += padding\n input_mask += padding\n segment_ids += padding\n ####\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n ####\n label_id = label_map[example.label]\n in_f = InputFeatures(\n input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id\n )\n in_f.tokens = tokens\n features.append(in_f)\n return features", "def create_train_label_tokenizer(data):\n data.label_tokenizer = Tokenizer(\n filters=\"\", split=data.arg.split, oov_token=\"<unk>\"\n )\n\n # open training data file\n input_file_path = os.path.join(data.full_train_path, data.arg.input_file)\n with open(input_file_path, encoding=data.arg.encode_mode) as fin:\n\n # store each line (ending with \\n) in a list of list of\n lines = fin.readlines()\n lines = [line.strip().lower().split(\"\\t\") for line in lines]\n try:\n _, _, intent = zip(*lines)\n except Exception:\n print(lines)\n raise FileNotFoundError(\"The input training data file is invalid!\")\n\n # Updates internal {index: word} and {index: doc} vocabularies\n # based on the list of utterances, their IOB tags and their intent label\n data.label_tokenizer.fit_on_texts(intent)\n\n # record acquired knowledge\n with open(\"./knowledge\", 'w') as file: \n file.write(\"Learnt intents from training corpus:\\n\\n\")\n for key, value in data.label_tokenizer.index_word.items():\n file.write(f\"{key} : {value}\\n\")\n return data", "def pre_process_dataset(self):\n sentences = []\n idx = 1\n # Iterates of dataframe to collect sentences and labels\n for index, row in self.df.iterrows():\n # Normalizing and separate words of each sentence\n norm_sentence = self.norm_text(row['comment_text'])\n word_sentences = re.sub(\"[^\\w]\", \" \", norm_sentence).split()\n 
sentences.append(word_sentences)\n # Creating a word dictionary\n for word in word_sentences:\n if word not in self.word_2_idx:\n self.word_2_idx[word] = idx\n idx += 1\n # Getting all labels and creates a one-hot vector\n row_label = row[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']].values\n self.labels.append(row_label)\n\n # Collect word indexes from prepared word dictionary\n for words_sentence in sentences:\n self.input_data.append([self.word_2_idx[w] for w in words_sentence])", "def split_data(paragraphs):\n para_count = len(paragraphs)\n training_index = int(para_count * 0.7)\n validation_index = int(para_count * 0.9)\n training_data = paragraphs[:training_index]\n validation_data = paragraphs[training_index:validation_index]\n test_data = paragraphs[validation_index:]\n return training_data, validation_data, test_data", "def load_test_data(label_fname, data_fname):\n labels = load_csv(label_fname)\n data = load_csv(data_fname, 'excel-tab')\n\n # Join all data together on the ids given in the files\n joined_data = {}\n for label in labels:\n id = label[0]\n joined_data[id] = {'class': label[1]}\n for rec in data:\n id = rec[0]\n if id in joined_data:\n joined_data[id]['data'] = rec[1]\n\n # Clean and convert the data to reals\n max_features = 0\n for id in joined_data:\n words = clean_text(joined_data[id]['data'])\n reals = convert_to_reals(words)\n joined_data[id]['data'] = reals\n if len(reals) > max_features:\n max_features = len(reals)\n\n # Pad the data\n for id in joined_data:\n reals = joined_data[id]['data']\n joined_data[id]['data'] = reals + (max_features - len(reals)) * [0.0]\n\n # Prepare the data for training\n training_data = np.array([joined_data[id]['data'] for id in joined_data])\n training_labels = [joined_data[id]['class'] == 'OFF' for id in joined_data]\n return training_labels, training_data, max_features", "def from_lines(cls, lines: List[str], mode: str):\n for line in lines:\n if line.startswith('Original Input'):\n _input = line[line.find(':') + 1 :].strip()\n elif line.startswith('Predicted Str'):\n pred = line[line.find(':') + 1 :].strip()\n elif line.startswith('Ground-Truth'):\n target = line[line.find(':') + 1 :].strip()\n elif line.startswith('Ground Classes'):\n classes = line[line.find(':') + 1 :].strip()\n return cls(_input, target, pred, classes, mode)", "def get_dataset_features(text):\n return model.extract(text)" ]
[ "0.6267126", "0.6177644", "0.6156957", "0.614642", "0.6129366", "0.612271", "0.6122403", "0.60591143", "0.5948062", "0.59287256", "0.592589", "0.5915757", "0.58811563", "0.5854081", "0.58518916", "0.5847722", "0.58189434", "0.58174044", "0.58112806", "0.58017504", "0.5774996", "0.57625914", "0.5726544", "0.5725933", "0.5721964", "0.5704764", "0.57015544", "0.56984866", "0.5696608", "0.5686199", "0.5680797", "0.5668954", "0.5662379", "0.5658515", "0.56447643", "0.5641292", "0.5640968", "0.56355345", "0.56342494", "0.56193364", "0.5617289", "0.56168795", "0.56167936", "0.5614845", "0.5612039", "0.5602774", "0.5601326", "0.5598765", "0.55962", "0.5594484", "0.55869615", "0.5582409", "0.55779094", "0.5565303", "0.55579203", "0.55559105", "0.5541169", "0.5531335", "0.55259234", "0.5513646", "0.5512927", "0.5512927", "0.5510614", "0.55102843", "0.5507909", "0.55052215", "0.5502617", "0.55016506", "0.5500607", "0.5495053", "0.54947597", "0.5494573", "0.5481173", "0.5473979", "0.5469571", "0.54661137", "0.54655355", "0.54644203", "0.5463451", "0.5462712", "0.54505455", "0.544664", "0.54404926", "0.5424817", "0.54221505", "0.54210895", "0.54210854", "0.5416662", "0.541623", "0.5415165", "0.54123294", "0.5412039", "0.54116166", "0.5410248", "0.54093987", "0.5406306", "0.5404075", "0.5398512", "0.53959346", "0.5394398" ]
0.6742829
0
Reads a GloVe word embedding text file and generates a DataFrame with the embeddings.
def process_glove_data(filename): word_list = [] embed_list = [] with open(filename,encoding="utf8") as file: lines = file.readlines() for line in lines: toks = line.split(' ') word_list.append(toks[0]) vec = [float(tok) for tok in toks[1:]] embed_list.append(vec) embed = np.array(embed_list,dtype=float) embed_df = pd.DataFrame(embed,index=word_list) embed_df.index = embed_df.index.str.lower() return embed_df
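A minimal usage sketch for process_glove_data above, assuming the standard GloVe text format (one token per line, followed by its space-separated float components); the file path and the lookup token are illustrative assumptions, not values taken from this file.

import numpy as np   # process_glove_data itself depends on numpy and pandas
import pandas as pd

embed_df = process_glove_data("glove.6B.50d.txt")   # assumed path to a GloVe .txt file
print(embed_df.shape)                               # (vocab size, embedding dim), e.g. (400000, 50)
king_vec = embed_df.loc["king"].to_numpy()          # one row is the vector for the lowercased token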
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_embeddings(filename):\n labels = []\n rows = []\n with open(filename, encoding='utf-8') as infile:\n for i, line in enumerate(infile):\n items = line.rstrip().split(' ')\n if len(items) == 2:\n # This is a header row giving the shape of the matrix\n continue\n labels.append(items[0])\n values = np.array([float(x) for x in items[1:]], 'f')\n rows.append(values)\n\n arr = np.vstack(rows)\n return pd.DataFrame(arr, index=labels, dtype='f')", "def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict", "def load_glove_data():\n glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')\n f = open(glove_path,'r')\n \n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n \n return model", "def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()", "def read_txt_embeddings(path, params):\n word2id = {}\n vectors = []\n\n # load pretrained embeddings\n _emb_dim_file = params.emb_dim\n with io.open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore') as f:\n for i, line in enumerate(f):\n if i == 0:\n split = line.split()\n assert len(split) == 2\n assert _emb_dim_file == int(split[1])\n continue\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n if word in word2id:\n logger.warning(\"Word \\\"%s\\\" found twice!\" % word)\n continue\n if not vect.shape == (_emb_dim_file,):\n logger.warning(\"Invalid dimension (%i) for word \\\"%s\\\" in line %i.\"\n % (vect.shape[0], word, i))\n continue\n assert vect.shape == (_emb_dim_file,)\n word2id[word] = len(word2id)\n vectors.append(vect[None])\n\n assert len(word2id) == len(vectors)\n logger.info(\"Loaded %i pretrained word embeddings from %s\" % (len(vectors), path))\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate(vectors, 0)\n embeddings = torch.from_numpy(embeddings).float()\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings", "def load_embeddings(embedding_path):\n print('loading word embeddings from %s' % embedding_path)\n weight_vectors = []\n word_idx = {}\n with codecs.open(embedding_path, encoding='utf-8') as f:\n for line in f:\n word, vec = line.split(u' ', 1)\n word_idx[word] = len(weight_vectors)\n weight_vectors.append(np.array(vec.split(), dtype=np.float32))\n # Annoying implementation detail; '(' and ')' are replaced by '-LRB-' and\n # '-RRB-' respectively in the parse-trees.\n word_idx[u'-LRB-'] = word_idx.pop(u'(')\n word_idx[u'-RRB-'] = word_idx.pop(u')')\n # Random embedding vector for unknown words.\n weight_vectors.append(np.random.uniform(\n -0.05, 0.05, weight_vectors[0].shape).astype(np.float32))\n return np.stack(weight_vectors), word_idx", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = 
np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def gen_embedding(path):\r\n word_emb = {}\r\n with open(path, encoding='utf-8') as f:\r\n for line in tqdm(f):\r\n values = line.split()\r\n word_emb[values[0]] = np.asarray(values[1:], dtype='float32')\r\n return word_emb", "def load_embeddings(filename):\n count = 0\n matrix = []\n word_map = {}\n with open(filename, encoding=\"utf8\") as f:\n # with open(filename) as f:\n for line in f:\n line = line.strip()\n items = line.split()\n word = items[0]\n rest = items[1:]\n # print(\"word:\", word)\n word_map[word] = count\n count += 1\n\n rest = list(map(float, rest))\n matrix.append(rest)\n matrix = np.array(matrix)\n return word_map, matrix", "def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding", "def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim", "def load_embed_text(embed_file):\n \n emb_dict = dict()\n emb_size = None\n with codecs.getreader(\"utf-8\")(tf.gfile.GFile(embed_file, \"rb\")) as f:\n for line in f:\n tokens = line.strip().split(\" \")\n word = tokens[0]\n vec = list(map(float, tokens[1:]))\n emb_dict[word] = vec\n if emb_size:\n assert emb_size == len(vec), \"All embeddings should be same size\"\n else:\n emb_size = len(vec)\n return emb_dict, emb_size", "def load_embeddings(glove_path, vocab):\n vocab_size = vocab.get_vocab_size()\n words_to_keep = set(vocab.get_index_to_token_vocabulary().values())\n glove_embeddings = {}\n embedding_dim = None\n\n logger.info(\"Reading GloVe embeddings from {}\".format(glove_path))\n with open(glove_path) as glove_file:\n for line in tqdm(glove_file,\n total=get_num_lines(glove_path)):\n fields = line.strip().split(\" \")\n word = fields[0]\n if word in words_to_keep:\n vector = np.asarray(fields[1:], dtype=\"float32\")\n if embedding_dim is None:\n embedding_dim = len(vector)\n else:\n assert embedding_dim == len(vector)\n glove_embeddings[word] = vector\n\n all_embeddings = np.asarray(list(glove_embeddings.values()))\n embeddings_mean = float(np.mean(all_embeddings))\n embeddings_std = float(np.std(all_embeddings))\n logger.info(\"Initializing {}-dimensional pretrained \"\n \"embeddings for {} tokens\".format(\n embedding_dim, vocab_size))\n embedding_matrix = torch.FloatTensor(\n vocab_size, embedding_dim).normal_(\n 
embeddings_mean, embeddings_std)\n # Manually zero out the embedding of the padding token (0).\n embedding_matrix[0].fill_(0)\n # This starts from 1 because 0 is the padding token, which\n # we don't want to modify.\n for i in range(1, vocab_size):\n word = vocab.get_token_from_index(i)\n\n # If we don't have a pre-trained vector for this word,\n # we don't change the row and the word has random initialization.\n if word in glove_embeddings:\n embedding_matrix[i] = torch.FloatTensor(glove_embeddings[word])\n return embedding_matrix", "def get_word_embeddings(self):\n embedding_index = {}\n with open('./glove/glove.6B.100d.txt', encoding=\"utf8\") as f:\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n return embedding_index", "def read_old_glove(filepath):\n print('reading glove files:', filepath)\n\n word2idx = {}\n word_embed = [['0'] * 300] # word_embed[0] = [0] * 300, represent the <PAD>\n\n with open(filepath, 'r') as f:\n for idx, line in enumerate(f):\n line_list = line.split()\n word = ' '.join(line_list[: len(line_list)-300])\n embed = [num for num in line_list[len(line_list)-300:]]\n\n word2idx[word] = idx + 1\n word_embed.append(embed)\n\n return word2idx, word_embed", "def load_embeddings(path):\r\n\r\n embeds = dict() # dictionary mapping words to vectors\r\n for line in open(path, encoding='utf-8'):\r\n row = line.strip().split('\\t')\r\n embeds[row[0]] = np.array(row[1:], dtype=np.float32)\r\n\r\n embeddings_dim = embeds[list(embeds)[0]].shape[0]\r\n\r\n return embeds, embeddings_dim", "def load_word_vectors(filepath, word_index, vector_size):\n embedding_matrix = np.zeros((len(word_index) + 1, vector_size))\n\n fin = io.open(filepath, \"r\", encoding=\"utf-8\", newline=\"\\n\", errors=\"ignore\")\n n, d = map(int, fin.readline().split())\n\n for line in fin:\n tokens = line.rstrip().split(\" \")\n if tokens[0] in word_index:\n w = word_index[tokens[0]]\n embedding_matrix[w] = np.fromiter(map(float, tokens[1:]), \"float\")\n\n return embedding_matrix", "def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300", "def load_embed(file_name, vocab_size):\n\n with tf.io.gfile.Open(file_name, 'r') as 
embed_file:\n vocab = []\n embeds = []\n depth = -1\n for index, line in enumerate(embed_file):\n if vocab_size > 0 and index >= vocab_size:\n break\n line = line.strip()\n tokens = line.strip().split(' ')\n word = tokens[0]\n vocab.append(word)\n if depth == -1:\n embed = [float(token) for token in tokens[1:]]\n else:\n embed = [float(token) for token in tokens[-depth:]]\n d = len(embed)\n if depth == -1:\n depth = d\n if d != depth:\n raise ValueError('Inconsistent embedding sizes')\n embeds.append(embed)\n\n embeds = np.stack(embeds)\n\n return vocab, embeds, depth", "def load_embedding(embedding_file_path, word_index, embedding_dim):\n # Create a Numpy Placeholder for Embedding\n max_features = len(word_index)+1\n embedding_weights = np.random.random([max_features, embedding_dim])\n count = 0\n glove_file = open(embedding_file_path)\n for line in glove_file:\n word, vector = line.split(' ')[0], line.split(' ')[1:]\n if word in word_index and word_index[word] <= max_features:\n count += 1\n vector = list(map(float, vector))\n embedding_weights[word_index[word]] = [float(i) for i in vector]\n\n print('Fraction found in glove {}'.format(count/len(embedding_weights)))\n return embedding_weights", "def glove_embedding(self, texts, file):\n self.embedding_dict = dict()\n glove_file = open(file, encoding='utf-8')\n for line in glove_file:\n word_vector = line.split()\n word = word_vector[0]\n word_vector_arr = np.asarray(word_vector[1:], dtype='float32')\n self.embedding_dict[word] = word_vector_arr\n glove_file.close()\n \n i = 0\n with pgb.ProgressBar(max_value=len(texts)) as bar:\n for text in texts:\n vec = []\n text = text.split()\n for t in text:\n try:\n vec.append(self.embedding_dict[t.lower()])\n except KeyError:\n pass\n ## There are no matched words\n if len(vec) == 0:\n print(\"len 0 vec\")\n self.word_vec.append(np.zeros((100)))\n else:\n #print(np.array(vec))\n #print(np.array(vec).shape)\n sentence = self.sentence_vec(np.array(vec))\n #print(sentence)\n #print(sentence.shape)\n self.word_vec.append(sentence)\n i += 1\n bar.update(i)\n self.word_vec = np.array(self.word_vec)\n print(self.word_vec.shape)", "def load_data(self, file_path):\n \n dataset = []\n \n for line in open(file_path):\n arr = line.strip().split('\\t')\n label = [w for w in arr[0].split(' ')]\n sentence = [w for w in arr[1].split(' ')]\n cname = ' '.join(label)\n \n # The line is useless if the class is\n # not in the class dictionary.\n if cname not in self.class_list:\n raise Exception(\"{} not in class list.\".format(cname))\n \n # Build the sample dictionary.\n sample = {}\n sample['sentence_w2v'] = []\n \n for word in sentence:\n if word not in self.w2v.vocab.keys():\n continue # ignore sentence\n \n # In the loading embedding (see self.load_embedding()), we\n # stack one additional layer of zeros in front to handle padding.\n # Thus here we append the embedding index plus one.\n sample['sentence_w2v'].append(torch.Tensor([self.w2v.vocab[word].index + 1]))\n\n sample['length'] = len(sample['sentence_w2v'])\n sample['label_onehot'] = self.onehot(self.class_indices[cname])\n sample['label_w2v'] = self.class_w2v[cname]\n dataset.append(sample)\n \n return dataset", "def load_text_dims(file: Union[str, bytes, int, PathLike],\n lossy: bool = False) -> Embeddings:\n with open(file, encoding='utf8',\n errors='replace' if lossy else 'strict') as inf:\n rows, cols = next(inf).split()\n return _load_text(inf, int(rows), int(cols))", "def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = 
open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index", "def getWordEmbeddingsMatrix(script_directory, embedding_file):\n translator = str.maketrans('', '', string.punctuation)\n all_words = []\n print(\"Loading vocab from text files in:\")\n for d in os.listdir(script_directory):\n print(d)\n for fname in os.listdir(\"%s/%s\" % (script_directory, d)):\n with open(\"%s/%s/%s\" % (script_directory, d, fname), 'r') as f:\n words = [w.translate(translator) for w in f.read().split() if w.translate(translator) != \"\"]\n all_words.extend(words)\n\n model = KeyedVectors.load_word2vec_format(embedding_file, binary=True)\n vocab = {\"PAD\" : 0, \"EOS\" : 1}\n vocab.update({w : i + 2 for i,w in enumerate([w1 for w1 in set(all_words) if w1 in model]) })\n inv_dict = vocab.keys()\n ## Take a minute to load...\n\n vocab_size = len(inv_dict)\n emb_size = 300 # or whatever the size of your embeddings\n embeddings = np.zeros((vocab_size + 1, emb_size))\n for k,v in vocab.items():\n embeddings[v] = model[k]\n vocab[\"UNK\"] = len(vocab.keys())\n embeddings[vocab[\"UNK\"]] = np.ones(emb_size)\n del model\n ## Now we have a numpy matrix of embeddings...\n # x_model = tf.placeholder(tf.int32, shape=[None, input_size])\n # with tf.device(\"/cpu:0\"):\n # embedded_x = tf.nn.embedding_lookup(embeddings, x_model)\n return embeddings, vocab", "def create_embedding_matrix(filepath, word_index, embedding_dim):\n vocab_size = len(word_index) + 1 # Adding again 1 because of reserved 0 index\n embedding_matrix = np.zeros((vocab_size, embedding_dim))\n\n with open(filepath) as f:\n for line in f:\n word, *vector = line.split()\n if word in word_index:\n idx = word_index[word] \n embedding_matrix[idx] = np.array(\n vector, dtype=np.float32)[:embedding_dim]\n\n return embedding_matrix", "def load_embeddings(filepath, vocabulary, retain):\n \n word2index = dict()\n word_vectors = list()\n\n def add_entry(word, vector):\n word2index[word] = len(word2index)\n word_vectors.append(vector)\n\n model = gensim.models.KeyedVectors.load(filepath)\n\n # adding special tokens <FIL>, <UNK> and <NUM>\n dim = model.vector_size\n add_entry('<fil>', np.zeros((dim,)))\n for special in ['<unk>', '<num>']:\n vector = np.random.uniform(-0.025, 0.025, (dim,))\n add_entry(special, vector)\n\n if retain:\n for word, _ in model.vocab.items():\n add_entry(word, model[word])\n else:\n for word in vocabulary:\n if word in model:\n add_entry(word, model[word])\n\n vocabulary = vocabulary.intersection(word2index.keys())\n return word2index, np.asarray(word_vectors)", "def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings", "def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n 
embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict", "def set_glove_embedding(self,fpath,embedding_dim):\n\t\temb = np.random.randn(self._count,embedding_dim)\n#\ttf.logging.info(emb[0])\n\t\twith open(fpath) as f: #python 3.x support \n\t\t\tfor k,line in enumerate(f):\n\t\t\t\tfields = line.split()\n\t\t\t\tif len(fields) - 1 != embedding_dim:\n\t\t\t\t\t# Sometimes there are funny unicode parsing problems that lead to different\n\t\t\t\t\t# fields lengths (e.g., a word with a unicode space character that splits\n\t\t\t\t\t# into more than one colum n). We skip those lines. Note that if you have\n\t\t\t\t\t# some kind of long header, this could result in all of your lines getting\n\t\t\t\t\t# skipped. It's hard to check for that here; you just have to look in the\n\t\t\t\t\t# embedding_misses_file and at the model summary to make sure things look\n\t\t\t\t\t# like they are supposed to.\n\t\t\t\t\t#logger.warning(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t# embedding_dim, len(fields) - 1, line)\n\t\t\t\t\traise Exception(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t\t\t\t\t embedding_dim, len(fields) - 1, line)\n\t\t\t\t\tcontinue\n\t\t\t\tword = fields[0]\n\t\t\t\tif word in self._word_to_id:\n\t\t\t\t\tvector = np.asarray(fields[1:], dtype='float32')\n\t\t\t\t\temb[self._word_to_id[word]] = vector\n#\t\tif k%1000 == 0:\n#\t\t tf.logging.info('glove : %d',k)\n\t\tself.glove_emb = emb", "def load_glove_vectors(filename, vocab):\n dct = {}\n vectors = array.array('d')\n current_idx = 0\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n for _, line in enumerate(f):\n tokens = line.split(\" \")\n word = tokens[0]\n entries = tokens[1:]\n if not vocab or word in vocab:\n dct[word] = current_idx\n vectors.extend(float(x) for x in entries)\n current_idx += 1\n word_dim = len(entries)\n num_vectors = len(dct)\n return [np.array(vectors).reshape(num_vectors, word_dim), dct]", "def load_glove(file_path: str, return_embedding_size: bool=False) ->Dict[str, np.ndarray]:\n logger.info(f' Loading Glove format file {file_path}')\n embeddings = {}\n embedding_size = 0\n with open_file(file_path, 'r', encoding='utf-8') as f:\n found_line = False\n while not found_line:\n line = f.readline()\n if line:\n embedding_size = len(line.split()) - 1\n found_line = True\n with open_file(file_path, 'r', encoding='utf-8') as f:\n for line_number, line in enumerate(f):\n if line:\n try:\n split = line.split()\n if len(split) != embedding_size + 1:\n raise ValueError(f'Line {line_number} is of length {len(split)}, while expected length is {embedding_size + 1}.')\n word = split[0]\n embedding = np.array([float(val) for val in split[-embedding_size:]])\n embeddings[word] = embedding\n except ValueError:\n logger.warning('Line {} in the GloVe file {} is malformed, skipping it'.format(line_number, file_path))\n logger.info(f' {len(embeddings)} embeddings loaded')\n if return_embedding_size:\n return embeddings, embedding_size\n return embeddings", "def get_word_embeddings(t, folder, lang=\"en\"):\n vecs_url = 
f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz\"\n vecs_gz_filename = vecs_url.rpartition(\"/\")[2]\n os.makedirs(folder, exist_ok=True)\n vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)\n\n tokenizer_vocab_size = len(t.vocab)\n\n if wait_for_file_stable(vecs_gz_filepath):\n print(\"Using existing embeddings file\")\n else:\n print(\"Downloading word vectors...\")\n subprocess.run([\" \".join([\"wget\", \"-NP\", folder, vecs_url])], check=True, shell=True)\n\n print(\"Loading into memory...\")\n embeddings_index = dict()\n with gzip.open(vecs_gz_filepath, \"rt\") as zipf:\n firstline = zipf.readline()\n emb_vocab_size, emb_d = firstline.split(\" \")\n emb_vocab_size = int(emb_vocab_size)\n emb_d = int(emb_d)\n for line in zipf:\n values = line.split()\n word = values[0]\n # Only load subset of the embeddings recognised by the tokenizer:\n if word in t.vocab.stoi:\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_index[word] = coefs\n print(\"Loaded {} of {} word vectors for tokenizer vocabulary length {}\".format(\n len(embeddings_index),\n emb_vocab_size,\n tokenizer_vocab_size,\n ))\n\n # create a weight matrix for words in training docs\n embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))\n for word, i in t.vocab.stoi.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix", "def _load_glove_vec(fname, vocab):\n print 'load glove...'\n word_vecs = {}\n cnt = 0\n l = open(fname,'r').readline()\n embedding_size = len(l.strip().split()) -1\n print 'embedding vector size: %d'%(embedding_size)\n with open(fname, \"r\") as f:\n for l in f:\n stemp = l.strip().split(' ',1)\n assert len(stemp) == 2\n word = stemp[0]\n if word in vocab:\n word_vecs[stemp[0]] = np.fromstring(' '.join(stemp[1:]),sep = ' ')\n cnt+=1\n if cnt%10000==0:\n print '%d lines...'%cnt\n return (word_vecs,embedding_size)", "def glove(data_fname='glove.840B.300d.txt', out_fname='glove.pkl'):\n words, U, dim = [], [], None\n with open(DATA_DIR + data_fname, 'rb') as f:\n for j, line in enumerate(f):\n x = line.strip().split()\n word, vector, d = x[0], np.ravel(x[1:]), len(x) - 1\n if dim is None: dim = d\n elif d != dim: raise Exception('{0}: {1}!={2}'.format(j, dim, d))\n U.append(vector)\n words.append(word)\n U = np.array(U)\n print \"Found {0} words\".format(len(words))\n print \"Found {0}x{1} embedding matrix\".format(*U.shape)\n with open(DATA_DIR + out_fname, 'wb') as f:\n cPickle.dump((words, U), f)", "def load_document_embeddings(path):\n embedding_dimension = 0\n \n # First pass to work out maximum topic ID to create numpy embeddings\n with open(path, 'rb') as avro_file:\n avro_reader = reader(avro_file)\n for document_embedding in avro_reader:\n topic_probs = document_embedding['topic_probs']\n \n for topic_prob in topic_probs:\n topic_id = topic_prob['topic_id']\n if topic_id + 1 > embedding_dimension:\n embedding_dimension = topic_id + 1\n \n # Second pass to actually store the embeddings\n x = []\n y = []\n \n with open(path, 'rb') as avro_file:\n avro_reader = reader(avro_file)\n for document_embedding in avro_reader:\n label = document_embedding['label']\n topic_probs = document_embedding['topic_probs']\n \n embedding = np.zeros(shape=embedding_dimension, dtype=np.float32)\n \n for topic_prob in topic_probs:\n topic_id = topic_prob['topic_id']\n prob = topic_prob['prob']\n embedding[topic_id] = prob\n \n x.append(embedding)\n y.append(label)\n 
\n return x, y", "def load_text(file: Union[str, bytes, int, PathLike],\n lossy: bool = False) -> Embeddings:\n with open(file, encoding='utf8',\n errors='replace' if lossy else 'strict') as inf:\n try:\n first = next(inf)\n except StopIteration:\n raise ValueError(\"Can't read from empty embeddings file.\")\n line = _ASCII_WHITESPACE_PAT.split(first.rstrip())\n cols = len(line[1:])\n rows = sum(1 for _ in inf) + 1\n inf.seek(0)\n return _load_text(inf, rows, cols)", "def load_word2vect(self, file_path):\n self.embeddings = []\n self.word_to_idx = {'<pad>' : 0}\n self.vocab = ['<pad>']\n\n model = w2v.load(file_path)\n self.embedding_size = model.vectors.shape[1]\n pad_embedding = np.zeros(self.embedding_size, \"float32\")\n self.embeddings.append(pad_embedding)\n\n train_words_set = set([word for text in self.train_data for word in\n text[1].split(\" \")])\n\n for w in model.vocab:\n if w in train_words_set:\n self.word_to_idx[w] = len(self.vocab)\n self.vocab.append(w)\n self.embeddings.append(model[w])\n\n del model", "def training_examples_to_vec(test_file, embeddings_file, num_words, word_dim):\n x = []\n ignore_words = stopwords.words('english')\n lemmatizer = WordNetLemmatizer()\n stemmer = SnowballStemmer('english')\n word_idx, word_vectors = hf.create_indices_for_vectors(embeddings_file, return_vectors=True)\n with open(test_file, 'r') as f:\n for line in f:\n stemmedWords = set([])\n long_string = line.split(' ')\n total_words = int(len(long_string) / 2)\n total_example_vec = np.empty([num_words, word_dim], dtype=np.float32)\n if total_words - 1 <= num_words:\n continue\n count = 0\n\n for i in range(1, total_words):\n word = long_string[2 * i].split(\"'\")[0]\n\n if (word in ignore_words) or (len(word) <= 3):\n continue\n\n if not word.isalpha():\n continue\n\n try:\n stem = stemmer.stem(word)\n lemma = lemmatizer.lemmatize(word)\n except UnicodeDecodeError:\n continue\n\n if stem in stemmedWords:\n continue\n\n try:\n idx_num = word_idx[word]\n except KeyError:\n\n try:\n idx_num = word_idx[lemma]\n except KeyError:\n\n try:\n idx_num = word_idx[stem]\n except KeyError:\n continue\n\n word_vec = word_vectors[idx_num]\n total_example_vec[count] = word_vec\n stemmedWords.add(stem)\n count += 1\n if count >= num_words:\n break\n x.append(total_example_vec)\n return x", "def loadGLOVE(filename, vocab):\n dct = {}\n vectors = array.array('d')\n current_idx = 0\n with codecs.open(filename, \"r\", encoding=\"utf-8\") as f:\n for _, line in enumerate(f):\n tokens = line.split(\" \")\n word = tokens[0]\n entries = tokens[1:]\n if not vocab or word in vocab:\n dct[word] = current_idx\n vectors.extend(float(x) for x in entries)\n current_idx += 1\n word_dim = len(entries)\n num_vectors = len(dct)\n tf.logging.info(\"Found {} out of {} vectors in Glove\".format(num_vectors, len(vocab)))\n return [np.array(vectors).reshape(num_vectors, word_dim), dct]", "def load_data_and_embedding():\n\n # Load data\n df_data = pd.read_csv('../new_data/train_ids_and_labels_1400.txt',nrows=10000)\n y = df_data['class'] - 1 # class (0 ~ 18)\n X = df_data.drop(['class'], axis=1).values\n\n # Transform to binary class matrix\n y = to_categorical(y.values)\n\n # Randomly shuffle data\n np.random.seed(10)\n\n shuffle_indices = np.random.permutation(range(len(y)))\n X_shuffled = X[shuffle_indices]\n y_shuffled = y[shuffle_indices]\n\n # Split to train/test set\n # TODO: This is very crude, should use cross validation\n val_sample_index = -1 * int(0.2 * len(y))\n X_train, X_val = X_shuffled[:val_sample_index], 
X_shuffled[val_sample_index:]\n y_train, y_val = y_shuffled[:val_sample_index], y_shuffled[val_sample_index:]\n\n del df_data, X, y, X_shuffled, y_shuffled\n\n embedding_matrix = np.load(\"../embedding/word-embedding-200d-mc5.npy\")\n\n return X_train, y_train, X_val, y_val,embedding_matrix", "def load_google_embeddings(embeddings_path):\n\n embeddings = KeyedVectors.load_word2vec_format(\n embeddings_path,\n binary=True\n )\n\n dim = embeddings['dog'].size\n\n return embeddings", "def init_word_embeddings(session, model, embeddings_file):\n # Create word embedding array from word2vec file\n vocab_size = FLAGS.vocab_size\n embeddings = []\n with tf.gfile.Open(embeddings_file) as f:\n i = 0\n while i < vocab_size:\n numbers = f.readline().split()\n if len(numbers) > 0:\n embeddings.append([float(n) for n in numbers])\n i += 1\n else:\n break # Last line of embeddings file is empty\n\n # Eliminate the random word embeddings and introduce word2vec to the realm of variable scopes.\n # The victims will be:\n # \"embedding_attention_seq2seq/RNN/EmbeddingWrapper/embedding\"\n # \"embedding_attention_seq2seq/embedding_attention_decoder/embedding\"\n np_embeddings = np.array(embeddings)\n feed_dict = {model.word2vec_placeholder: np_embeddings}\n session.run(model.word2vec_assign_encoder_op, feed_dict=feed_dict)\n session.run(model.word2vec_assign_decoder_op, feed_dict=feed_dict)", "def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)", "def load_sequences_from_file(path, voc_path):\n table = str.maketrans('', '', string.punctuation)\n \n with open(voc_path) as f:\n word_to_idx = dict(json.load(f))\n dict_size = len(word_to_idx)\n corpus = []\n with open(path, \"r\") as fin:\n for line in fin:\n line = line.lower()\n sentence = []\n for word in line.split():\n word = word.translate(table) # remove punctuation\n sentence.append(word_to_idx[word])\n corpus.append(sentence)\n\n for sentence in corpus:\n for i in range(max_len-len(sentence)):\n sentence.append(PAD)\n\n return torch.tensor(corpus)", "def read_embeddings(embedding_file):\r\n print(\"Reading embeddings...\", end=\"\")\r\n\r\n embeddings = dict()\r\n with h5py.File(embedding_file, 'r') as f:\r\n for key in f.keys():\r\n embeddings[key] = np.array(f[key], dtype=np.float32)\r\n\r\n print(f\"done! 
Found {len(embeddings.keys())} proteins.\")\r\n\r\n return embeddings", "def glove():\n import numpy as np\n \"\"\" Read from Gluons embedding pickle files\"\"\"\n with np.load(glove_model_path) as f:\n matrix = f['idx_to_vec']\n matrix.setflags(write=0)\n return matrix, f['idx_to_token'], token_to_idx(f['idx_to_token'])", "def load_embeddings(db):\n size = db['size'].values\n emb = db['embedding'].values\n emb = [np.load(i).flatten() for i in emb]\n return emb, size", "def load_embeddings(emb_file, word_map):\n\n # Find embedding dimension\n with open(emb_file, 'r') as f:\n emb_dim = len(f.readline().split(' ')) - 1\n\n vocab = set(word_map.keys())\n\n # Create tensor to hold embeddings, initialize\n embeddings = torch.FloatTensor(len(vocab), emb_dim)\n init_embedding(embeddings)\n\n # Read embedding file\n print(\"\\nLoading embeddings...\")\n for line in open(emb_file, 'r'):\n line = line.split(' ')\n\n emb_word = line[0]\n embedding = list(map(lambda t: float(t), filter(lambda n: n and not n.isspace(), line[1:])))\n\n # Ignore word if not in train_vocab\n if emb_word not in vocab:\n continue\n\n embeddings[word_map[emb_word]] = torch.FloatTensor(embedding)\n\n return embeddings, emb_dim", "def load_glove_matrix(w2i, glove_file):\n f = open(glove_file, 'rb')\n vocab_size = len(w2i)\n embedding_dim = 0\n\n # Load all glove vectors, put them in a matrix\n for line in f:\n split_line = line.split()\n word = split_line[0]\n embedding = np.array([float(val) for val in split_line[1:]])\n if embedding_dim == 0:\n embedding_dim = len(embedding)\n embeddings_matrix = np.zeros((vocab_size, embedding_dim))\n\n # Use only words that are in the corpus\n if word in w2i:\n embeddings_matrix[w2i[word], :] = embedding\n\n # Replace zero vectors with random numbers\n for i, row in enumerate(embeddings_matrix):\n if not (False in [n == 0 for n in row]):\n vec = np.random.rand(1, embedding_dim)\n embeddings_matrix[i, :] = vec / np.linalg.norm(vec)\n return embeddings_matrix", "def load_embeddings(path, vocab, source_domain, target_domain, emb_name):\n\n pkl = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n if os.path.exists(pkl):\n print(\"Load embeddings from existing pkl file %s...\" % pkl)\n # word embeddings weights have been loaded\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n print(\"Load embedding from %s...\" % path)\n raw_embeddings = {}\n if emb_name == 'yelp_electronics':\n with open(path) as fp:\n for line in fp:\n word_vector = line.split(\",\")[:-1]\n vector_list = []\n for element in word_vector[len(word_vector) - 100:]:\n vector_list.append(float(element))\n word = ','.join(word_vector[:len(word_vector) - 100])\n vector = np.asarray(vector_list)\n if word in vocab:\n raw_embeddings[word] = vector\n else:\n with open(path) as fp:\n for line in fp:\n eles = line.strip().split(' ')\n word = eles[0]\n if word in vocab:\n raw_embeddings[word] = eles[1:]\n\n dim_w = len(raw_embeddings['the'])\n n_words = len(vocab)\n embeddings = np.zeros(shape=(n_words, dim_w))\n for w in vocab:\n wid = vocab[w]\n if w in raw_embeddings:\n embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])\n else:\n # for OOV words, add random initialization\n embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)\n print(\"Find %s word embeddings...\" % len(embeddings))\n if not os.path.exists('./work/embeddings'):\n os.mkdir('./work/embeddings')\n emb_path = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n # write the embedding weights back 
to the disk\n pickle.dump(embeddings, open(emb_path, 'wb'))\n embeddings = np.array(embeddings, dtype='float32')\n return embeddings", "def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)", "def load_glove_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n for i,line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n if word in vocab:\n word_vecs[word] = np.array(L[1:], dtype='float32')\n return word_vecs", "def gen_embeddings(vocab, file, emb_size, emb_dim):\n # embeddings = np.random.randn(vocab.n_words, emb_size) * 0.01\n embeddings = np.zeros((vocab.n_words, emb_size))\n print('Embeddings: %d x %d' % (vocab.n_words, emb_size))\n if file is not None:\n print('Loading embedding file: %s' % file)\n pre_trained = 0\n for line in open(file).readlines():\n sp = line.split()\n if(len(sp) == emb_dim + 1):\n if sp[0] in vocab.word2index:\n pre_trained += 1\n embeddings[vocab.word2index[sp[0]]] = [float(x) for x in sp[1:]]\n else:\n print(sp[0])\n print('Pre-trained: %d (%.2f%%)' % (pre_trained, pre_trained * 100.0 / vocab.n_words))\n return embeddings", "def _load_word_embedding(self, lang):\n dict_fold = 'train' # which fold of the data will be used to produce results\n if self.args.task == 'conneau' or self.args.task == 'xling':\n data_dir = os.path.join(self.args.data_dir, 'MUSE')\n lang_path = os.path.join(data_dir, 'wiki.' + lang + '.vec')\n elif self.args.task == 'dinu':\n data_dir = os.path.join(self.args.data_dir, 'dinu')\n lang_path = os.path.join(data_dir, 'embeddings', lang + '.emb.txt')\n elif self.args.task == 'zhang':\n order = [lang,trg]\n if lang == 'en':\n order = order[::-1]\n data_dir = os.path.join(self.args.home_dir,'pkg/UBiLexAT/data/','-'.join(order))\n lang_path = os.path.join(data_dir, 'word2vec.' 
+ lang)\n\n langfile = open(lang_path, encoding=self.args.encoding, errors='surrogateescape')\n words, xs = embeddings.read(langfile, self.args.maxs)\n langfile.close()\n # Build word to index map\n word2ind = {word: i for i, word in enumerate(words)}\n\n return xs, words, word2ind", "def load_embeddings(emb_file, oovs=[], pads=[], sep=' ', lower=False, case_dim=True):\n word2emb = {}\n word2idx = {}\n\n # read and store all word vectors\n for line in open(emb_file, errors = 'ignore', encoding = 'utf-8'):\n try:\n fields = line.strip().split(sep)\n word = fields[0]\n vec = np.asarray(fields[1:], dtype='float32')\n if case_dim:\n is_upper = float(word[0].isupper())\n vec = np.insert(vec, 0, is_upper, axis=0)\n if lower:\n word = word.lower()\n word2emb[word] = vec\n if word not in word2idx:\n word2idx[word] = len(word2idx)\n except Exception as e:\n print('[WARNING] Exception in `load_embeddings`:', e)\n\n # get dimensions from the last vector added\n emb_dim = word2emb[word].shape[0]\n\n # add custom embeddings for special characters\n mu = 0\n sigma = 0.01\n\n for word in pads:\n if word not in word2idx:\n if case_dim:\n vec = npr.normal(mu, sigma, emb_dim-1)\n vec = np.insert(vec, 0, 0., axis=0)\n else:\n vec = npr.normal(mu, sigma, emb_dim)\n word2emb[word] = vec\n word2idx[word] = len(word2idx)\n else:\n print('[WARNING] Padding item ' + word + ' has an embedding vector')\n\n for word in oovs:\n if word not in word2idx:\n if case_dim:\n vec = npr.normal(mu, sigma, emb_dim-1)\n is_upper = float(word[0].isupper())\n vec = np.insert(vec, 0, is_upper, axis=0)\n else:\n vec = npr.normal(mu, sigma, emb_dim)\n word2emb[word] = vec\n word2idx[word] = len(word2idx)\n else:\n print('[WARNING] OOV alias ' + word + ' has an embedding vector')\n\n # create an embedding matrix\n vocab_size = len(word2emb)\n emb_matrix = np.zeros((vocab_size, emb_dim))\n for word, idx in word2idx.items():\n if word in word2emb:\n vec = word2emb[word]\n if vec is not None and vec.shape[0] == emb_dim:\n emb_matrix[idx] = np.asarray(vec)\n\n # print feedback data and return mappings\n print('[INFO] Embedding vocabulary:', emb_matrix.shape[0], '(lowercase: ' + str(lower) + ')')\n print('[INFO] OOV aliases:', oovs)\n print('[INFO] Padding items:', pads)\n print('[INFO] Embedding dimensions:', emb_dim, '(extra case dimension: ' + str(case_dim) + ')')\n return word2idx, np.asarray(emb_matrix), emb_dim", "def load_embeddings(embedding_path, embedding_size, embedding_format):\n print(\"Loading word embeddings from {}...\".format(embedding_path))\n\n if embedding_format in ['vec', 'txt']:\n default_embedding = np.zeros(embedding_size)\n embedding_dict = collections.defaultdict(lambda: default_embedding)\n skip_first = embedding_format == \"vec\"\n with open(embedding_path) as f:\n for i, line in enumerate(f.readlines()):\n if skip_first and i == 0:\n continue\n splits = line.split(' ')\n assert len(splits) == embedding_size + 1\n word = splits[0]\n embedding = np.array([float(s) for s in splits[1:]])\n embedding_dict[word] = embedding\n elif embedding_format == 'bin':\n embedding_dict = fasttext.load_model(embedding_path)\n else:\n raise ValueError('Not supported embeddings format {}'.format(embedding_format))\n print(\"Done loading word embeddings.\")\n return embedding_dict", "def load_embedding_matrix(filepath, word2vec_format = False):\n if word2vec_format:\n return gensim.models.KeyedVectors.load_word2vec_format(filepath, binary=True)\n else: #own pretrained model\n return gensim.models.Word2Vec.load(filepath)", "def 
get_embeddings(emb_path, emb_length, vocab_size, embedding_type):\n print(\"Loading {} embeddings from file: {}...\".format(embedding_type, emb_path))\n\n emb_matrix = []\n str2id = {}\n idx = 0\n with open(emb_path, 'r') as fh:\n for line in tqdm(fh, total=vocab_size):\n line = line.lstrip().rstrip().split(\" \")\n word = line[0]\n vector = list(map(float, line[1:]))\n if emb_length != len(vector):\n raise Exception(\n \"{}: Expected vector of size {}, but got vector of size {}.\".format(idx, emb_length, len(vector)))\n emb_matrix.append(vector)\n str2id[word] = idx\n idx += 1\n\n emb_matrix = np.array(emb_matrix, dtype=np.float32)\n print(\"Loaded {} embedding matrix with shape {}.\".format(embedding_type, emb_matrix.shape))\n\n return emb_matrix, str2id", "def load_embedding(fpath, VOCAB):\n print(\"Loading embeddings...\")\n emb = dict()\n wv_from_bin = KeyedVectors.load_word2vec_format(fpath, limit=VOCAB)\n for word, vector in tqdm(zip(wv_from_bin.vocab, wv_from_bin.vectors)):\n coefs = np.asarray(vector, dtype='float32')\n if word not in emb:\n emb[word] = coefs\n return emb", "def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words loaded!\")\n return model", "def _read_adv_embeddings(identity, target):\n embeddings_file = os.path.join(\n FLAGS.output_directory,\n identity,\n FLAGS.attack_type,\n target\n )\n embeddings_file = os.path.join(FLAGS.image_directory,\n identity,\n 'embeddings.h5')\n with h5py.File(embeddings_file, 'r') as f:\n return f['embeddings'][:].astype(np.float32)", "def load(cls, filepath) -> 'Word2VecEmbedding':\n with open(filepath, 'rb') as f:\n embedding = pickle.load(f)\n embedding.word2idx = {spell: idx for idx, spell in enumerate(embedding.vocab.idx2word)}\n return embedding", "def buildEmbeddingMatrix(path_to_gloVe, tokenizer, embedding_dimen=300):\n logging.info(\"Loading GloVe vector model..\")\n t = time()\n # Loads the gloVe model into a dictionary\n with open(path_to_gloVe, encoding='utf8') as file:\n embeddings = dict()\n for line in file:\n values = line.split()\n # key is the word, value is the numpy array for the corresponding word vector\n embeddings[values[0]] = np.asarray(values[1:], 'float32')\n # Create a 2D tensor of shape(num_unique_words+1, embedding_dimen) (Index 0 is used for padding)\n embedding_matrix = np.zeros((len(nltk_tokenizer.word_index) + 1, embedding_dimen))\n word_found_in_embedding = 0\n for word, index in nltk_tokenizer.word_index.items():\n embedding_vector = embeddings.get(word)\n # Only populate word vectors that exist in GloVe model,\n # words not found (e.g: spelling error) will be padded with zeroes as their word vector\n if embedding_vector is not None:\n embedding_matrix[index] = embedding_vector\n word_found_in_embedding += 1\n logging.info(\"Done!\")\n logging.info(\"Loaded {} word vectors into the embedding.\".format(len(embedding_matrix)))\n logging.info(\"Found {} word vectors that exist in the GloVe model.\".format(word_found_in_embedding))\n 
logging.info(\"Time taken to load pre-trained GloVe model: {} mins\".format(round(((time() - t) / 60), 2)))\n return embedding_matrix", "def load_embeddings(self, f_name, dims=128):\n emb_df = pd.read_csv(f_name, sep=' ', skiprows=1, header=None, index_col=None)\n if not self.embeddings:\n self.embeddings = {}\n for i in range(0, emb_df.shape[0]):\n key = emb_df.iloc[i, 0]\n if str(key) in '</s>':\n continue\n emb = np.array(emb_df.iloc[i, 1: dims + 1].tolist())\n emb = emb.astype(float)\n self.embeddings[int(key)] = emb\n self.make_emb_cols(dims)", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def index_embedding_words(embedding_file):\n words = set()\n with open(embedding_file, encoding='utf-8') as f:\n for line in f:\n w = Dictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def load_gloves(self, dir):\n self.word2vec = {}\n glove_file = os.path.join(dir, 'glove.6B.'+str(self.dim_embed)+'d.txt')\n with open(glove_file, encoding=\"utf8\") as f:\n for line in f:\n l = line.split()\n self.word2vec[l[0]] = [float(x) for x in l[1:]]\n self.word2vec[\"<RARE>\"] = [0. for i in range(self.dim_embed)]\n self.word2vec[\"<EMPTY>\"] = [0. for i in range(self.dim_embed)]", "def preprocess(data_path, glove_path, embed_size):\n train_data = read_imdb(data_path, 'train')\n test_data = read_imdb(data_path, 'test')\n\n train_tokenized = []\n test_tokenized = []\n for review, _ in train_data:\n train_tokenized.append(tokenizer(review))\n for review, _ in test_data:\n test_tokenized.append(tokenizer(review))\n\n vocab = set(chain(*train_tokenized))\n vocab_size = len(vocab)\n print(\"vocab_size: \", vocab_size)\n\n word_to_idx = {word: i + 1 for i, word in enumerate(vocab)}\n word_to_idx['<unk>'] = 0\n\n train_features = np.array(pad_samples(encode_samples(train_tokenized, word_to_idx))).astype(np.int32)\n train_labels = np.array([score for _, score in train_data]).astype(np.int32)\n test_features = np.array(pad_samples(encode_samples(test_tokenized, word_to_idx))).astype(np.int32)\n test_labels = np.array([score for _, score in test_data]).astype(np.int32)\n\n weight_np = collect_weight(glove_path, vocab, word_to_idx, embed_size)\n return train_features, train_labels, test_features, test_labels, weight_np, vocab_size", "def get_word_embeddings(embeddingsPath, allwords, splittype=\" \"):\n word2Idx = {}\n wordEmbeddings = []\n print('Using word embeddings:', embeddingsPath)\n openfile = _get_correct_open(embeddingsPath)\n with openfile(embeddingsPath,'r', encoding='utf-8') as fEmbeddings:\n c = 0\n for line in fEmbeddings:\n c+=1\n split = line.strip().split(splittype)\n if '' in split:\n raise ValueError(\"Error in spacing of word vector file:\"+\n \" bad formatting. 
Consider using different value of splittype.\")\n word = split[0]\n\n # fastText and Yogatama pre-trained embedding starts with a line\n # with 2 elements that we need to ignore:\n if len(split) <= 5:\n print('Ignoring line '+str(c))\n continue\n\n if len(word2Idx) == 0: #Add padding+unknown\n word2Idx[\"PADDING_TOKEN\"] = len(word2Idx)\n vector = np.zeros(len(split)-1) #Zero vector for 'PADDING' word\n wordEmbeddings.append(vector)\n\n word2Idx[\"UNKNOWN_TOKEN\"] = len(word2Idx)\n vector = np.random.uniform(-0.25, 0.25, len(split)-1)\n wordEmbeddings.append(vector)\n\n if word.lower() in allwords:\n vector = np.array([float(num) for num in split[1:]])\n wordEmbeddings.append(vector)\n word2Idx[word] = len(word2Idx)\n\n wordEmbeddings = np.array(wordEmbeddings)\n return wordEmbeddings, word2Idx", "def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs", "def load_glove(path):\n with open(path) as f:\n glove = {}\n for line in f.readlines():\n values = line.split()\n word = values[0]\n vector = np.array(values[1:], dtype='float32')\n glove[word] = vector\n return glove", "def load_glove(path):\n with open(path) as f:\n glove = {}\n for line in f.readlines():\n values = line.split()\n word = values[0]\n vector = np.array(values[1:], dtype='float32')\n glove[word] = vector\n return glove", "def _read_words(filename):\n with tf.gfile.GFile(filename, \"r\") as f:\n return f.read().replace(\"\\n\", \"<eos>\").split()", "def load_model(self, file=FILENAME, dim=DIMENSION, normalize=False):\n print(\"Loading pretrained Glove vectors from file {}\".format(FILENAME))\n self.dimension = dim\n self.normalize = normalize\n with open(file, \"r\", encoding=\"utf-8\") as textfile:\n self.num_tokens = count_lines(textfile)\n self.tokens_arr = [\"\" for i in range(self.num_tokens)]\n self.embeddings_mat = np.zeros((self.num_tokens, self.dimension))\n\n for idx, line in enumerate(textfile):\n line = line.split()\n token = ''.join(line[:-self.dimension])\n self.tokens_arr[idx] = token\n self.token_to_idx[token] = idx \n vec = list(map(float, line[-self.dimension:]))\n if self.normalize: \n # normalize the vectors as they are put into the matrix\n vec = vec / np.linalg.norm(vec)\n self.embeddings_mat[idx] = vec \n if (idx+1) % 200000 == 0:\n print(\" --{}% loaded.\".format(round(idx/self.num_tokens*100, 2)))\n print(\"Finished loading Glove model. 
{} vectors loaded\".format(self.num_tokens))", "def load_word2vec_embeddings(word2vec_file, word_map):\n # Load word2vec model into memory\n w2v = gensim.models.KeyedVectors.load(word2vec_file, mmap='r')\n\n\n\n # Create tensor to hold embeddings for words that are in-corpus\n # word_map 내 단어들에 대한 임베딩 벡터 만들기\n embeddings = torch.FloatTensor(len(word_map), w2v.vector_size)\n init_embedding(embeddings)\n\n # Read embedding file\n\n for word in word_map:\n if word in w2v.vocab:\n embeddings[word_map[word]] = torch.FloatTensor(w2v[word])\n\n\n return embeddings, w2v.vector_size", "def init_embeddings_from_file(self, filepath, mode=None, **kwargs):\n words = self.d.vocab\n weight, words = EmbeddingLoader(filepath, mode).load(words, **kwargs)\n self.init_embeddings(weight, words)", "def load_glove_vec(fname):\n word_vecs = {}\n length = 0\n with open(fname, \"rb\") as f:\n for i, line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n word_vecs[word] = np.array(L[1:], dtype='float32')\n if length == 0:\n length = len(word_vecs[word])\n return word_vecs, length", "def index_embedding_words(self, embedding_file):\n words = set()\n with open(embedding_file) as f:\n for line in f:\n w = TokenDictionary.normalize(line.rstrip().split(' ')[0])\n words.add(w)\n return words", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def embedding_sentence(input_file, save_path, max_length):\n lines = _read_csv(input_file)\n split_lines = []\n label_list = []\n for line in lines:\n split_lines.append(sentence_split(line[1], max_length))\n label_list.append(int(line[2]))\n del lines\n\n writer = tf.python_io.TFRecordWriter(save_path)\n for index, line in enumerate(split_lines):\n bytes_words = []\n for word in line:\n bytes_words.append(str.encode(word))\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(bytes_list=tf.train.BytesList(value=bytes_words))\n }))\n writer.write(example.SerializeToString())", "def read_data(file_path):\n words=[]\n dic_word={}\n actual_text=[]\n for line in open(file_path,encoding='utf-8'):\n words_line=line.strip().split(' ')\n for ite in words_line:\n if ite not in dic_word:\n dic_word[ite]=1\n words.extend(words_line)\n actual_text.append(words_line)\n\n\n #with zipfile.ZipFile(file_path) as f:\n #words = tf.compat.as_str(f.read(f.namelist()[0])).split()\n\n return words,len(dic_word),actual_text", "def get_embeddings():\n # Load the raw embedding data\n X_train = np.load('./train_embeddings.npy')\n \n y_train = np.load('./train_labels.npy')\n \n X_valid = np.load('./valid_embeddings.npy')\n \n y_valid = np.load('./valid_labels.npy')\n \n X_test = np.load('./test_embeddings.npy')\n \n y_test = np.load('./test_labels.npy')\n\n #return X_train, y_train\n return X_train, y_train, X_valid, y_valid, X_test, y_test", "def get_google_word2vec_W(fname, vocab, vocab_size=1000000, index_from=3):\n f = open(fname, 'rb')\n header = f.readline()\n vocab1_size, embedding_dim = list(map(int, header.split()))\n binary_len = np.dtype('float32').itemsize * embedding_dim\n vocab_size = min(len(vocab) + index_from, vocab_size)\n W = np.zeros((vocab_size, embedding_dim))\n\n found_words = {}\n for i, 
line in enumerate(range(vocab1_size)):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch)\n if word in vocab:\n wrd_id = vocab[word] + index_from\n if wrd_id < vocab_size:\n W[wrd_id] = np.fromstring(\n f.read(binary_len), dtype='float32')\n found_words[wrd_id] = 1\n else:\n f.read(binary_len)\n\n cnt = 0\n for wrd_id in range(vocab_size):\n if wrd_id not in found_words:\n W[wrd_id] = np.random.uniform(-0.25, 0.25, embedding_dim)\n cnt += 1\n assert cnt + len(found_words) == vocab_size\n\n f.close()\n\n return W, embedding_dim, vocab_size", "def embedding_matrix(path_to_embedding : str,embedding_dim: int, word_index : dict) -> np.array:\n embeddings_index = {}\n f = open(path_to_embedding, encoding='utf-8')\n for line in f:\n try:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n except:\n pass\n \n f.close()\n\n embedding_matrix = np.zeros((len(word_index) + 1,embedding_dim))\n found = 0\n for word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n found +=1\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix", "def getEmbeddings(embed_loc, wrd_list, embed_dims):\n embed_list = []\n\n wrd2embed = {}\n for line in open(embed_loc, encoding='utf-8', errors='ignore'):\n data = line.strip().split(' ')\n\n # wrd, embed = data[0], data[1:]\n\n # Some words may be separated by space (telephone numbers, for example).\n # It's more robust to load data as follows.\n embed = data[-1 * embed_dims:]\n wrd = ' '.join(data[: -1 * embed_dims])\n\n embed = list(map(float, embed))\n wrd2embed[wrd] = embed\n\n for wrd in wrd_list:\n if wrd in wrd2embed:\n embed_list.append(wrd2embed[wrd])\n else:\n print('Word not in embeddings dump {}'.format(wrd))\n embed_list.append(np.random.randn(embed_dims))\n\n return np.array(embed_list, dtype=np.float32)", "def load_vocab(self):\n keys = []\n values = []\n with open(self.embed_file, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n key = line.split(\" \")[0]\n value = line.split(\" \")[1:]\n keys.append(key)\n values.append(value)\n # form <dict>\n # vocab = dict(zip(keys, values))\n return keys, values", "def load_to_dataframe(self) -> DataFrame:\n return read_csv(self._csv_path, converters={\n # Check if embedding size is the empty string,\n # as it would be for Count models\n \"Embedding size\": lambda v: int(float(v)) if len(v) > 0 else nan\n })", "def load_pretrained_embeddings(self, embedding_path):\n trained_embeddings = {}\n with open(embedding_path, 'r', encoding='utf-8') as fin:\n for line in fin:\n contents = line.strip().split(\" \")\n term = contents[0]\n if term not in self.term2id:\n continue\n trained_embeddings[term] = list(map(float, contents[1:]))\n if self.embed_dim is None:\n self.embed_dim = len(contents) - 1\n filtered_terms = trained_embeddings.keys()\n # rebuild the term x id map\n self.term2id = {}\n self.id2term = {}\n for term in self.initial_terms:\n self.add(term, count=0)\n for term in filtered_terms:\n self.add(term, count=0)\n # load embeddings\n self.embeddings = np.zeros([self.size(), self.embed_dim])\n for term in self.term2id.keys():\n if term in trained_embeddings:\n self.embeddings[self.get_id(term)] = trained_embeddings[term]", "def read_data(cls, input_file):\n with tf.gfile.Open(input_file, \"r\") as f:\n lines = []\n for line in 
f:\n line = line.strip()\n if line.startswith('-DOCSTART-'):\n continue\n else:\n word_labels = line.split('-seq-')\n assert len(word_labels) == 2\n\n words = word_labels[0]\n labels = word_labels[1]\n lines.append([words, labels])\n\n return lines", "def _index(self, corpus):\n\n # Transform documents to embeddings vectors\n ids, dimensions, stream = self.embedder.model.index(corpus)\n\n # Load streamed embeddings back to memory\n embeddings = np.empty((len(ids), dimensions), dtype=np.float32)\n with open(stream, \"rb\") as queue:\n for x in range(embeddings.shape[0]):\n embeddings[x] = pickle.load(queue)\n\n # Remove temporary file\n os.remove(stream)\n\n all_text = []\n for para_id, text, _ in corpus:\n all_text.append([text, para_id])\n\n df = pd.DataFrame(all_text, columns=[\"text\", \"paragraph_id\"])\n\n embedding_path = os.path.join(\n self.index_path, self.embed_paths[\"embeddings\"])\n dataframe_path = os.path.join(\n self.index_path, self.embed_paths[\"dataframe\"])\n ids_path = os.path.join(self.index_path, self.embed_paths[\"ids\"])\n\n # Load new data\n if os.path.isfile(embedding_path) and (self.encoder_args[\"overwrite\"] is False):\n logger.info(f\"Loading new data from {embedding_path}\")\n\n # Load existing embeddings\n old_embeddings = np.load(embedding_path) # LOAD EMBEDDINGS\n # Remove embeddings with document id overlaps\n embeddings = np.vstack((old_embeddings, embeddings))\n\n # load IDs\n old_ids = [doc_id[:-1] for doc_id in open_txt(ids_path)]\n logger.debug(f\"New ID Length = {len(ids)}\")\n logger.debug(f\"Old ID Length = {len(old_ids)}\")\n # Remove document ids overlaps\n logger.debug(f\"New ID Length = {len(ids)}\")\n ids = old_ids + ids\n logger.debug(f\"Merged ID Length = {len(ids)}\")\n\n # Append new dataframe\n old_df = pd.read_csv(dataframe_path)\n df = pd.concat([old_df, df])\n\n # Store embeddings and document index\n # for future reference\n np.save(embedding_path, embeddings)\n with open(ids_path, \"w\") as fp:\n fp.writelines([i + \"\\n\" for i in ids])\n\n # Save data csv\n df.to_csv(dataframe_path, index=False)\n\n # Normalize embeddings\n self.embedder.normalize(embeddings)\n\n # Save embeddings metadata\n self.embedder.config[\"ids\"] = ids\n self.embedder.config[\"dimensions\"] = dimensions\n\n # Create embeddings index\n logger.info(f\"Creating embeddings and index\")\n self.embedder.embeddings = ANN.create(self.embedder.config)\n logger.info(f\"Created embeddings\")\n\n # Build the index\n self.embedder.embeddings.index(embeddings)\n logger.info(f\"Built the embeddings index\")", "def load_glove_vocabulary(dim,\n include_punc=False,\n size=100000):\n if dim not in set([25,50,100,200]):\n raise ValueError(\"GloVe dimension must be one of [25, 50, 100, 200]\")\n ## Expected File Path\n glove_file = os.path.join(os.path.dirname(os.path.abspath(__file__)) +\"/../../\",\n RESOURCE_DIR,\n f\"glove.twitter.27B.{dim}d.txt\"\n )\n if not os.path.exists(glove_file):\n raise FileNotFoundError(f\"Could not find glove embeddings file: {glove_file}\")\n ## Load Vocabulary\n glove_vocab = []\n punc = set(string.punctuation)\n for l, line in enumerate(open(glove_file, \"r\")):\n if l >= size:\n break\n if line.startswith(\"<\"):\n continue\n token, _ = line.split(\" \", 1)\n if token.startswith(\"#\"):\n if len(token) == 1:\n continue\n else:\n token = \"<HASHTAG={}>\".format(token[1:])\n if not include_punc and token in punc:\n continue\n glove_vocab.append(token)\n return glove_vocab", "def load_pretrained_embeddings(self, embedding_path):\n 
trained_embeddings = {}\n with open(embedding_path, 'r') as fin:\n for line in fin:\n contents = line.strip().split()\n token = contents[0]\n if token not in self.token2id:\n continue\n trained_embeddings[token] = list(map(float, contents[1:]))\n embed_size = len(contents) - 1\n # load embeddings\n self.embeddings = np.random.randn([self.size, embed_size])\n for token in self.id2token:\n if token in trained_embeddings:\n self.embeddings[self.token2id[token]] = trained_embeddings[token]", "def load_embedding_tf(word_to_index, tf_embeddings_file_path, nb_dims):\n # 1. Define the variable that will hold the embedding:\n tf_embedding = tf.Variable(\n tf.constant(0.0, shape=[len(word_to_index)-1, nb_dims]),\n trainable=False,\n name=\"Embedding\"\n )\n\n # 2. Restore the embedding from disks to TensorFlow, GPU (or CPU if GPU unavailable):\n variables_to_restore = [tf_embedding]\n embedding_saver = tf.compat.v1.train.Saver(variables_to_restore)\n embedding_saver.restore(sess, save_path=tf_embeddings_file_path)\n print(\"TF embeddings restored from '{}'.\".format(tf_embeddings_file_path))\n \n return tf_embedding", "def _read_fasttext_embeddings(self, vocab: vocabs.Vocab, init_fastext):\n with open(init_fastext, encoding='utf-8') as embeddings_file_handle:\n _, dimension = next(embeddings_file_handle).split()\n if int(dimension) != self.emb_dim:\n raise Exception(f\"An embedding size of {self.emb_dim} was specified, but the pretrained embeddings have size {dimension}\")\n\n # Poor man's Glorot initializer for missing embeddings\n bound = np.sqrt(6/(self.vocab_size + self.emb_dim))\n\n total_embs = 0\n in_vocab = 0\n missing = 0\n\n embeddings = np.empty((self.vocab_size, self.emb_dim), dtype='float')\n found = np.zeros(self.vocab_size, dtype='bool_')\n\n for line in embeddings_file_handle:\n total_embs += 1\n word, vals = line.strip().split(' ', 1)\n if word in vocab.w2i:\n in_vocab += 1\n index = vocab.w2i[word]\n embeddings[index] = np.fromstring(vals, sep=\" \")\n found[index] = True\n\n for i in range(self.vocab_size):\n if not found[i]:\n missing += 1\n embeddings[i] = np.random.uniform(-bound, bound, self.emb_dim)\n\n logger.info(f\"{in_vocab} vocabulary matches out of {total_embs} total embeddings; \"\n f\"{missing} vocabulary words without a pretrained embedding out of {self.vocab_size}\")\n\n return embeddings", "def load_words_from_file(path, voc_path=None):\n label_to_idx = {}\n dict_size = 0\n label_ids = []\n with open(path, \"r\") as fin:\n for label in fin:\n if label not in label_to_idx:\n label_to_idx[label] = dict_size\n dict_size += 1\n label_ids.append(label_to_idx[label])\n if voc_path:\n with open(voc_path, \"w+\") as fout:\n json.dump(label_to_idx, fout)\n return torch.tensor(label_ids)", "def make_embeddings(self):\n\t\tprint(\"Presetting embedding weights\")\n\t\t\t\n\t\tnp.random.seed(0)\n\t\tweights = np.random.uniform(low = -0.05, high = 0.05, size = (self.FREQCAP, self.EMB_SIZE))\n\t\t\n\t\tcounter = 0\n\n\t\twords = []\n\t\tweights_tmp = []\n\n\t\twith open(self.embeddingpath) as handle:\n\t\t\tfor i, line in enumerate(handle):\n\t\t\t\ttmp = line.strip()\n\t\t\t\tif len(tmp) > 0:\n\t\t\t\t\tsplit = tmp.split(\" \")\n\t\t\t\t\tif split[0] in self.worddict and len(split[1:]) == 300:\n\t\t\t\t\t\twords.append(split[0])\n\t\t\t\t\t\tweights_tmp.append([float(a) for a in split[1:]])\n\t\t\n\t\tweights_tmp = np.array(weights_tmp)\n\n\t\tfor word, column in zip(words, weights_tmp):\n\t\t\tif self.worddict[word] < self.FREQCAP:\n\t\t\t\tcounter += 
1\n\t\t\t\tweights[self.worddict[word],:] = column\n\t\t\n\t\tprint(\"Set\", counter, \"of\", weights.shape[0], \"columns\")\n\t\t\n\t\tif self.EMB_SIZE < weights.shape[-1]:\n\t\t\tprint(\"Reducing dimensionality to\", self.EMB_SIZE)\n\t\t\tpca = PCA(self.EMB_SIZE)\n\t\t\tweights = pca.fit_transform(weights)\n\t\t\n\t\tself.embeddings = [weights]", "def load_word_embed(path: str,\n dimension: int,\n *,\n skip_first: bool = False,\n freeze: bool = False,\n sep: str = ' '\n ) -> Tuple[nn.Embedding, Dict[str, int]]:\n vocab = {'$$$UNK$$$': 0}\n embed_matrix = [[0.0] * dimension]\n with open(path) as r:\n if skip_first:\n r.readline()\n for line in r:\n segments = line.rstrip('\\n').rstrip(' ').split(sep)\n word = segments[0]\n vocab[word] = len(vocab)\n embed = [float(x) for x in segments[1:]]\n embed_matrix.append(embed)\n print('Loaded %d word embeddings' % (len(embed_matrix) - 1))\n \n embed_matrix = torch.FloatTensor(embed_matrix)\n \n word_embed = nn.Embedding.from_pretrained(embed_matrix,\n freeze=freeze,\n padding_idx=0)\n return word_embed, vocab", "def load_text_embed(filepath: Union[str, os.PathLike], load_dir: str = 'model') \\\n -> Tuple[TransformerEmbedding, Callable]:\n model_dir = Path(filepath).joinpath(load_dir)\n tokenizer = AutoTokenizer.from_pretrained(str(model_dir.resolve()))\n args = dill.load(open(model_dir.joinpath('embedding.dill'), 'rb'))\n emb = TransformerEmbedding(\n str(model_dir.resolve()), embedding_type=args['embedding_type'], layers=args['layers']\n )\n return emb, tokenizer", "def load_embeddings(self, str_file):\n\n with open(str_file, 'rb') as f_read:\n self.embeddings_entity = pickle.load(f_read)\n self.embeddings_relation = pickle.load(f_read)\n self.dict_paras = pickle.load(f_read)" ]
[ "0.7637871", "0.72984564", "0.72906333", "0.7238396", "0.7030938", "0.6931128", "0.69254005", "0.6910939", "0.6879712", "0.6877971", "0.68639934", "0.6831232", "0.6824664", "0.68067014", "0.679522", "0.6794145", "0.6751845", "0.67482585", "0.67427385", "0.66889936", "0.666392", "0.665094", "0.6634601", "0.6600874", "0.65529066", "0.6495853", "0.6495826", "0.6474231", "0.6467967", "0.6462704", "0.6460578", "0.6429134", "0.64100695", "0.63857543", "0.637913", "0.63766426", "0.63718164", "0.63266426", "0.6292317", "0.6289675", "0.62887514", "0.62645787", "0.62540895", "0.6242553", "0.6234264", "0.62329185", "0.62318313", "0.6228364", "0.62219733", "0.62200487", "0.62139875", "0.6213855", "0.6201881", "0.62014425", "0.6200808", "0.6182696", "0.618226", "0.61773676", "0.6174702", "0.6173223", "0.6172694", "0.61481786", "0.6123567", "0.61104965", "0.60887116", "0.6079538", "0.60726446", "0.60715884", "0.6058485", "0.6056105", "0.6037935", "0.6030778", "0.6030778", "0.60279244", "0.6006321", "0.5997117", "0.59922147", "0.5991799", "0.59760696", "0.5967961", "0.5958604", "0.59527516", "0.595271", "0.59501", "0.5945104", "0.59450376", "0.59425014", "0.59352267", "0.59227717", "0.5910801", "0.5904618", "0.5900094", "0.589793", "0.58866704", "0.5877443", "0.58722097", "0.58595973", "0.5856287", "0.58557904", "0.58517045" ]
0.7851951
0
Creates a Tensor for use as an Embedding initialization from the source vocabulary and predefined word embeddings.
def get_pretrained_embeddings(source_vocab, embed_df):
    num_tokens = len(source_vocab)
    embedding_dim = embed_df.shape[1]
    weights = np.zeros((num_tokens, embedding_dim), dtype=np.float32)
    for idx in range(num_tokens):
        token = source_vocab.lookup_index(idx)
        if token in embed_df.index:
            weights[idx, :] = embed_df.loc[token]
        else:
            weights[idx, :] = np.random.randn(1, embedding_dim)
    embed_tensor = torch.FloatTensor(weights)
    return embed_tensor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_embedding_layer(inputs_, vocab_size, embed_size):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_size), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, inputs_)\n \n return embed", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n with tf.variable_scope('vc'):\n with tf.variable_scope('lstm', reuse=True):\n embedding_mat = tf.get_variable(\"embedding_mat\", [config.num_vocab, config.embed_dim])\n init_we = tf.assign(embedding_mat, embedding_mat_val)\n return [init_we]", "def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2", "def _build(self, ids):\n # Construct embeddings.\n if self._existing_vocab is None:\n if self.EMBEDDINGS not in self._initializers:\n self._initializers[self.EMBEDDINGS] = basic.create_linear_initializer(\n self._vocab_size)\n self._embeddings = tf.get_variable(\n \"embeddings\",\n shape=[self._vocab_size, self._embed_dim],\n dtype=tf.float32,\n initializer=self._initializers[self.EMBEDDINGS],\n partitioner=self._partitioners.get(self.EMBEDDINGS, None),\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n else:\n self._embeddings = tf.get_variable(\n \"embeddings\",\n dtype=tf.float32,\n initializer=self._existing_vocab,\n regularizer=self._regularizers.get(self.EMBEDDINGS, None),\n trainable=self._trainable)\n\n # Lookup embeddings\n return tf.nn.embedding_lookup(\n self._embeddings, ids, name=\"embedding_lookup\")", "def embedding_layer(self):\n with tf.name_scope(\"Embedding_Layer\"):\n V_size = len(self.vocab)\n embed_dim = len(self.embed[0]) \n W_embed_ = tf.get_variable(\"W_embed\",shape=[V_size, embed_dim],trainable=False).assign(np.asarray(self.embed))\n W_analogy_embed_ = tf.get_variable(\"W_analogy_embed\",shape=[V_size, embed_dim],trainable=True,initializer=tf.random_uniform_initializer(minval=-1,maxval=1))\n return W_embed_, W_analogy_embed_", "def add_embedding(self):\n ### YOUR CODE HERE (~4-6 lines)\n embeddingTensor = tf.Variable(self.pretrained_embeddings)\n embeddings = tf.nn.embedding_lookup(embeddingTensor, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [-1, self.max_length, Config.n_features * Config.embed_size])\n ### END YOUR CODE\n return embeddings", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.pretrained_word_mat = tf.get_variable(\"word_emb_mat\",\n [self.vocab.word_size() - 2, self.vocab.word_embed_dim],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n 
self.vocab.word_embeddings[2:],\n dtype=tf.float32),\n trainable=False)\n self.word_pad_unk_mat = tf.get_variable(\"word_unk_pad\",\n [2, self.pretrained_word_mat.get_shape()[1]],\n dtype=tf.float32,\n initializer=tf.constant_initializer(\n self.vocab.word_embeddings[:2],\n dtype=tf.float32),\n trainable=True)\n\n self.word_mat = tf.concat([self.word_pad_unk_mat, self.pretrained_word_mat], axis=0)\n self.p_emb = tf.nn.embedding_lookup(self.word_mat, self.p)\n self.q_emb = tf.nn.embedding_lookup(self.word_mat, self.q)", "def __init__(self,\n vocab_size=None,\n embed_dim=None,\n existing_vocab=None,\n initializers=None,\n partitioners=None,\n regularizers=None,\n trainable=True,\n custom_getter=None,\n name=\"embed\"):\n if vocab_size is None and existing_vocab is None:\n raise ValueError(\"Must provide on of vocab_size or existing_vocab.\")\n\n if existing_vocab is not None and not all(\n x is None for x in [vocab_size, embed_dim, initializers, partitioners]):\n raise ValueError(\"If existing_vocab is provided, none of vocab_size, \"\n \"embedding_dim, initializers, or partitioners is \"\n \"needed.\")\n\n super(Embed, self).__init__(custom_getter=custom_getter, name=name)\n self._existing_vocab = None\n if existing_vocab is None:\n self._vocab_size = vocab_size\n self._embed_dim = embed_dim or _embedding_dim(self._vocab_size)\n else:\n self._existing_vocab = tf.convert_to_tensor(\n existing_vocab, dtype=tf.float32)\n existing_vocab_shape = self._existing_vocab.get_shape().with_rank(2)\n existing_vocab_shape.assert_is_fully_defined()\n self._vocab_size, self._embed_dim = existing_vocab_shape.as_list()\n\n self._initializers = util.check_initializers(\n initializers, self.POSSIBLE_INITIALIZER_KEYS)\n self._partitioners = util.check_partitioners(\n partitioners, self.POSSIBLE_INITIALIZER_KEYS)\n self._regularizers = util.check_regularizers(\n regularizers, self.POSSIBLE_INITIALIZER_KEYS)\n self._trainable = trainable", "def _initialize_embeddings(self):\n with tf.variable_scope(self.scope):\n init_temporal_s = np.sqrt(\n 6. 
/ (self._config.nact_dict[\"num_s\"] + self._config.ndim_emb + 1))\n\n self.w_dt = tf.get_variable(\n name=\"w_dt\",\n shape=[1, self._config.ndim_emb],\n initializer=tf.initializers.random_uniform(\n -init_temporal_s, init_temporal_s))\n\n if self._config.embedding_type not in self._embedding_classes:\n raise ValueError(\n f\"Unknown embedding type: {self._config.embedding_type}.\")\n self.embedding = self._embedding_classes[self._config.embedding_type](\n self._config, self._embed_dim_dict)", "def add_embedding(self, prefix=''):\n with tf.variable_scope(prefix + 'embed'):\n if self.cfg.fix_emb:\n assert (hasattr(self.cfg, 'W_emb'))\n W_emb = pkl.load(open(self.cfg.W_emb_path, 'rb'))\n W = tf.get_variable('W', initializer= W_emb, trainable=True)\n print(\"iniitalize word embedding finished\")\n else:\n weightInit = tf.random_uniform_initializer(-0.001, 0.001)\n vocab = pkl.load(open(self.cfg.vocab_path, 'rb'))\n W = tf.get_variable('W', [len(vocab), self.cfg.emb_size], initializer=weightInit)\n if hasattr(self.cfg, 'relu_w') and self.cfg.relu_w:\n W = tf.nn.relu(W)\n return W", "def tf_word2vec(sentences, vocab, epochs, learning_rate, num_sampled,\n window_size, batch_size, embed_size, tensorboard):\n vocab_size = len(vocab)\n\n # Clears the default graph stack and resets the global default graph;\n # this line is crucial if we want to re-run the class in interactive\n # environment such as jupyter notebook\n tf.reset_default_graph()\n\n # when building out tensorflow's computation graph, it's a good practice to\n # group nodes/operations that have similar purposes together using name_scope;\n # this additional step will give us nicer graph representation in Tensorboard,\n # which is tool that gives us nice graphical representation of the computation\n # graph we have defined\n with tf.name_scope('data'):\n # for target_words:\n # we will use it with tensorflow's loss later, and the function requires rank 2\n # input, that's why there's an extra dimension in the shape\n center_words = tf.placeholder(tf.int32, shape = [batch_size], name = 'center_words')\n target_words = tf.placeholder(tf.int32, shape = [batch_size, 1], name = 'target_words')\n\n with tf.name_scope('embedding_matrix'):\n # the actual word vectors\n embed_matrix = tf.Variable(\n tf.random_uniform([vocab_size, embed_size], -1.0, 1.0), name = 'embed_matrix')\n\n with tf.name_scope('loss'):\n # input -> hidden layer\n embed = tf.nn.embedding_lookup(embed_matrix, center_words, name = 'embed')\n\n # hidden layer -> output layer's weights\n stddev = 1.0 / embed_size ** 0.5\n output_weight = tf.Variable(\n tf.truncated_normal([vocab_size, embed_size], stddev = stddev), name = 'output_weight')\n\n output_bias = tf.Variable(tf.zeros([vocab_size]), name = 'output_bias')\n\n # hidden layer -> output layer + sampled softmax loss\n total_loss = tf.reduce_mean(tf.nn.sampled_softmax_loss( # tf.nn.nce_loss(\n weights = output_weight, biases = output_bias,\n labels = target_words, inputs = embed,\n num_sampled = num_sampled, num_classes = vocab_size), name = 'loss')\n\n # create a summary scalar that reports the loss\n tf.summary.scalar('total_loss', total_loss)\n summary_op = tf.summary.merge_all()\n\n optimizer = tf.train.AdagradOptimizer(learning_rate)\n train_step = optimizer.minimize(total_loss)\n init = tf.global_variables_initializer()\n\n # batch_iters = len(data) // batch_size\n with tf.Session() as sess:\n sess.run(init)\n\n # record the average loss in the last skip_step steps\n history = []\n writer = 
tf.summary.FileWriter(tensorboard, sess.graph)\n for epoch in trange(epochs):\n iterator = generate_sample(sentences, vocab, window = window_size)\n batch_gen = get_batch(iterator, batch_size)\n\n # for _ in range(batch_iters):\n # try:\n centers, targets = next(batch_gen)\n feed_dict = {center_words: centers, target_words: targets}\n _, loss, summary = sess.run([train_step, total_loss, summary_op], feed_dict)\n\n writer.add_summary(summary, epoch)\n history.append(loss)\n\n writer.close()\n word_vectors = sess.run(embed_matrix)\n\n return word_vectors, history", "def _embed(self):\n with tf.variable_scope('word_embedding'):\n self.word_embeddings = tf.get_variable(\n 'word_embeddings',\n shape=(self.term_vocab.size(), self.term_vocab.embed_dim),\n initializer=tf.constant_initializer(self.term_vocab.embeddings),\n trainable=True\n )\n self.p_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.p)\n self.q_word_emb = tf.nn.embedding_lookup(self.word_embeddings, self.q)\n\n with tf.variable_scope('char_embedding'):\n self.char_embeddings = tf.get_variable(\n 'char_embeddings',\n shape=(self.char_vocab.size(), self.char_vocab.embed_dim),\n initializer=tf.constant_initializer(self.char_vocab.embeddings),\n trainable=True\n )\n self.p_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.p_char) # [batch, seqlen, max_char_num, embedding_size]\n self.q_char_emb = tf.nn.embedding_lookup(self.char_embeddings, self.q_char)\n\n self.p_char_emb = self.cnn_emb(self.p_char_emb, \"p_emb\")\n self.q_char_emb = self.cnn_emb(self.q_char_emb, \"q_emb\")\n '''\n self.p_char_emb = tf.reshape(self.p_char_emb, [-1, self.max_char_num, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [-1, self.max_char_num, self.emb_size])\n\n self.p_char_emb = cnn_layer.conv(self.p_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=None)\n self.q_char_emb = cnn_layer.conv(self.q_char_emb, self.emb_size,\n bias=True, activation=tf.nn.relu, kernel_size=5, name=\"char_conv\", reuse=True)\n\n self.p_char_emb = tf.reduce_max(self.p_char_emb, axis=1) # [batch*seqlen, 1, emb_size]\n self.q_char_emb = tf.reduce_max(self.q_char_emb, axis=1)\n\n batch_size = tf.shape(self.p_word_emb)[0]\n self.p_char_emb = tf.reshape(self.p_char_emb, [batch_size, -1, self.emb_size])\n self.q_char_emb = tf.reshape(self.q_char_emb, [batch_size, -1, self.emb_size])\n\n self.p_char_emb = tf.nn.dropout(self.p_char_emb, 0.95)\n self.q_char_emb = tf.nn.dropout(self.q_char_emb, 0.95)\n '''\n self.p_emb = tf.concat([self.p_word_emb, self.p_char_emb], -1)\n self.q_emb = tf.concat([self.q_word_emb, self.q_char_emb], -1)", "def instantiate_weights(self):\n with tf.variable_scope(\"embedding_projection\"), tf.device('/cpu:0'): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],\n initializer=self.initializer)\n # self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size],\n # dtype=tf.float32) # ,initializer=self.initializer\n # self.W_projection = tf.get_variable(\"W_projection\", shape=[self.sequence_length * self.d_model, self.num_classes],\n # initializer=self.initializer) # [embed_size,label_size]\n # self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])", "def create_embedding(num_symbol, embedding_size, embedding_name):\n return tf.Variable(tf.random_uniform([num_symbol, embedding_size], -0.1, 0.1, tf.float32), name=embedding_name, trainable=True)", "def 
__init__(self,\n vocab_size,\n embed_dim,\n dropout,\n pretrained,\n embedding=None,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"word_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.dropout = dropout\n self.pretrained = pretrained\n self.embedding = embedding\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, self.pretrained,\n self.embedding, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n\n return embed", "def __init__(self, lstm_step=80, input_d=300, vocab_size=2196018, embedding=None):\n self.raw_premise = tf.placeholder(shape=[None, lstm_step], dtype=tf.int32, name='premise')\n self.premise_length = tf.placeholder(shape=[None], dtype=tf.int32, name='premise_length')\n\n self.raw_hypothesis = tf.placeholder(shape=[None, lstm_step], dtype=tf.int32, name='hypothesis')\n self.hypothesis_length = tf.placeholder(shape=[None], dtype=tf.int32, name='hypothesis_length')\n\n self.label = tf.placeholder(shape=[None], dtype=tf.int32)\n # Those operations take too many memory\n # Use cpu for those operations (deprecated when using truncate embedding)\n if embedding is not None:\n self.input_embedding = tf.placeholder(dtype=tf.float32, shape=embedding.shape, name='word_embedding')\n self.embedding = tf.Variable(tf.zeros(embedding.shape, dtype=tf.float32))\n else:\n \"\"\"\n If embedding is not provided, then use random number as embedding\n \"\"\"\n self.embedding = tf.Variable(tf.random_uniform([vocab_size, input_d], minval=-0.05, maxval=0.05))\n \"\"\"\n This is the embedding operation. 
It will be invoked by loading embedding function in the actual model\n \"\"\"\n self.load_embedding_op = self.embedding.assign(self.input_embedding)\n\n self.premise = tf.nn.embedding_lookup(self.embedding, self.raw_premise)\n self.hypothesis = tf.nn.embedding_lookup(self.embedding, self.raw_hypothesis)", "def add_word_embeddings_op(self):\n with tf.variable_scope(\"words\"):\n if self.config.embeddings is None:\n self.logger.info(\"WARNING: randomly initializing word vectors\")\n _word_embeddings = tf.get_variable(\n name=\"_word_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nwords, self.config.dim_word])\n else:\n _word_embeddings = tf.Variable(\n self.config.embeddings,\n name=\"_word_embeddings\",\n dtype=tf.float32,\n trainable=self.config.train_embeddings)\n\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids, name=\"word_embeddings\")\n\n with tf.variable_scope(\"chars\"):\n if self.config.use_chars:\n # get char embeddings matrix\n _char_embeddings = tf.get_variable(\n name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.config.nchars, self.config.dim_char])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings,\n self.char_ids, name=\"char_embeddings\")\n\n # put the time dimension on axis=1\n s = tf.shape(char_embeddings)\n char_embeddings = tf.reshape(char_embeddings,\n shape=[s[0]*s[1], s[-2], self.config.dim_char])\n word_lengths = tf.reshape(self.word_lengths, shape=[s[0]*s[1]])\n\n # bi lstm on chars\n cell_fw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self.config.hidden_size_char,\n state_is_tuple=True)\n _output = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, char_embeddings,\n sequence_length=word_lengths, dtype=tf.float32)\n\n # read and concat output\n _, ((_, output_fw), (_, output_bw)) = _output\n output = tf.concat([output_fw, output_bw], axis=-1)\n\n # shape = (batch size, max sentence length, char hidden size)\n output = tf.reshape(output,\n shape=[s[0], s[1], 2*self.config.hidden_size_char])\n word_embeddings = tf.concat([word_embeddings, output], axis=-1)\n\n self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def embedding_setup(self, embedding, emb_trainable):\n if emb_trainable == True:\n emb_variable = tf.get_variable(\n name=\"embedding_matrix\", shape=embedding.shape,\n initializer = tf.constant_initializer(embedding))\n return emb_variable\n else:\n return embedding", "def add_embedding(self):\n #with 
tf.variable_scope(\"RNN\", reuse = tf.AUTO_REUSE):\n embeddings = tf.get_variable(\"embeddings\", initializer = self.pretrained_embeddings,trainable=True)\n inputs = self.input_placeholder\n inputs = tf.reshape(inputs, [self.config.batch_size, -1 , self.config.n_features])\n embeddings = tf.nn.embedding_lookup(embeddings, self.input_placeholder)\n embeddings = tf.reshape(embeddings, [self.config.batch_size, -1, self.config.n_features* self.config.embed_size])\n embeddings = tf.cast(embeddings, tf.float32)\n return embeddings", "def embedding(inputs,\n vocab_dim,\n embedding_dim,\n reuse,\n validate_indices=False,\n w_init=tf.random_uniform_initializer(-1., 1.),\n trainable=True,\n normalize=False,\n vocab_freqs=None,\n name=\"Embedding\"):\n\n input_shape = util.get_input_shape(inputs)\n assert len(input_shape) == 2, \"Input Tensor shape must be 2-D\"\n\n with tf.variable_scope(name, reuse=reuse):\n with tf.device('/cpu:0'):\n W = tf.get_variable(\n \"W\", shape=[vocab_dim, embedding_dim], initializer=w_init, trainable=trainable)\n if normalize:\n assert vocab_freqs is not None\n vocab_freqs = tf.constant(vocab_freqs, dtype=tf.float32, shape=(vocab_dim, 1))\n W = _normalize(W, vocab_freqs)\n\n output = tf.cast(inputs, tf.int32)\n output = tf.nn.embedding_lookup(W, output, validate_indices=validate_indices)\n\n shape = [-1] + output.get_shape().as_list()[1:3] + [1]\n # seq_length = util.retrieve_seq_length(tf.reshape(inputs, shape))\n\n return output", "def add_word_embedding_op(self):\n if self.pos:\n print(\"adding pos embeddings\")\n with tf.variable_scope(\"pos\"):\n _pos_embeddings = tf.Variable(self.pos_embeddings,\n name=\"la_pos_embeddings\",\n dtype=tf.float32, trainable=False)\n pos_embeddings = tf.nn.embedding_lookup(_pos_embeddings, self.pos_ids,\n name=\"pos_embeddings\")\n self.pos_vecs = pos_embeddings\n print(\"adding word_embeddings\")\n with tf.variable_scope(\"words\"):\n _word_embeddings = tf.Variable(self.embeddings, name=\"_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids,\n name=\"word_embeddings\")\n if self.use_window:\n print(\"Concatenating word vectors of context words\")\n word_embeddings_sl = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sl,\n name=\"word_embeddings_sl\")\n word_embeddings_sr = tf.nn.embedding_lookup(_word_embeddings,\n self.word_ids_sr,\n name=\"word_embeddings_sr\")\n word_embeddings = tf.concat([word_embeddings_sr, word_embeddings,\n word_embeddings_sl], axis=-1)\n if self.use_char_embeddings:\n print(\"adding CNN for char embeddings\")\n with tf.variable_scope(\"chars\"):\n _char_embeddings = tf.get_variable(name=\"_char_embeddings\",\n dtype=tf.float32,\n shape=[self.char_count, \n self.c_dim_input])\n char_embeddings = tf.nn.embedding_lookup(_char_embeddings, \n self.char_ids, \n name=\"char_embeddings\")\n s = char_embeddings.shape\n # the shape of our char_embeddings is now (batch_size, max number of words\n # in each sentence, max number of chars in each word, self.c_dim )\n char_filter = tf.get_variable(\"char_filter\", dtype=tf.float32,\n shape=[self.c_filter_width, \n self.c_filter_height,\n self.c_dim_input,\n self.c_dim_output])\n print(\"adding 2d convolution layer\")\n char_conv_layer = tf.nn.conv2d(char_embeddings, char_filter, \n strides=[1, 1, 1, 1], \n padding=\"SAME\")\n char_conv_layer = tf.nn.tanh(char_conv_layer)\n print(\"adding 2d pooling layer\")\n char_conv_layer = tf.layers.max_pooling2d(char_conv_layer, \n 1, \n 
strides=1)\n char_output = tf.reshape(char_conv_layer, shape=[-1, self.max_len, \n self.max_word_length*\n self.c_dim_output])\n word_embeddings = tf.concat([word_embeddings, char_output], axis=-1)\n if self.pos and self.concat_pos:\n print(\"concatenating pos with word_embeddings\")\n word_embeddings = tf.concat([word_embeddings, pos_embeddings], axis=-1)\n self.word_embeddings = word_embeddings\n if self.use_additional and self.hybrid:\n print(\"using additional embeddings\")\n _word_embeddings_2 = tf.Variable(self.additional_embeddings,\n name=\"two_word_embeddings\",\n dtype=tf.float32, trainable=False)\n word_embeddings_2 = tf.nn.embedding_lookup(_word_embeddings_2,\n self.word_ids,\n name=\"two_word_embeddings\")\n self.word_embeddings_2 = word_embeddings_2", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n \n vocab_size = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n any_word = list(word_to_vec_map.keys())[0]\n emb_dim = word_to_vec_map[any_word].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n \n ### START CODE HERE ###\n # Step 1\n # Initialize the embedding matrix as a numpy array of zeros.\n # See instructions above to choose the correct shape.\n emb_matrix = np.zeros((vocab_size, emb_dim))\n \n # Step 2\n # Set each row \"idx\" of the embedding matrix to be \n # the word vector representation of the idx'th word of the vocabulary\n for word, idx in word_to_index.items():\n emb_matrix[idx, :] = word_to_vec_map[word]\n\n # Step 3\n # Define Keras embedding layer with the correct input and output sizes\n # Make it non-trainable.\n embedding_layer = tensorflow.keras.layers.Embedding(input_dim = vocab_size, output_dim = emb_dim, trainable = False)\n ### END CODE HERE ###\n\n # Step 4 (already done for you; please do not modify)\n # Build the embedding layer, it is required before setting the weights of the embedding layer. \n embedding_layer.build((None,)) # Do not modify the \"None\". This line of code is complete as-is.\n \n # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n embedding_layer.set_weights([emb_matrix])\n \n return embedding_layer", "def build_input_embed(self, n_input, t_input):\n n_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ntoken, self.n_embed_dim], minval=-0.05, maxval=0.05), name='n_embed_matrix')\n t_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ttoken, self.t_embed_dim], minval=-0.05, maxval=0.05), name='t_embed_matrix')\n n_input_embedding = tf.nn.embedding_lookup(n_embed_matrix, n_input)\n t_input_embedding = tf.nn.embedding_lookup(t_embed_matrix, t_input)\n return n_input_embedding, t_input_embedding", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def build(self, input_shapes):\n (word_embeddings_shape, _) = input_shapes\n width = word_embeddings_shape.as_list()[-1]\n self.type_embeddings = None\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"type_embeddings\",\n shape=[self.token_type_vocab_size, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, width],\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=\"layer_norm\", axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super(EmbeddingPostprocessor, self).build(input_shapes)", "def build(self,unused):\n # (word_embeddings_shape, _) = input_shapes\n # width = word_embeddings_shape.as_list()[-1]\n if self.use_type_embeddings:\n self.type_embeddings = self.add_weight(\n \"token_type_embeddings\",\n shape=[self.token_type_vocab_size, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.position_embeddings = None\n if self.use_position_embeddings:\n self.position_embeddings = self.add_weight(\n \"position_embeddings\",\n shape=[self.max_position_embeddings, self.word_embedding_width],\n initializer=get_initializer(self.initializer_range),\n dtype=self.dtype)\n\n self.output_layer_norm = tf.keras.layers.LayerNormalization(\n name=LAYER_NORM_NAME, axis=-1, epsilon=1e-12, dtype=tf.float32)\n self.output_dropout = tf.keras.layers.Dropout(\n rate=self.dropout_prob, dtype=tf.float32)\n super().build(unused)", "def source_embedding_fairseq(self):\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.params[\"feature.dim\"], self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def get_embed(input_data, vocab_size, embed_dim):\n embedding = tf.Variable(tf.random_uniform((vocab_size,embed_dim), -1, 1))\n embed = tf.nn.embedding_lookup(embedding, input_data)\n #print (\"embed_dim: \",embed_dim) # 向量表达维度为 256\n #print (\"input_data.shape: \",input_data.shape) # (50, 5)\n #print (\"embed.shap: \", embed.shape) # word 的向量表达 ==特征 (50, 5, 256) ==(batch_size, num_step, embed_dim)\n return embed # 
返回input的向量表达", "def __init__(self, vocab_size):\n super(Model, self).__init__()\n\n # TODO: initialize vocab_size, embedding_size\n self.vocab_size = vocab_size\n self.embedding_size = 256\n self.batch_size = 1000\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0005)\n\n # TODO: initialize embeddings and forward pass weights (weights, biases)\n self.E = tf.Variable(tf.random.truncated_normal(shape=[self.vocab_size, self.embedding_size], mean=0, stddev=0.1))\n self.W = tf.Variable(tf.random.truncated_normal(shape=[self.embedding_size * 2, self.vocab_size], mean=0, stddev=0.1))\n self.b = tf.Variable(tf.random.truncated_normal(shape=[1, self.vocab_size], mean=0, stddev=0.1))", "def setup_embeddings(self):\n with vs.variable_scope(\"embeddings\"):\n vec_embeddings = tf.get_variable(\"embeddings\", initializer=self.pretrained_embeddings, trainable=False)\n context_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.context_placeholder)\n question_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.question_placeholder)\n context_embeddings = tf.reshape(context_batch_embeddings,\n (-1, self.max_context_len, self.vocab_dim))\n question_embeddings = tf.reshape(question_batch_embeddings,\n (-1, self.max_question_len, self.vocab_dim))\n return context_embeddings, question_embeddings", "def load_embedding_tf(word_to_index, tf_embeddings_file_path, nb_dims):\n # 1. Define the variable that will hold the embedding:\n tf_embedding = tf.Variable(\n tf.constant(0.0, shape=[len(word_to_index)-1, nb_dims]),\n trainable=False,\n name=\"Embedding\"\n )\n\n # 2. Restore the embedding from disks to TensorFlow, GPU (or CPU if GPU unavailable):\n variables_to_restore = [tf_embedding]\n embedding_saver = tf.compat.v1.train.Saver(variables_to_restore)\n embedding_saver.restore(sess, save_path=tf_embeddings_file_path)\n print(\"TF embeddings restored from '{}'.\".format(tf_embeddings_file_path))\n \n return tf_embedding", "def init_word_embeddings(session, model, embeddings_file):\n # Create word embedding array from word2vec file\n vocab_size = FLAGS.vocab_size\n embeddings = []\n with tf.gfile.Open(embeddings_file) as f:\n i = 0\n while i < vocab_size:\n numbers = f.readline().split()\n if len(numbers) > 0:\n embeddings.append([float(n) for n in numbers])\n i += 1\n else:\n break # Last line of embeddings file is empty\n\n # Eliminate the random word embeddings and introduce word2vec to the realm of variable scopes.\n # The victims will be:\n # \"embedding_attention_seq2seq/RNN/EmbeddingWrapper/embedding\"\n # \"embedding_attention_seq2seq/embedding_attention_decoder/embedding\"\n np_embeddings = np.array(embeddings)\n feed_dict = {model.word2vec_placeholder: np_embeddings}\n session.run(model.word2vec_assign_encoder_op, feed_dict=feed_dict)\n session.run(model.word2vec_assign_decoder_op, feed_dict=feed_dict)", "def embedded(self, word_ids, embedding_tensor, scope=\"embedding\"):\n with tf.variable_scope(scope):\n with tf.device(\"/cpu:0\"):\n inputs = tf.nn.embedding_lookup(embedding_tensor, word_ids)\n return inputs", "def make_embedding(src_emb_hparams, src_token_to_id_map,\n tgt_emb_hparams=None, tgt_token_to_id_map=None,\n emb_init_share=False):\n src_embedding = MonoTextData.make_embedding(src_emb_hparams,\n src_token_to_id_map)\n\n if emb_init_share:\n tgt_embedding = src_embedding\n else:\n tgt_emb_file = tgt_emb_hparams[\"file\"]\n tgt_embedding = None\n if tgt_emb_file is not None and tgt_emb_file != \"\":\n tgt_embedding = Embedding(tgt_token_to_id_map, 
tgt_emb_hparams)\n\n return src_embedding, tgt_embedding", "def __init__(self,\n vocab_size,\n embed_dim,\n unit_dim,\n window_size,\n hidden_activation,\n pooling_type,\n dropout,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"subword_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.unit_dim = unit_dim\n self.window_size = window_size\n self.hidden_activation = hidden_activation\n self.pooling_type = pooling_type\n self.dropout = dropout\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, False,\n None, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)\n \n self.conv_layer = create_convolution_layer(\"multi_1d\", 1, self.embed_dim,\n self.unit_dim, 1, self.window_size, 1, \"SAME\", self.hidden_activation, [0.0], None,\n False, False, True, self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, self.trainable)\n \n self.pooling_layer = create_pooling_layer(self.pooling_type, self.num_gpus, self.default_gpu_id)", "def embedding_table(inputs, vocab_size, embed_size, zero_pad=False,\n trainable=True, scope=\"embedding\", reuse=None):\n with tf.variable_scope(scope, reuse=reuse):\n embed_table = tf.get_variable('embedding_table',\n shape=[vocab_size, embed_size],\n initializer=_init,\n trainable=trainable,\n dtype=tf.float32)\n if zero_pad:\n embed_table = tf.concat((tf.zeros(shape=[1, embed_size]), embed_table[1:, :]),\n axis=0)\n\n return tf.nn.embedding_lookup(embed_table, inputs)", "def get_embed(input_data, vocab_size, embed_dim):\n # todo 需要编程:\n # 1、构建嵌入矩阵的查找表\n lookup_w = tf.Variable(\n initial_value=tf.random_uniform([vocab_size, embed_dim], -1.0, 1.0)\n )\n # 2、获得嵌入输出\n embed = tf.nn.embedding_lookup(params=lookup_w, ids=input_data)\n # [N, n_steps, embed_size]\n return embed", "def __init__(self, vocab_size: int, embedding_dim: int, hidden_size: int, dropout: float = 0.2,\n read_context: bool = False, pad_idx: int = Vocabulary.pad_idx):\n super(FullVocabularyModel, self).__init__()\n self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=pad_idx)\n self.embed_dropout = nn.Dropout(dropout)\n self.rnn = nn.LSTM(embedding_dim, hidden_size)\n self.linear = nn.Linear(hidden_size, vocab_size)\n self.loss_fn = nn.CrossEntropyLoss(ignore_index=pad_idx)\n\n self.vocab_size = vocab_size\n self.read_context = read_context\n self.pad_idx = pad_idx\n\n initrange = 0.5 / embedding_dim\n self.embedding.weight.data.uniform_(-initrange, initrange)\n self.embedding.weight.data[pad_idx].zero_()", "def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0):\n with tf.variable_scope(\n name, default_name=\"embedding\", values=[x], reuse=reuse):\n embedding_var = tf.get_variable(\"kernel\", [vocab_size, dense_size])\n emb_x = tf.gather(embedding_var, x)\n if multiplier != 1.0:\n emb_x *= multiplier\n return emb_x", "def __init__(self, layer_id,\n shape, X):\n prefix = 'Embedding' + layer_id\n self.n_words, self.in_size = shape\n\n # weights for embedding, the only parameters\n self.W = init_weights(shape=(self.n_words, self.in_size),\n name=prefix + '#W')\n\n 
self.params = [self.W]\n\n # Compute the embedded samples\n self.n_timesteps = X.shape[0]\n self.n_samples = X.shape[1]\n\n self.activation = self.W[X.flatten()].reshape([self.n_timesteps,\n self.n_samples,\n self.in_size])", "def createTheModel(vocabulary, window=configuration['mlp']['posWindow']):\n inputLayers, interLayers = [], []\n inputToken = Input((3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'],))\n inputLayers.append(inputToken)\n tokenEmb = Embedding(len(vocabulary.tokenIndices), configuration['mlp']['tokenEmb'],\n trainable=configuration['mlp']['trainable'])(inputToken)\n tokenFlatten = Flatten()(tokenEmb)\n interLayers.append(tokenFlatten)\n posNum = (2 * window + 1) * (3 + configuration['embedding']['useB1'] + configuration['embedding']['useB-1'])\n inputPos = Input((posNum,))\n inputLayers.append(inputPos)\n posEmb = Embedding(len(vocabulary.posIndices), configuration['mlp']['posEmb'],\n trainable=configuration['mlp']['trainable'])(inputPos)\n posFlatten = Flatten()(posEmb)\n interLayers.append(posFlatten)\n\n interLayers = keras.layers.concatenate(interLayers)\n lastLayer = Dense(configuration['mlp']['dense1UnitNumber'],\n activation=configuration['nn']['dense1Activation'])(interLayers)\n # dropout=configuration['mlp']['dense1Dropout'])(interLayers)\n lastLayer = Dropout(configuration['mlp']['dense1Dropout'])(lastLayer)\n softmaxLayer = Dense(8 if enableCategorization else 4, activation='softmax')(lastLayer)\n return inputLayers, softmaxLayer", "def embedding(org_input):\n # Create the embedding list\n for f in range(Config.num_feature):\n num_cat_value = Config.schema[f]\n\n if num_cat_value == 1:\n pass\n elif num_cat_value > 1:\n embed_dict[f] = tf.get_variable(\n name=\"embed_\" + str(f),\n shape=[num_cat_value, Config.embed_size[f]],\n trainable=True)\n else:\n raise ValueError(\"Schema values should be positive integers!\")\n\n # Create embedded inputs\n f_size = np.sum(Config.embed_size)\n embedded_input = embed_events(org_input, f_size)\n\n return embedded_input", "def _add_seq2seq(self):\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n \n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n\n with tf.variable_scope('embedding'):\n if hps.pretrained_embeddings:\n word2vec = load_embeddings(hps.embeddings_path, self._vocab.word2id, hps.rand_unif_init_mag)\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=tf.constant_initializer(word2vec))\n # self.assign_embedding = tf.assign(self.embedding, word2vec)\n else:\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(self.embedding) # add to tensorboard\n\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_enc_inputs = tf.nn.embedding_lookup(self.embedding, self._enc_batch)\n if self._hps.hier:\n enc_batch_sections = tf.unstack(self._enc_batch_sections, axis=1)\n sec_emb_enc_inputs = [tf.nn.embedding_lookup(self.embedding, section)\n for section in enc_batch_sections]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding, x)\n for x in tf.unstack(self._dec_batch, axis=1)]\n\n\n # Hierarchical attention model\n 
if self._hps.hier:\n with tf.variable_scope('encoder'), tf.device(self._next_device()):\n sec_enc_outs = []\n states_fw = []\n states_bw = []\n states = []\n\n # level 1, encode words to sections\n with tf.variable_scope(\"word_level_encoder\", reuse=tf.AUTO_REUSE) as scope:\n encoder_outputs_words = []\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n fw_st, bw_st = None, None\n if self._hps.use_do: # DropOut\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n for i in range(self._hps.num_sections):\n encoder_tmp_output, (fw_st, bw_st) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs=sec_emb_enc_inputs[i], dtype=tf.float32,\n sequence_length=self._batch_sections_len[:,i], swap_memory=True, initial_state_bw=bw_st, initial_state_fw=fw_st)\n # concatenate the forwards and backwards states\n encoder_tmp_output = tf.concat(axis=2, values=encoder_tmp_output) #shape=[batch x seq_len x hidden_size]\n \n encoder_outputs_words.append(encoder_tmp_output)\n # instead of concating the fw and bw states, we use a ff network\n combined_state = self._reduce_states(fw_st, bw_st)\n states.append(combined_state)\n scope.reuse_variables()\n \n # level 2, encode sections to doc\n encoder_outputs_words = tf.stack(encoder_outputs_words, axis=1) # shape [batch x num_sections x seq_len x hidden_size]\n shapes = encoder_outputs_words.shape\n encoder_outputs_words = tf.reshape(encoder_outputs_words, (shapes[0].value, -1, shapes[-1].value)) #shape=[batch x (seq_len * num_sections) x hidden_size]\n\n doc_sections_h = tf.stack([s.h for s in states], axis=1) # [batch x num_sections x hidden_size]\n doc_sections_c = tf.stack([s.c for s in states], axis=1) # [batch x num_sections x hidden_size]\n\n with tf.variable_scope(\"section_level_encoder\"):\n if FLAGS.section_level_encoder == 'RNN':\n cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do:\n cell_fw_1 = tf.contrib.rnn.DropoutWrapper(cell_fw_1, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw_1 = tf.contrib.rnn.DropoutWrapper(cell_bw_1, output_keep_prob=1.0 - self._hps.do_prob)\n encoder_output_sections, (fw_st_2, bw_st_2) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, inputs=doc_sections_h, sequence_length=self._doc_sec_lens, dtype=tf.float32, swap_memory=True)\n encoder_output_sections = tf.concat(axis=2, values=encoder_output_sections)\n doc_sections_state = self._reduce_states(fw_st_2, bw_st_2)\n else:\n if FLAGS.section_level_encoder == 'AVG': # average section cells\n doc_sections_state_h = tf.reduce_mean(doc_sections_h, axis=1)\n doc_sections_state_c = tf.reduce_mean(doc_sections_c, axis=1)\n elif FLAGS.section_level_encoder == 'FF': # use a feedforward network to combine section cells\n doc_sections_state_h = tf.reshape([doc_sections_h.shape[0].eval(), -1])\n doc_sections_state_h = tf.layers.dense(\n inputs=doc_sections_state_h,\n units=self._hps.hidden,\n activation=tf.nn.relu) \n doc_sections_state_c = tf.reshape([doc_sections_c.shape[0].eval(), -1])\n doc_sections_state_c = tf.layers.dense(\n inputs=doc_sections_state_c,\n 
units=self._hps.hidden,\n activation=tf.nn.relu)\n else:\n raise AttributeError('FLAGS.section_level_encoder={} is not a valid option'.format(FLAGS.section_level_encoder))\n doc_sections_state = tf.contrib.rnn.LSTMStateTuple(doc_sections_state_c, doc_sections_state_h)\n encoder_output_sections = doc_sections_h \n \n elif not self._hps.multi_layer_encoder:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n # concatenate the forwards and backwards states\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs)\n \n # stack n layers of lstms for encoder\n elif self._hps.multi_layer_encoder:\n # TODO: check\n for layer_i in xrange(self._hps.enc_layers):\n with tf.variable_scope('encoder%d'%layer_i), tf.device(\n self._next_device()):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do: # add dropout\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n emb_enc_inputs, (fw_st, bw_st) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n emb_enc_inputs = tf.concat(axis=2, values=emb_enc_inputs)\n encoder_outputs = emb_enc_inputs\n \n if self._hps.hier:\n self._enc_sec_states = encoder_output_sections\n self._enc_states = encoder_outputs_words \n else:\n self._enc_states = encoder_outputs\n self._enc_sec_states = None\n \n # convert the encoder bidirectional hidden state to the decoder state\n # (unidirectional) by an MLP\n if self._hps.hier:\n self._dec_in_state = doc_sections_state\n else:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n self._dec_in_state = self._reduce_states(fw_st, bw_st) \n \n # Add the decoder\n\n with tf.variable_scope('decoder'), tf.device(self._next_device()):\n cell = tf.contrib.rnn.LSTMCell(\n self._hps.hidden_dim,\n state_is_tuple=True,\n initializer=self.rand_unif_init)\n \n # We need to pass in the previous step's coverage vector each time\n prev_coverage = self.prev_coverage\\\n if hps.mode==\"decode\" and self._hps.coverage \\\n else None \n \n \n if self._hps.hier:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.attn_dists_sec =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n self._enc_sec_states,\n num_words_section=self._batch_sections_len,\n enc_padding_mask=self._enc_padding_mask,\n enc_section_padding_mask=self._enc_section_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n temperature=self._hps.temperature\n )\n \n else:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, _ =\\\n self.attn_decoder(emb_dec_inputs,\n 
self._dec_in_state,\n self._enc_states,\n cell,\n encoder_section_states=None,\n num_words_section=None,\n enc_padding_mask=self._enc_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n ) \n \n\n # Project decoder output to vocabulary\n with tf.variable_scope('output_projection'), tf.device(self._next_device()):\n if self._hps.output_weight_sharing:\n # share weights of embedding layer with projection\n # self.embedding is in shape [vsize, hps.emb_dim]\n w_proj = tf.get_variable('w_proj', [self._hps.emb_dim, self._hps.hidden_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n w = tf.tanh(tf.transpose(tf.matmul(self.embedding, w_proj))) # shape = [vsize, hps.hidden_dim]\n \n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n else: \n w = tf.get_variable('w', [self._hps.hidden_dim, vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # vocabulary score at each decoder step\n vocab_scores = []\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, b)) # apply the linear layer\n\n # the final vocab distribution for each decoder time step\n # shape of each element is [batch_size, vsize]\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] \n\n \n # pointing / generating\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n# log_dists = [tf.log(dist) for dist in final_dists]\n else:\n# log_dists = [tf.log(dist) for dist in vocab_dists]\n final_dists = vocab_dists\n \n\n # Calculate Losses:\n \n if self._hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'), tf.device(self._next_device()):\n if FLAGS.pointer_gen:\n # Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the gold target words\n # will be list length max_dec_steps containing shape (batch_size)\n loss_per_step = [] \n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n # The indices of the target words. shape (batch_size)\n targets = self._target_batch[:,dec_step] \n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n # shape (batch_size). 
loss on this step for each batch\n gold_probs = tf.gather_nd(dist, indices)\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n \n\n else: # baseline model\n # this applies softmax internally\n self._loss = tf.contrib.seq2seq.sequence_loss(\n tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if self._hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + self._hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n \n # ---------------------------/\n\n\n if self._hps.mode == \"decode\":\n assert len(final_dists) == 1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)", "def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings", "def init_embedding(input_embedding):\n bias = np.sqrt(3.0 / input_embedding.size(1))\n nn.init.uniform_(input_embedding, -bias, bias)", "def build(self, unused_input_shapes):\n if self.embedding_lookup is None:\n self.embedding_lookup = layers.OnDeviceEmbedding(\n vocab_size=self.config.vocab_size,\n embedding_width=self.config.hidden_size,\n initializer=tf.keras.initializers.TruncatedNormal(\n stddev=self.config.initializer_range),\n name=\"target_embeddings\")\n self.embedding_postprocessor = EmbeddingPostprocessor(\n use_type_embeddings=False,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer=tf.keras.initializers.VarianceScaling(\n scale=self.config.initializer_gain,\n mode=\"fan_avg\",\n distribution=\"uniform\"),\n name=\"embedding_postprocessor\")\n # Decoder can use a different intermediate size.\n self.multi_channel_cross_attention = self.config.get(\n \"multi_channel_cross_attention\", False)\n self.decoder = TransformerDecoder(\n num_hidden_layers=self.config.num_decoder_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_decoder_attn_heads,\n intermediate_size=self.config.decoder_intermediate_size,\n intermediate_activation=self.config.hidden_act,\n 
hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n multi_channel_cross_attention=self.multi_channel_cross_attention,\n name=\"decoder\")\n super(Decoder, self).build(unused_input_shapes)", "def generate_embedding(inputs, input_shape: list, embedding_args: dict, embedding_rank: int = 1, **kwargs):\n if len(kwargs) > 0:\n print(\"WARNING:kgcnn: Unknown embedding kwargs {0}. Will be reserved for future versions.\".format(kwargs))\n\n if len(input_shape) == embedding_rank:\n n = ks.layers.Embedding(**embedding_args)(inputs)\n else:\n n = inputs\n return n", "def pretrained_embedding_layer(word_to_vec_map, word_to_index):\n vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement)\n emb_dim = word_to_vec_map[\"cucumber\"].shape[0] # define dimensionality of your GloVe word vectors (= 50)\n emb_matrix = np.zeros((vocab_len, emb_dim)) # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors = emb_dim)\n for word, index in word_to_index.items(): # Set each row \"index\" of the embedding matrix to be the word vector representation of the \"index\"th word of the vocabulary\n emb_matrix[index, :] = word_to_vec_map[word]\n embedding_layer = Embedding(vocab_len, emb_dim, trainable = False) # Define Keras embedding layer with the correct output/input sizes, make it trainable. Use Embedding(...). Make sure to set trainable=False. \n embedding_layer.build((None,)) # Build the embedding layer, it is required before setting the weights of the embedding layer. Do not modify the \"None\".\n embedding_layer.set_weights([emb_matrix]) # Set the weights of the embedding layer to the embedding matrix. 
Your layer is now pretrained.\n return embedding_layer", "def _create_example(self):\n source = np.random.randn(self.batch_size, self.max_decode_length,\n self.input_depth)\n source_len = np.random.randint(0, self.max_decode_length, [self.batch_size])\n target_len = np.random.randint(0, self.max_decode_length * 2,\n [self.batch_size])\n target = np.random.randn(self.batch_size,\n np.max(target_len), self.input_depth)\n labels = np.random.randint(0, self.vocab_size,\n [self.batch_size, np.max(target_len) - 1])\n\n example_ = namedtuple(\n \"Example\", [\"source\", \"source_len\", \"target\", \"target_len\", \"labels\"])\n return example_(source, source_len, target, target_len, labels)", "def load_embedding(src: str, embedding_type, layers) -> TransformerEmbedding:\n emb = TransformerEmbedding(src, embedding_type=embedding_type, layers=layers)\n return emb", "def __init__(self, input, input_size, embedding_size):\n\n self.input = input\n self.output = layers.EmbeddingLayer(self.input, input_size, embedding_size, W=initialize_parameters()[0])", "def build_vocabulary(self, tokens=None, embeddings=None):\n if tokens is not None and embeddings is not None:\n raise ValueError(\"Only accepts either `tokens` or `embeddings`.\")\n\n if tokens is not None:\n # Build from tokenized tokens\n # for sentence in tqdm(tokens):\n # for word in tokens:\n # print(type(word))\n # exit()\n self.vocab.extend(\n list(set([\n word\n for sentence in tqdm(tokens)\n for word in sentence\n ]))\n )\n elif embeddings is not None:\n # Build from pretrained embeddings\n for word in tqdm(embeddings):\n word = word.strip(\"\\n\")\n word = word.split(\" \")\n\n self.vocab.append(word[0])\n vector = word[1:]\n self.vectors.append(vector)", "def init_embedding(size=50):\n vector = np.random.normal(0.0, 0.01, size)\n return vector", "def init_embedding(self):\n self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)", "def __init__(self, num_words, embedding_size, use_cuda):\n super(StandardEmbedding, self).__init__()\n self.embedding_size = embedding_size\n self.num_hash_functions = 0\n self.embeddings = nn.Embedding(num_words, embedding_size)\n self.embeddings = self.embeddings.cuda() if use_cuda else self.embeddings", "def create_emb_for_encoder_and_decoder(vocab_size,\n embed_size,\n dtype=tf.float32,\n num_partitions=0,\n scope=None):\n\n if num_partitions <= 1:\n partitioner = None\n else:\n # Note: num_partitions > 1 is required for distributed training due to\n # embedding_lookup tries to colocate single partition-ed embedding variable\n # with lookup ops. This may cause embedding variables being placed on worker\n # jobs.\n partitioner = tf.fixed_size_partitioner(num_partitions)\n\n with tf.variable_scope(\n scope or \"embeddings\", dtype=dtype, partitioner=partitioner) as scope:\n # Share embedding\n embedding_encoder = tf.get_variable(\"shared_embedding\",\n [vocab_size, embed_size], dtype)\n embedding_decoder = embedding_encoder\n\n return embedding_encoder, embedding_decoder", "def TransformerTokenEmbedding(\n num_embeddings, embedding_dim, padding_idx, freeze_embed=False\n):\n m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)\n nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)\n nn.init.constant_(m.weight[padding_idx], 0)\n if freeze_embed:\n m.weight.requires_grad = False\n return m", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. 
load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def call(self, x, *args, **kwargs):\n with tf.name_scope(\"embedding\"):\n # fills out of bound values with padding symbol\n out_bound_mask = tf.cast(x > (self.vocab_size - 1), dtype=tf.int32)\n x *= 1 - out_bound_mask\n x += out_bound_mask * tf.cast(self.pad_sym, dtype=tf.int32)\n\n embeddings = tf.gather(self.shared_weights, x)\n if self.embed_scale:\n # Scale embedding by the sqrt of the hidden size\n embeddings *= self.hidden_size ** 0.5\n\n if self.mask_paddings:\n # Create binary array of size [batch_size, length]\n # where 1 = padding, 0 = not padding\n padding = get_padding(x, padding_value=self.pad_sym)\n\n # Set all padding embedding values to 0\n # embeddings *= tf.expand_dims(1 - padding, -1)\n embeddings *= tf.cast(tf.expand_dims(1.0 - padding, -1), dtype=embeddings.dtype)\n return embeddings", "def build_model(vocab_size, embedding_dim, rnn_units, batch_size):\n model=tf.keras.Sequential([\n \n tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]),\n rnn(rnn_units, return_sequences=True, recurrent_initializer='glorot_uniform', stateful=True),\n tf.keras.layers.Dense(vocab_size)\n \n ])\n \n return model", "def embedding_layer(n_categories, embedding_dim, name=None):\n\n input_tensor = Input(shape=(1,))\n x = Embedding(n_categories, embedding_dim, name=name)(input_tensor)\n x = Reshape(target_shape=(embedding_dim,))(x)\n\n return input_tensor, x", "def make_vocab(src_hparams, tgt_hparams):\n src_vocab = MonoTextData.make_vocab(src_hparams)\n\n if tgt_hparams[\"processing_share\"]:\n tgt_bos_token = src_hparams[\"bos_token\"]\n tgt_eos_token = src_hparams[\"eos_token\"]\n else:\n tgt_bos_token = tgt_hparams[\"bos_token\"]\n tgt_eos_token = tgt_hparams[\"eos_token\"]\n tgt_bos_token = utils.default_str(tgt_bos_token,\n SpecialTokens.BOS)\n tgt_eos_token = utils.default_str(tgt_eos_token,\n SpecialTokens.EOS)\n if tgt_hparams[\"vocab_share\"]:\n if tgt_bos_token == src_vocab.bos_token and \\\n tgt_eos_token == src_vocab.eos_token:\n tgt_vocab = src_vocab\n else:\n tgt_vocab = Vocab(src_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n else:\n tgt_vocab = Vocab(tgt_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n\n return src_vocab, tgt_vocab", "def build_embeddings(opt, word_dict, for_encoder='src'):\n if for_encoder=='src':\n embedding_dim = opt.src_word_vec_size #512\n elif for_encoder=='tgt':\n embedding_dim = opt.tgt_word_vec_size\n elif for_encoder=='structure':\n embedding_dim = 64\n\n word_padding_idx = word_dict.stoi[Constants.PAD_WORD]\n num_word_embeddings = len(word_dict)\n \n if for_encoder=='src' or for_encoder=='tgt':\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")\n elif for_encoder=='structure':\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=False,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n 
word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")", "def build(self, hp, inputs=None):\n input_node = inputs\n embedding_dim = self.embedding_dim or hp.Choice('embedding_dim', [8, 16], default=8)\n output_node = tf.stack(\n [\n tf.tensordot(input_node[0][:, col_id], tf.keras.layers.Embedding(1, embedding_dim)(0), axes=0)\n for col_id in range(self.num_of_fields)\n ],\n axis=1\n )\n return output_node", "def build(self, unused_input_shapes):\n self.embedding_lookup = EmbeddingLookup(\n vocab_size=self.config.vocab_size,\n embedding_size=self.config.hidden_size,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"word_embeddings\")\n self.embedding_postprocessor = CustomEmbeddingPostprocessor(\n word_embedding_width=self.config.hidden_size,\n use_type_embeddings=True,\n token_type_vocab_size=self.config.type_vocab_size,\n use_position_embeddings=True,\n max_position_embeddings=self.config.max_position_embeddings,\n dropout_prob=self.config.hidden_dropout_prob,\n initializer_range=self.config.initializer_range,\n dtype=tf.float32,\n name=\"embeddings\")\n self.encoder = CustomTransformer(\n num_hidden_layers=self.config.num_hidden_layers,\n hidden_size=self.config.hidden_size,\n num_attention_heads=self.config.num_attention_heads,\n intermediate_size=self.config.intermediate_size,\n intermediate_activation=self.config.hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n backward_compatible=self.config.backward_compatible,\n float_type=self.float_type,\n name=\"encoder\")\n self.pooler_dense = tf.keras.layers.Dense(\n units=self.config.hidden_size,\n activation=\"tanh\",\n kernel_initializer=get_initializer(self.config.initializer_range))\n super(BertLayer, self).build(unused_input_shapes)", "def _get_embedding(self, data):\n # Tensor(n, c)\n cat = data['cat']\n return self.one_hot_embed(cat)", "def randomly_init_embeddings(self, embed_dim):\n self.embed_dim = embed_dim\n self.embeddings = np.random.rand(self.size(), embed_dim)\n for term in [self.pad_term, self.unk_term, self.eos_term]:\n self.embeddings[self.get_id(term)] = np.zeros([self.embed_dim])", "def _get_word_tensor(embedding_op: tf.Operation) -> tf.Tensor:\n assert embedding_op.type == 'AddV2'\n add = embedding_op.inputs[0].op\n assert add.type == 'AddV2'\n identity = add.inputs[0].op\n assert identity.type == 'Identity'\n gather = identity.inputs[0].op\n assert gather.type == 'ResourceGather'\n\n return gather.outputs[0]", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding", "def target_embedding_fairseq(self):\r\n if self.params[\"embedding.share\"]:\r\n return self.source_embedding_fairseq()\r\n return tf.get_variable(\r\n name=\"W\",\r\n shape=[self.target_vocab_info.total_size, self.params[\"embedding.dim\"]],\r\n initializer=tf.random_normal_initializer(\r\n mean=0.0,\r\n stddev=0.1))", "def __init__(self, config, mode=\"train\", input_reader=None):\n if mode not in [\"train\", \"eval\", \"encode\", \"test\"]:\n raise ValueError(\"Unrecognized mode: %s\" % mode)\n\n self.config = config\n self.mode = mode\n self.reader = input_reader if input_reader else tf.TFRecordReader()\n\n # Initializer used for non-recurrent weights.\n self.uniform_initializer = 
tf.random_uniform_initializer(\n minval=-self.config.uniform_init_scale,\n maxval=self.config.uniform_init_scale)\n\n\n # Each is an int64 Tensor with shape [batch_size, padded_length].\n self.encode_ids1 = None\n self.encode_ids2 = None\n\n # Boolean masks distinguishing real words (1) from padded words (0).\n # Each is an int32 Tensor with shape [batch_size, padded_length].\n self.encode_mask1 = None\n self.encode_mask2 = None\n\n # Input sentences represented as sequences of word embeddings.\n # Each is a float32 Tensor with shape [batch_size, padded_length, emb_dim].\n self.encode_emb1 = None\n self.encode_emb2 = None\n\n # The output from the sentence encoder.\n # A float32 Tensor with shape [batch_size, num_gru_units].\n self.thought_vectors1 = None\n self.thought_vectors2 = None\n\n self.label = None\n self.feature = None\n # The cross entropy losses and corresponding weights of the decoders. Used\n # for evaluation.\n self.target_cross_entropy_losses = []\n self.accuracy = []\n self.logits = []\n\n # The total loss to optimize.\n self.total_loss = None", "def _create_tf_embed_fnn(\n self,\n x_in: \"tf.Tensor\",\n layer_sizes: List[int],\n fnn_name: Text,\n embed_name: Text,\n ) -> \"tf.Tensor\":\n\n x = train_utils.create_tf_fnn(\n x_in,\n layer_sizes,\n self.droprate,\n self.C2,\n self._is_training,\n layer_name_suffix=fnn_name,\n )\n return train_utils.create_tf_embed(\n x,\n self.embed_dim,\n self.C2,\n self.similarity_type,\n layer_name_suffix=embed_name,\n )", "def _create_input(inputs: List[Tensor], initial: bool = False) \\\n -> Dict[str, Tensor]:\n word_embed = torch.stack(inputs, dim=0)\n seq_len, batch_size, embed_dim = word_embed.size()\n if not initial:\n # Add a dummy token at the end that stands for the token\n # to predict.\n word_embed = torch.cat([\n word_embed,\n word_embed.new_zeros(1, batch_size, embed_dim)\n ], dim=0)\n seq_len += 1\n segment_ids = word_embed.new_zeros(\n seq_len, batch_size, dtype=torch.long)\n return_dict = {\n \"word_embed\": word_embed,\n \"segment_ids\": segment_ids,\n }\n\n if not initial:\n # Only the dummy token is considered target.\n target_mapping = torch.cat([\n torch.zeros(1, seq_len - 1, batch_size),\n torch.ones(1, 1, batch_size)\n ], dim=1).to(device=word_embed.device)\n # Dummy token attends to nothing; actual tokens attend to all.\n permute_mask = torch.cat([\n torch.zeros(seq_len, seq_len - 1, batch_size),\n torch.ones(seq_len, 1, batch_size),\n ], dim=1).to(device=word_embed.device)\n return_dict.update({\n \"target_mapping\": target_mapping,\n \"permute_mask\": permute_mask,\n })\n\n return return_dict", "def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. 
Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = 
T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n 
updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None", "def _configure_embeddings(self):\r\n # TODO(omalleyt): Add integration tests.\r\n from tensorflow.python.keras.layers import embeddings\r\n try:\r\n from tensorboard.plugins import projector\r\n except ImportError:\r\n raise ImportError('Failed to import TensorBoard. Please make sure that '\r\n 'TensorBoard integration is complete.\"')\r\n config = projector.ProjectorConfig()\r\n for layer in self.model.layers:\r\n if isinstance(layer, embeddings.Embedding):\r\n embedding = config.embeddings.add()\r\n embedding.tensor_name = layer.embeddings.name\r\n\r\n if self.embeddings_metadata is not None:\r\n if isinstance(self.embeddings_metadata, str):\r\n embedding.metadata_path = self.embeddings_metadata\r\n else:\r\n if layer.name in embedding.metadata_path:\r\n embedding.metadata_path = self.embeddings_metadata.pop(layer.name)\r\n\r\n if self.embeddings_metadata:\r\n raise ValueError('Unrecognized `Embedding` layer names passed to '\r\n '`keras.callbacks.TensorBoard` `embeddings_metadata` '\r\n 'argument: ' + str(self.embeddings_metadata.keys()))\r\n\r\n class DummyWriter(object):\r\n \"\"\"Dummy writer to conform to `Projector` API.\"\"\"\r\n\r\n def __init__(self, logdir):\r\n self.logdir = logdir\r\n\r\n def get_logdir(self):\r\n return self.logdir\r\n\r\n writer = DummyWriter(self.log_dir)\r\n projector.visualize_embeddings(writer, config)", "def init_embedding(embeddings):\n bias = np.sqrt(3.0 / embeddings.size(1))\n torch.nn.init.uniform_(embeddings, -bias, bias)", "def build_image_embeddings(self):\n inception_output = image_embedding.inception_v3(\n self.images,\n trainable=self.train_inception,\n is_training=self.is_training())\n\n # Map inception output onto embedding space.\n with tf.variable_scope(\"image_embedding\") as scope:\n image_embeddings = tf.contrib.layers.fully_connected(\n inputs=inception_output,\n num_outputs=self.config.sentence_embedding_size,\n activation_fn=None,\n weights_initializer=self.initializer,\n biases_initializer=None,\n scope=scope)\n \n if self.mode == \"train\":\n # to avoid overfitting we use dropout for all fully connected layers\n image_embeddings = tf.nn.dropout(image_embeddings, self.config.dropout_keep_prob_encoder)\n\n # Save the embedding size in the graph.\n tf.constant(self.config.sentence_embedding_size, name=\"image_embedding_size\")\n\n self.image_embeddings = image_embeddings", "def get_model_bicond_sepembed(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n inputs_cond = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n 
cont_train = True\n if pretrain == \"pre\": # continue training embeddings or not. Currently works better to continue training them.\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n embedding_matrix_cond = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1),\n name=\"embedding_matrix\", trainable=cont_train)\n\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n embedded_inputs_cond = tf.nn.embedding_lookup(embedding_matrix_cond, inputs_cond)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n inputs_cond_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs_cond)]\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n ### FORWARD\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n fw_outputs, fw_states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n # running a second LSTM conditioned on the last state of the first\n fw_outputs_cond, fw_states_cond = lstm_encoder(inputs_cond_list, fw_states[-1],\n \"LSTMcond\")\n\n fw_outputs_fin = fw_outputs_cond[-1]\n\n ### BACKWARD\n bw_outputs, bw_states = lstm_encoder(inputs_list[::-1], start_state, \"LSTM_bw\")\n bw_outputs_cond, bw_states_cond = lstm_encoder(inputs_cond_list[::-1], bw_states[-1],\n \"LSTMcond_bw\")\n bw_outputs_fin = bw_outputs_cond[-1]\n\n outputs_fin = tf.concat(1, [fw_outputs_fin, bw_outputs_fin])\n\n\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh, bias=True)(outputs_fin) # tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax, bias=True)(outputs_fin) # tf.nn.softmax\n\n return model, [inputs, inputs_cond]", "def embeddings_layers_init(self):\n\n user_embeddings = tf.keras.layers.Embedding(\n self.n_users, self.user_dim, input_length=1)\n\n item_embeddings = tf.keras.layers.Embedding(\n self.n_items, self.item_dim, input_length=1)\n\n return user_embeddings, item_embeddings", "def embed_text(tensors, embeddings):\n wids = tensors[\"wids\"]\n cids = tensors[\"cids\"]\n\n embedding_weights = embeddings.get_initialized_params(trainable=False)\n word_vecs = tf.nn.embedding_lookup(embedding_weights, wids)\n char_emb = common_layers.character_cnn(cids)\n return tf.concat([word_vecs, char_emb], -1)", "def build_encoder(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tctx = proj[0][-1]\n\n\treturn embedding, x_mask, ctx", "def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = 
self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, 
name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model", "def call(self, inputs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output)\n\n return output", "def call(self, inputs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output)\n\n return output", "def testEmbeddings(self):\n input_data = {\n \"x\":\n constant_op.constant(\n np.array(np.random.random_sample((20)), dtype=np.int32))\n }\n\n class EmbeddingModel(keras.Model):\n\n def __init__(self):\n super(EmbeddingModel, self).__init__()\n self.shared_weights = self.add_weight(\n \"weights\",\n shape=(2000, 300),\n dtype=dtypes.float32,\n initializer=init_ops.random_normal_initializer(\n mean=0.0, stddev=300**(-0.5)))\n\n @def_function.function(input_signature=[\n tensor_spec.TensorSpec(shape=(20), dtype=dtypes.int32)\n ])\n def func(self, x):\n return array_ops.gather(self.shared_weights, x)\n\n model = EmbeddingModel()\n root, output_func = self._freezeModel(model.func)\n self._testConvertedFunction(root, root.f, output_func, input_data)", "def instantiate_weights(self):\n 
self.product_embeddings = tf.get_variable(\n name='product_embeddings',\n shape=[50000, 300],\n dtype=tf.float32\n )\n self.aisle_embeddings = tf.get_variable(\n name='aisle_embeddings',\n shape=[250, 50],\n dtype=tf.float32\n )\n self.department_embeddings = tf.get_variable(\n name='department_embeddings',\n shape=[50, 10],\n dtype=tf.float32\n )\n self.W_relu = tf.get_variable(\"W_relu\",shape=[670, 30]) #这个参数后续需要自适应\n self.b_relu = tf.get_variable(\"bias_relu\",shape=[30]) \n self.W_projection = tf.get_variable(\"W_projection\",shape=[30, 1]) \n self.b_projection = tf.get_variable(\"bias_projection\",shape=[1])", "def embedding_model(\n n_factors: int = 50,\n window: int = 5,\n min_count: int = 1,\n learning_rate: float = 0.05,\n negative_samples: int = 10,\n negative_exponent: float = 0.75,\n workers: int = 4,\n n_iterations: int = 10,\n batch_size: int = 10000,\n skip_gram: int = 0,\n) -> Word2Vec:\n logger.info(\"Defining Embedding Neural Network model.\")\n model = Word2Vec(\n vector_size=n_factors,\n window=window,\n min_count=min_count,\n alpha=learning_rate,\n negative=negative_samples,\n ns_exponent=negative_exponent,\n workers=workers,\n epochs=n_iterations,\n batch_words=batch_size,\n sg=skip_gram,\n compute_loss=True,\n )\n return model", "def __init__(self, input_dim, output_dim, name='embedding_layer'):\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.name = name\n\n # Randomly generate weights\n self.embeddings = shared((input_dim, output_dim),\n self.name + '__embeddings')\n\n # Define parameters\n self.params = [self.embeddings]", "def _embed(self):\n batch_size = tf.shape(self.p)[0]\n with tf.variable_scope(\"emb\"):\n with tf.variable_scope(\"char\"):\n pc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.pc), \n [batch_size * self.max_p_len, self.max_w_len, self.vocab.char_embed_dim])\n qc_emb = tf.reshape(tf.nn.embedding_lookup(\n self.char_embed, self.qc), \n [batch_size * self.max_q_len, self.max_w_len, self.vocab.char_embed_dim])\n cell_fw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n cell_bw = tf.contrib.rnn.GRUCell(self.char_hidden_size)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, pc_emb, self.pc_length, dtype=tf.float32)\n pc_emb = tf.concat([state_fw, state_bw], axis=1)\n _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, qc_emb, self.qc_length, dtype=tf.float32)\n qc_emb = tf.concat([state_fw, state_bw], axis=1)\n pc_emb = tf.reshape(pc_emb, [batch_size, self.max_p_len, 2 * self.char_hidden_size])\n qc_emb = tf.reshape(qc_emb, [batch_size, self.max_q_len, 2 * self.char_hidden_size])\n\n with tf.name_scope(\"word\"):\n p_emb = tf.nn.embedding_lookup(self.word_embed, self.p)\n q_emb = tf.nn.embedding_lookup(self.word_embed, self.q)\n\n with tf.name_scope(\"pos\"):\n p_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.p_pos)\n q_pos_emb = tf.nn.embedding_lookup(self.pos_embed, self.q_pos)\n \n with tf.name_scope(\"em\"):\n sh = tf.shape(self.p_em)\n resh = [sh[0], sh[1], 1]\n p_em_feat = tf.reshape(tf.cast(self.p_em, dtype=tf.float32), shape=resh)\n\n self.p_emb = tf.concat([p_emb, pc_emb, p_pos_emb, p_em_feat], axis=2)\n self.q_emb = tf.concat([q_emb, qc_emb, q_pos_emb], axis=2)", "def cbow_model(vocabulary_size, embedding_size, context_length, batch_size,\n num_sampled, valid_examples, learning_rate):\n input_batch_size = context_length * batch_size\n\n graph = tf.Graph()\n with graph.as_default():\n # Input data.\n tf_train_dataset = 
tf.placeholder(tf.int32, shape=[input_batch_size])\n tf_train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])\n\n # This sums the input embeddings in a batch of size input_batch_size,\n # by group of context_length. This results in an input vector with\n # batch_size rows.\n word_mean_op = tf.constant((1.0 / context_length) *\n np.kron(np.eye(batch_size), np.ones([1, context_length])), dtype=tf.float32)\n\n # Variables.\n embeddings = tf.Variable(tf.random_uniform(\n [vocabulary_size, embedding_size], -1.0, 1.0))\n softmax_weights = tf.Variable(tf.truncated_normal(\n [vocabulary_size, embedding_size], stddev=1.0 / np.sqrt(embedding_size)))\n softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))\n\n # Model.\n # Look up embeddings for inputs.\n embed = tf.nn.embedding_lookup(embeddings, tf_train_dataset)\n word_means = tf.matmul(word_mean_op, embed)\n # Compute the softmax loss, using a sample of the negative labels each time.\n loss = tf.reduce_mean(tf.nn.sampled_softmax_loss(\n softmax_weights, softmax_biases, word_means, tf_train_labels, num_sampled, vocabulary_size))\n\n # Optimizer.\n optimizer = tf.train.AdagradOptimizer(learning_rate).minimize(loss)\n\n # Compute the similarity between minibatch examples and all embeddings.\n # We use the cosine distance:\n norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n normalized_embeddings = embeddings / norm\n\n similarity = None\n if valid_examples is not None:\n valid_dataset = tf.constant(valid_examples, dtype=tf.int32)\n valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)\n similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))\n\n tf_graph = {\n 'graph': graph,\n 'data_ph': tf_train_dataset,\n 'labels_ph': tf_train_labels }\n\n return tf_graph, optimizer, loss, normalized_embeddings, similarity", "def __init__(self, \n k=DEFAULT_EMBEDDING_SIZE, \n eta=DEFAULT_ETA, \n epochs=DEFAULT_EPOCH, \n batches_count=DEFAULT_BATCH_COUNT, \n seed=DEFAULT_SEED,\n embedding_model_params={},\n optimizer=DEFAULT_OPTIM, \n optimizer_params={'lr':DEFAULT_LR},\n loss=DEFAULT_LOSS, \n loss_params={},\n regularizer=DEFAULT_REGULARIZER, \n regularizer_params={},\n verbose=DEFAULT_VERBOSE):\n # Store for restoring later.\n self.all_params = \\\n {\n 'k': k,\n 'eta': eta,\n 'epochs': epochs,\n 'batches_count': batches_count,\n 'seed': seed,\n 'embedding_model_params': embedding_model_params,\n 'optimizer': optimizer,\n 'optimizer_params': optimizer_params,\n 'loss': loss,\n 'loss_params': loss_params,\n 'regularizer': regularizer,\n 'regularizer_params': regularizer_params,\n 'verbose': verbose\n\n }\n tf.reset_default_graph()\n\n self.is_filtered = False\n self.loss_params = loss_params\n\n self.embedding_model_params = embedding_model_params\n\n self.k = k\n self.seed = seed\n self.epochs = epochs\n self.eta = eta\n self.regularizer_params = regularizer_params\n self.batches_count = batches_count\n if batches_count == 1:\n logger.warn(\n 'batches_count=1. All triples will be processed in the same batch. This may introduce memory issues.')\n print('WARN: when batches_count=1 all triples will be processed in the same batch. 
'\n 'This may introduce memory issues.')\n\n try:\n self.loss = LOSS_REGISTRY[loss](self.eta, self.loss_params, verbose=verbose)\n except KeyError:\n msg = 'Unsupported loss function: {}'.format(loss)\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n if regularizer is not None:\n self.regularizer = REGULARIZER_REGISTRY[regularizer](self.regularizer_params, verbose=verbose)\n else:\n self.regularizer = regularizer\n except KeyError:\n msg = 'Unsupported regularizer: {}'.format(regularizer)\n logger.error(msg)\n raise ValueError(msg)\n\n self.optimizer_params = optimizer_params\n if optimizer == \"adagrad\":\n self.optimizer = tf.train.AdagradOptimizer(learning_rate=self.optimizer_params.get('lr', DEFAULT_LR))\n elif optimizer == \"adam\":\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.optimizer_params.get('lr', DEFAULT_LR))\n elif optimizer == \"sgd\":\n self.optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=self.optimizer_params.get('lr', DEFAULT_LR))\n elif optimizer == \"momentum\":\n self.optimizer = tf.train.MomentumOptimizer(learning_rate=self.optimizer_params.get('lr', DEFAULT_LR),\n momentum=self.optimizer_params.get('momentum',\n DEFAULT_MOMENTUM))\n else:\n msg = 'Unsupported optimizer: {}'.format(optimizer)\n logger.error(msg)\n raise ValueError(msg)\n\n self.verbose = verbose\n\n self.rnd = check_random_state(self.seed)\n\n self.initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=self.seed)\n self.tf_config = tf.ConfigProto(allow_soft_placement=True)\n self.tf_config.gpu_options.allow_growth = True\n self.sess_train = None\n self.sess_predict = None\n self.trained_model_params = []\n self.is_fitted = False\n self.eval_config = {}", "def get_model_tweetonly(batch_size, max_seq_length, input_size, hidden_size, target_size,\n vocab_size, pretrain, tanhOrSoftmax, dropout):\n\n # batch_size x max_seq_length\n inputs = tf.placeholder(tf.int32, [batch_size, max_seq_length])\n\n cont_train = True\n if pretrain == \"pre\":\n cont_train = False\n embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, input_size], -0.1, 0.1), # input_size is embeddings size\n name=\"embedding_matrix\", trainable=cont_train)\n\n # batch_size x max_seq_length x input_size\n embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)\n\n\n # [batch_size x inputs_size] with max_seq_length elements\n # fixme: possibly inefficient\n # inputs_list[0]: batch_size x input[0] <-- word vector of the first word\n inputs_list = [tf.squeeze(x) for x in\n tf.split(1, max_seq_length, embedded_inputs)]\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size)\n start_state = tf.zeros([batch_size, lstm_encoder.state_size])\n\n # [h_i], [h_i, c_i] <-- LSTM\n # [h_i], [h_i] <-- RNN\n outputs, states = lstm_encoder(inputs_list, start_state, \"LSTM\")\n\n drop_prob = None\n if dropout:\n drop_prob = 0.1\n\n lstm_encoder = Encoder(rnn_cell.BasicLSTMCell, input_size, hidden_size, drop_prob, drop_prob)\n\n outputs_fin = outputs[-1]\n if tanhOrSoftmax == \"tanh\":\n model = Projector(target_size, non_linearity=tf.nn.tanh)(outputs_fin) #tf.nn.softmax\n else:\n model = Projector(target_size, non_linearity=tf.nn.softmax)(outputs_fin) # tf.nn.softmax\n\n\n return model, [inputs]", "def __init__(self, vocab, embed_size=512, dropout_rate=0.1, max_len=200):\n super(DecoderEmbeddings, self).__init__()\n pad_token_idx = 0 #vocab.tokenizer.ids_to_tokens[0]\n assert vocab.tokenizer.ids_to_tokens[0] == '[PAD]'\n self.embeddings = 
nn.Embedding(len(vocab.tokenizer.ids_to_tokens), embed_size, padding_idx=pad_token_idx)\n self.positional_encoding = PositionalEncoding(d_model=embed_size, dropout=dropout_rate, max_len=max_len)", "def __init__(self,\n vocab_size,\n embed_dim,\n unit_dim,\n window_size,\n hidden_activation,\n pooling_type,\n dropout,\n num_gpus=1,\n default_gpu_id=0,\n regularizer=None,\n random_seed=0,\n trainable=True,\n scope=\"char_feat\"):\n self.vocab_size = vocab_size\n self.embed_dim = embed_dim\n self.unit_dim = unit_dim\n self.window_size = window_size\n self.hidden_activation = hidden_activation\n self.pooling_type = pooling_type\n self.dropout = dropout\n self.num_gpus = num_gpus\n self.default_gpu_id = default_gpu_id\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n \n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):\n self.embedding_layer = create_embedding_layer(self.vocab_size, self.embed_dim, False,\n None, self.num_gpus, self.default_gpu_id, None, self.random_seed, self.trainable)\n \n self.dropout_layer = create_dropout_layer(self.dropout, self.num_gpus, self.default_gpu_id, self.random_seed)\n \n self.conv_layer = create_convolution_layer(\"multi_1d\", 1, self.embed_dim,\n self.unit_dim, 1, self.window_size, 1, \"SAME\", self.hidden_activation, [0.0], None,\n False, False, True, self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, self.trainable)\n \n self.pooling_layer = create_pooling_layer(self.pooling_type, self.num_gpus, self.default_gpu_id)", "def __init__(self,\n config,\n is_training,\n inputs):\n self._input = inputs\n vocab_size = config.vocab_size # num of possible words\n self._gpu_devices = [i for i in range(len(get_gpu_devices(FLAGS.gpu_devices)))]\n self._gpu_num = len(self._gpu_devices)\n self._cpu_device = FLAGS.cpu_device\n\n with tf.name_scope(\"model_variables\"):\n with tf.name_scope(\"global_step\"):\n self._global_step = tf.Variable(0, name='global_step', trainable=False)\n\n with tf.name_scope(\"epoch_counter\"):\n self._epoch_count = tf.Variable(0, name='epoch', trainable=False)\n self._epoch_inc = tf.assign(self._epoch_count, tf.add(self._epoch_count, tf.constant(1)))\n self._epoch_reset = tf.assign(self._epoch_count, tf.constant(0))\n\n # ptrs to the lstm cell object, ltsm initial state op and final state\n self._cell = []\n self._initial_state = []\n self._final_state = []\n\n # construct the embedding layer on cpu device\n with tf.variable_scope(\"embedding\"), tf.device(self._cpu_device):\n # the embedding matrix is allocated in the cpu to save valuable gpu memory for the model.\n embedding_map = tf.get_variable(\n name=\"embedding\", shape=[vocab_size, config.embedding_size], dtype=tf.float32)\n b_embed_in = tf.get_variable(name=\"b_embed_in\", shape=[config.embedding_size], dtype=tf.float32)\n embedding = tf.nn.embedding_lookup(embedding_map, self._input.input_data) + b_embed_in\n\n # non variational wrapper for the embedding\n if is_training and config.keep_prob_embed < 1:\n embedding_out = tf.nn.dropout(embedding,\n config.keep_prob_embed) # / config.keep_prob_embed\n else:\n embedding_out = embedding\n\n # split input to devices if needed\n with tf.name_scope(\"split_inputs\"):\n if self._gpu_num > 1:\n embedding_out = tf.split(embedding_out, self._gpu_num)\n targets = tf.split(inputs.targets, self._gpu_num)\n else:\n embedding_out = [embedding_out]\n targets = [inputs.targets]\n\n # construct the rest of the model on every gpu\n all_loss = [] # 2D array of 
scalar loss; [i,j] element stands for the loss of the j-th layer of the i-th gpu\n all_grads = [] # 2D array of grads; [i,j] element stands for the grad of the j-th layer of the i-th gpu\n\n with tf.variable_scope(\"gpus\"):\n for i in range(self._gpu_num):\n with tf.device(\"/gpu:%d\" % self._gpu_devices[i]), tf.name_scope(\"gpu-%d\" % i):\n loss, grads, cell, initial_state, final_state, cache_data = self.complete_model(embedding_out[i],\n embedding_map,\n config,\n is_training,\n inputs,\n targets[i])\n\n self._cache_data = cache_data\n self._cell.append(cell)\n self._initial_state.append(initial_state)\n self._final_state.append(final_state)\n all_loss.append(loss)\n all_grads.append(grads)\n\n # reuse variables for the next gpu\n tf.get_variable_scope().reuse_variables()\n\n # reduce per-gpu-loss to total loss\n with tf.name_scope(\"reduce_loss\"):\n self._loss = self.reduce_loss(all_loss)\n\n if config.dynamic_eval is not None:\n # average grads ; sync point\n with tf.name_scope(\"average_grads\"):\n averaged_grads = self.average_grads(all_grads)\n\n # get trainable vars\n tvars = tf.trainable_variables()\n\n self._dynamic_eval = DynamicEval(config, tvars, averaged_grads)\n\n self._train_op = self._dynamic_eval.update_op()", "def _create_embedding_variable(self, name, initial_value):\n if name not in self._tls._embed_variables:\n embed_var = tf.Variable(\n initial_value,\n name=name + str(threading.get_ident()),\n shape=(None, None),\n dtype=tf.float32,\n trainable=False,\n )\n self._tls._embed_variables[name] = embed_var\n else:\n embed_var = self._tls._embed_variables[name]\n embed_var.assign(initial_value)\n return embed_var", "def instantiate_weights(self):\n with tf.name_scope(\"decoder_init_state\"):\n self.W_initial_state = tf.get_variable(\"W_initial_state\", shape=[self.hidden_size, self.hidden_size*2], initializer=self.initializer)\n self.b_initial_state = tf.get_variable(\"b_initial_state\", shape=[self.hidden_size*2])\n with tf.name_scope(\"embedding_projection\"): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],initializer=self.initializer) # [vocab_size,embed_size] tf.random_uniform([self.vocab_size, self.embed_size],-1.0,1.0)\n self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size*2],dtype=tf.float32) #,initializer=self.initializer\n self.W_projection = tf.get_variable(\"W_projection\", shape=[self.hidden_size*2, self.num_classes],\n initializer=self.initializer) # [embed_size,label_size]\n self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])\n\n # GRU parameters:update gate related\n with tf.name_scope(\"gru_weights_encoder\"):\n self.W_z = tf.get_variable(\"W_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_z = tf.get_variable(\"U_z\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_z = tf.get_variable(\"b_z\", shape=[self.hidden_size])\n # GRU parameters:reset gate related\n self.W_r = tf.get_variable(\"W_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_r = tf.get_variable(\"U_r\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.b_r = tf.get_variable(\"b_r\", shape=[self.hidden_size])\n\n self.W_h = tf.get_variable(\"W_h\", shape=[self.embed_size, self.hidden_size], initializer=self.initializer)\n self.U_h = tf.get_variable(\"U_h\", shape=[self.embed_size, self.hidden_size], 
initializer=self.initializer)\n self.b_h = tf.get_variable(\"b_h\", shape=[self.hidden_size])\n\n with tf.name_scope(\"gru_weights_decoder\"):\n self.W_z_decoder = tf.get_variable(\"W_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_z_decoder = tf.get_variable(\"U_z_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_z_decoder = tf.get_variable(\"C_z_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_z_decoder = tf.get_variable(\"b_z_decoder\", shape=[self.hidden_size*2])\n # GRU parameters:reset gate related\n self.W_r_decoder = tf.get_variable(\"W_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_r_decoder = tf.get_variable(\"U_r_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.C_r_decoder = tf.get_variable(\"C_r_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer) #TODO\n self.b_r_decoder = tf.get_variable(\"b_r_decoder\", shape=[self.hidden_size*2])\n\n self.W_h_decoder = tf.get_variable(\"W_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer)\n self.U_h_decoder = tf.get_variable(\"U_h_decoder\", shape=[self.embed_size*2, self.hidden_size*2], initializer=self.initializer) #TODO\n self.C_h_decoder = tf.get_variable(\"C_h_decoder\", shape=[self.embed_size * 2, self.hidden_size * 2],initializer=self.initializer)\n self.b_h_decoder = tf.get_variable(\"b_h_decoder\", shape=[self.hidden_size*2])\n\n with tf.name_scope(\"full_connected\"):\n self.W_fc=tf.get_variable(\"W_fc\",shape=[self.hidden_size*2,self.hidden_size])\n self.a_fc=tf.get_variable(\"a_fc\",shape=[self.hidden_size])" ]
[ "0.74340177", "0.7306449", "0.72952855", "0.70558393", "0.7044115", "0.68895066", "0.67812735", "0.6750932", "0.67492104", "0.67476356", "0.67427427", "0.6717155", "0.6711093", "0.6680487", "0.6676144", "0.6672202", "0.6647878", "0.6621589", "0.66202843", "0.65980107", "0.658886", "0.6586531", "0.6531334", "0.65286624", "0.65250295", "0.6519084", "0.65062785", "0.64712554", "0.6382853", "0.6364604", "0.63344926", "0.63317245", "0.6310544", "0.6304809", "0.6304148", "0.62498164", "0.6203061", "0.61877984", "0.61850864", "0.61698747", "0.61607033", "0.61514133", "0.61407125", "0.61136097", "0.6103015", "0.6082179", "0.6070234", "0.6060752", "0.6043806", "0.603122", "0.60290176", "0.6023309", "0.59869546", "0.59678787", "0.59638184", "0.5957849", "0.5954293", "0.5939391", "0.5937538", "0.5935299", "0.5929666", "0.5926617", "0.59260964", "0.5903822", "0.5903713", "0.59028614", "0.59006244", "0.5893193", "0.5883604", "0.5877171", "0.58761704", "0.58755654", "0.58658624", "0.584614", "0.584295", "0.5840003", "0.5838431", "0.5829407", "0.58258873", "0.58249336", "0.58105195", "0.5804602", "0.58002186", "0.57970023", "0.57943845", "0.5788356", "0.5788356", "0.57842904", "0.5780896", "0.57787", "0.5772783", "0.5768228", "0.5762246", "0.57596517", "0.575167", "0.5741646", "0.57399917", "0.5739313", "0.5724532", "0.57237846" ]
0.66311824
17
Evaluates the trained model on the test set using the bleu_score method from NLTK.
def eval_nmt_bleu(model, dataset, vectorizer, args):
    model = model.eval().to(args.device)
    sampler = NMTSamplerWithMLTM(vectorizer, model)
    dataset.set_split('test')
    batch_generator = generate_nmt_batches(dataset, batch_size=args.batch_size, device=args.device)
    test_results = []
    for batch_dict in batch_generator:
        sampler.apply_to_batch(batch_dict)
        for i in range(args.batch_size):
            test_results.append(sampler.get_ith_item(i, False))
    bleu4 = np.array([r['bleu-4'] for r in test_results]) * 100
    return np.mean(bleu4), bleu4
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_bleu_score(cls, predictions, targets, ticks=False, corpus=True):\n if ticks:\n ref_sentences = cls._ticks_to_sentences(targets)\n cand_sentences = cls._ticks_to_sentences(predictions)\n else:\n ref_sentences = [[str(x) for x in seq] for seq in predictions]\n cand_sentences = [[str(x) for x in seq] for seq in targets]\n\n if corpus: bleu_score = corpus_bleu([[l] for l in ref_sentences], cand_sentences)\n else:\n bleu_score = 0.0\n num_sentences = 0\n\n for i in tqdm(range(len(ref_sentences))):\n sentence_bleu_score = sentence_bleu(ref_sentences[i], cand_sentences[i])\n print(sentence_bleu_score)\n bleu_score += sentence_bleu_score\n num_sentences += 1\n\n bleu_score /= num_sentences\n\n return bleu_score", "def evaluate(self, test_x, test_y):\n score = self._model.evaluate(test_x, test_y, verbose=self._verbose)\n print(\"Test score: \", score[0])\n print(\"Test accuracy: \", score[1])", "def evaluate(self, train_set=\"train_set\", test_set=\"test_set\", targets=\"targets\", k=10):\n\n test_set = self.cache.fetch(test_set) if isinstance(test_set, str) else test_set\n\n # Predict\n preds = self.run(dataset=train_set, targets=targets, k=k)\n\n # Evaluate model\n print(\"evaluating model ...\")\n score = evaluate(preds, test_set)\n print(\"MAP@{}: {:.5f}\\n\".format(k, score))\n\n return score", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate(self, test_data):\n result = self.model.run(test_data)\n self._save_result(result)", "def score_model(self, model, test_training, test_target):\n\n target_prediction = model.predict(test_training)\n from sklearn.metrics import classification_report\n if(self.VERBOSE):\n print(classification_report(test_target, target_prediction))\n\n return [\n f1_score(test_target, target_prediction, average='weighted'),\n precision_score(test_target, target_prediction, average='weighted'),\n recall_score(test_target, target_prediction, average='weighted')\n ]", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\n\t\t# One hot encode the input/labels\n\t\tencoder = LabelEncoder()\n\t\tencoder.fit(outs)\n\t\tenc_labels = encoder.transform(outs)\n\t\tenc_labels = np_utils.to_categorical(enc_labels)\n\n\t\t_, score = self.model.evaluate(ins, enc_labels, verbose=2)\n\n\t\treturn score", "def evaluate_model(model, X_test, Y_test): \n #Make predictions with the model\n Y_pred = model.predict(X_test)\n #convert numpy output to dataframe and add columns\n Y_pred_df = pd.DataFrame(Y_pred)\n Y_pred_df.columns = Y_test.columns\n #Convert predictions and correct y values to float for faciliate comparison\n Y_pred_df = Y_pred_df.astype('float64')\n Y_test = Y_test.astype('float64')\n print_score(Y_test, Y_pred_df, 'weighted avg')", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def evaluate_model(self, test_data, 
test_labels,verbose=2):\n test_loss, test_acc = self.model.evaluate(test_data, test_labels, verbose=verbose)\n return test_loss, test_acc", "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\t\treturn self.model.score(ins, outs)", "def assess_model(model, test_data, label):\n return model.score(test_data,label)", "def model_evaluate(self, test):\n features = {name: np.array(value) for name, value in test.items()}\n labels = {name: features.pop(name) for name in self.label_names}\n metrics = self.model.evaluate(x=features, y=labels, batch_size=5)\n return metrics", "def evaluate(self, X_test, y_test):\n self.run(self)\n self.y_pred = self.pipeline.predict(X_test)\n self.rmse = compute_rmse(self.y_pred, y_test)", "def test(self) -> None:\n\n self._predictions = self._lr.predict(self._X_test)", "def evaluate(X_test, y_test):\n # batch size is 16 for evaluation\n batch_size = 16\n\n # Load Model\n model = load_model('model/model.h5')\n return model.evaluate(X_test, y_test, batch_size, verbose = 1)", "def eval(self, test_docs, test_labels):\n assert len(test_docs)==len(test_labels)\n preds = [] # predicted labels\n for doc,y_gold in zip(test_docs,test_labels):\n y_pred = self.predict(doc)\n preds.append(y_pred)\n ev = Eval(test_labels, preds)\n return ev.accuracy()", "def test(self, test_instances, test_labels):\n scores = self.classifier.predict(test_instances)\n # TODO: print report", "def _score(self, estimator, train, test):\n b = estimator.fit(self.A[train], self.b[train]).predict(self.A[test])\n return accuracy_score(self.b[test], b)", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def evaluate(self, x_test, y_test, verbose=0):\n\n if self.model is None:\n raise StandardError('Model is not built. 
Run build method or load model before fitting')\n\n test_results = self.model.evaluate(x_test,\n y_test,\n batch_size=self.batch_size,\n verbose=verbose)\n self.val_history = test_results\n return test_results", "def model_evaluation(X_train, y_train, X_test, y_test, k=16):\n print(\">>>>>>> x.shape\", X_train.shape)\n p_matrix, X_reduce = dimension_reduction(X_train, k=k)\n print(\"model training ...\")\n bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), n_estimators=30, learning_rate=1)\n bdt.fit(X_reduce, y_train)\n print(\"fit succeed\")\n\n X_test = np.dot(X_test, p_matrix)\n y_pred = bdt.predict(X_test)\n print(classification_report(y_test, y_pred, target_names=['benign', 'gafgyt', 'miari'], digits=4))", "def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)", "def score(self):\n\n\t\tsplits = 10\n\t\tscore = 0\n\n\t\tkf = KFold(n_splits=splits, shuffle=True)\n\t\tkf.get_n_splits(self.data)\n\n\t\tfor train_ind, test_ind in kf.split(self.data):\n\n\t\t\ttrain = [self.data[ind] for ind in train_ind]\n\t\t\ttest = [self.data[ind] for ind in test_ind]\n\n\t\t\tself.model = self._fit(train)\n\t\t\ttemp_score = self.score_one(test)\n\t\t\tscore += temp_score\n\n\t\treturn score/float(splits)", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def score(self, test_data):\n\n\t\tpass", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def evaluate_prediction_BoW(vectorizer, classifier, test_data):\n \n data = (test_data[k][0] for k in range(len(test_data))) # generator for the train data\n data_features = vectorizer.transform(data)\n predictions = classifier.predict(data_features)\n target = [test_data[k][1] for k in range(len(test_data))]\n \n return accuracy_score(target, predictions)", "def test_model_performance(model_es,test_file):\n test_label = get_test_label(test_file)\n result = model_es.predict(input_fn = lambda :input_fn(test_file,1,False,100,True))\n predict_list = []\n for one_res in result:\n if \"probabilities\" in one_res:\n predict_list.append(one_res['probabilities'][1])\n get_auc(predict_list,test_label)", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, 
self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate_model(model, X_test, y_test, category_names):\n y_pred = model.predict(X_test)\n labels = np.unique(y_pred)\n print(labels)\n #print out score for each class and mean scores, including precision, recall, f1 score\n print(classification_report(y_test.values, y_pred, target_names=category_names.values))", "def trainAndEvaluate(trainDataFile, devDataFile, classifier, average):\n\n ids, instances, labels, features, classes = readArffFile(trainDataFile)\n\n startTime = time.time()\n\n classifier = classifier.lower()\n if classifier == \"svc\" or classifier == \"svm\":\n print(\"Using SVM\")\n clf = LinearSVC()\n elif classifier == \"nb\":\n print(\"Using Naive Bayes\")\n clf = MultinomialNB()\n elif classifier.lower() == \"nbboost\" or classifier.lower() == \"nbboosted\":\n print(\"Using Boosted Naive Bayes\")\n clf = MultinomialNB()\n clf = AdaBoostClassifier(clf)\n elif classifier == \"1r\":\n print(\"Sorry, 1R / LinearRegression isn't working right now\")\n exit()\n clf = LinearRegression(copy_X=False,fit_intercept=True, normalize=False)\n elif classifier == \"0r\":\n print(\"Using 0R\")\n from collections import Counter\n mostCommonTrainingClass = Counter(labels).most_common(1)[0][0]\n else:\n print(\"Invalid classifier choice.\")\n return\n\n print(\"Training the model\")\n\n if classifier != \"0r\":\n clf.fit(instances, labels)\n\n timeForTrain = time.time() - startTime\n numTrainInstances = len(instances)\n\n \"\"\"\n Testing and evaluating the model\n \"\"\"\n\n # Throw away the features and classes, we've already read them in.\n ids, instances, labels, _, _ = readArffFile(devDataFile)\n\n startTime = time.time()\n\n print(\"Testing the model\")\n numCorrect = 0\n numWrong = 0\n lenInstances = len(instances)\n predicted = []\n for i in range(lenInstances):\n # Status update of how it's going.\n if i % 1000 == 0:\n print(\"\\r\" + str(i).zfill(len(str(lenInstances))) + \"/\" + str(lenInstances) + \" \", end=\"\")\n instance = instances[i]\n label = labels[i]\n\n if classifier == \"0r\":\n res = mostCommonTrainingClass\n else:\n res = predictPrint(clf, instance)\n predicted.append(res)\n # print(\"-- Predicted label: {} || Correct label: {} --\". 
format(res, label))\n if res == label:\n numCorrect += 1\n else:\n numWrong += 1\n print()\n\n timeForTest = time.time() - startTime\n\n predicted = np.array(predicted)\n outName = outputFileName + classifier.upper() + dataSet + \".csv\"\n writeOutput(ids, predicted, outName)\n numDevInstances = len(instances)\n\n\n \"\"\"\n Printing various evaluation metrics.\n \"\"\"\n # report = classification_report(labels, predicted, target_names=classes)\n report = parameterizableReport(labels, predicted, beta=0.5, target_names=classes, averageType=average)\n print(report)\n print()\n # print(classification_report(labels, predicted, target_names=classes))\n\n \"\"\"\n print(\"Number of training instances: {}\".format(numTrainInstances))\n print(\"Number of dev instances: {}\".format(numDevInstances))\n print()\n\n print(\"Number of correct classifications: {}\".format(numCorrect))\n print(\"Number of wrong classifications: {}\".format(numWrong))\n print(\"Percentage of correct classifications: {0:.2f}%\".format(numCorrect*100/(numCorrect+numWrong)))\n print()\n \"\"\"\n\n print(\"Time taken to train the model: {0:.2f} sec\".format(timeForTrain))\n print(\"Time taken to test the model: {0:.2f} sec\".format(timeForTest))\n print()\n\n confMatrix = confusion_matrix(labels, predicted)\n if classifier == \"nb\":\n title = \"Naive Bayes\"\n elif classifier == \"svm\" or classifier == \"svc\":\n title = \"Support Vector Machine\"\n title += \" \" + dataSet\n plot_confusion_matrix(confMatrix, classes, title=title, normalize=True)", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n print('*'*20, '\\n')\n print('precision:', p, 'recall:', r, 'f1:', f1, '\\n')\n print('*'*20)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:\n model = models.load_model(glb.MODEL)\n model.set_weights(weights)\n model.to(DEVICE)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n # using pytorch for central evaluation, can be tensorflow as well\n return modules.pt_test(model, testloader, device=DEVICE)", "def evaluate(self, test_dir='data/dev', target='real'):\n test_data = {c: os.path.join(test_dir, c) for c in self.classes}\n if not target in test_data:\n print('Error: target class does not exist in test data.')\n return\n outcomes = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}\n # >>> YOUR ANSWER HERE\n data = []\n for c in test_data:\n docs = open(test_data[c]).readlines()\n for doc in docs:\n preprocess_doc = doc.strip()\n data.append((c, preprocess_doc))\n for item in data:\n predict_ans = self.predict(item[1])\n if item[0] == 'real':\n if predict_ans == 'real':\n outcomes['TP'] += 1\n else:\n outcomes['FN'] += 1\n 
else:\n if predict_ans == 'real':\n outcomes['FP'] += 1\n else:\n outcomes['TN'] += 1\n precision = outcomes['TP'] / (outcomes['TP'] + outcomes['FP']) # replace with equation for precision\n recall = outcomes['TP'] / (outcomes['TP'] + outcomes['FN']) # replace with equation for recall\n f1_score = 2 * ((precision * recall) / (precision + recall)) # replace with equation for f1\n # >>> END YOUR ANSWER\n return precision, recall, f1_score", "def evaluate(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, show_progress: bool = True,\n device: torch.device = torch.device('cuda:0')):\n with torch.no_grad():\n model.to(device=device)\n sum_cross_entropy = torch.nn.BCEWithLogitsLoss(reduction='sum').to(device=device)\n scoring_loss = 0.\n scoring_predictions = []\n scoring_labels = []\n for scoring_data in tqdm(dataloader, total=len(dataloader), desc=\"Evaluating model\",\n disable=not show_progress, position=1):\n \n # Get samples as lists\n labels, inputs, sequence_lengths, counts_per_sequence, sample_ids = scoring_data\n \n # Apply attention-based sequence reduction and create minibatch\n labels, inputs, sequence_lengths, n_sequences = model.reduce_and_stack_minibatch(\n labels, inputs, sequence_lengths, counts_per_sequence)\n \n # Compute predictions from reduced sequences\n logit_outputs = model(inputs, n_sequences)\n prediction = torch.sigmoid(logit_outputs)\n \n # Compute mean of losses on-the-fly\n scoring_loss += sum_cross_entropy(logit_outputs, labels[..., -1]) / len(dataloader.dataset)\n \n # Store predictions and labels\n scoring_predictions.append(prediction)\n scoring_labels.append(labels[..., -1])\n \n # Compute BACC, F1, and AUC score\n scoring_predictions = torch.cat(scoring_predictions, dim=0).float()\n scoring_predictions_threshold = (scoring_predictions > 0.5).float()\n scoring_labels = torch.cat(scoring_labels).float()\n \n scoring_labels = scoring_labels.cpu().numpy()\n scoring_predictions = scoring_predictions.cpu().numpy()\n scoring_predictions_threshold = scoring_predictions_threshold.cpu().numpy()\n \n roc_auc = metrics.roc_auc_score(scoring_labels, scoring_predictions, average=None)\n bacc = metrics.balanced_accuracy_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold)\n f1 = metrics.f1_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold, average='binary',\n pos_label=1)\n return roc_auc, bacc, f1, scoring_loss", "def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)", "def evaluate_model(model, X_test, y_test, category_names):\n # Predict for test set\n y_pred = model.predict(X_test)\n \n print(\"**** Scores for each category *****\\n\")\n for i in range(36):\n print(\"Scores for '{}':\".format(category_names[i]))\n print(classification_report(y_test.values[:,i], y_pred[:,i]))", "def model(classifier, data):\n print(\"Beggining to test model\")\n train, test = cross_validation.train_test_split(data, test_size=.30)\n f,c = train[:,1:], train[:,0]\n classifier.fit(f,c,False)\n print(\"Score: \" + classifier.score(f,c))\n print(\"Finished testing model\")", "def test(self, idx_test):\n self.eval()\n output = self.predict()\n # output = self.output\n loss_test = 
F.nll_loss(output[idx_test], self.labels[idx_test])\n acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])\n print(\"Test set results:\",\n \"loss= {:.4f}\".format(loss_test.item()),\n \"accuracy= {:.4f}\".format(acc_test.item()))\n return acc_test.item()", "def evaluate(self, test):\r\n self.logger.info(\"Testing model over test set\")\r\n metrics = self.run_evaluate(test)\r\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\r\n for k, v in metrics.items()])\r\n self.logger.info(msg)\r\n return metrics", "def evaluate(train, train_labels, test, test_labels):\n \n # Use the same model for each training set for now\n model = RandomForestClassifier(n_estimators = 100, \n random_state = 50, n_jobs = -1)\n \n train = train.replace({np.inf: np.nan, -np.inf: np.nan})\n test = test.replace({np.inf: np.nan, -np.inf:np.nan})\n \n feature_names = list(train.columns)\n \n # Impute the missing values\n imputer = Imputer(strategy = 'median', axis = 1)\n train = imputer.fit_transform(train)\n test = imputer.transform(test)\n \n cv_score = 1 * cross_val_score(model, train, train_labels, \n scoring = \"f1\", \n cv = 5)\n \n # Fit on the training data and make predictions\n model.fit(train, train_labels)\n preds = model.predict(test)\n \n # Calculate the performance\n f1 = f1_score(test_labels, preds)\n print('5-fold CV F1: {:.2f} with std: {:.2f}'.format(cv_score.mean(),cv_score.std()))\n print('Test F1: {:.2f}.'.format(f1))\n \n feature_importances = pd.DataFrame({'feature': feature_names, \n 'importance': model.feature_importances_})\n \n return preds, feature_importances", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def evaluate_model_performance():\n\n config = load_config()\n data_processor = DataProcessor()\n df_test = data_processor.create_user_click_sequence(\n start_date=config[\"test_split_date\"]\n )\n df_test[\"truths\"] = df_test[\"merchant_seq\"].apply(lambda x: list(set(x)))\n truth_dict = dict(zip(df_test[\"user_id\"], df_test[\"truths\"]))\n\n # get model\n print(\"model training...\")\n model = Merchant2VecModel()\n model.train()\n\n # compute mAP@k\n k = model.num_rec\n all_truths, all_preds = [], []\n for user_id, user_merchants in truth_dict.items():\n this_pred = model.generate_predictions(\n user_id=user_id, eval_date=config[\"test_split_date\"]\n )\n all_truths.append(user_merchants)\n all_preds.append(this_pred)\n score = mapk(all_truths, all_preds, k)\n print(\"mAP@{} for current model: {:.4f}\".format(k, score))", "def evaluate(test_set, predictions):\n full_report = metrics.classification_report(test_set.labels, predictions,\n labels=range(len(test_set.index2label)),\n target_names=test_set.index2label, digits=3)\n pre, rec, f1, support = metrics.precision_recall_fscore_support(test_set.labels, predictions, average='weighted')\n return pre, rec, f1, support, full_report", "def score(self, data_test, labels_pred, is_train=False):\n return -np.log(np.clip(self.score_trust(data_test, labels_pred, is_train=is_train),\n sys.float_info.min, None))", "def _score(self, ModifiedWeights):\r\n \r\n UnflattenedWeights = self._UnflattenWeights(WeightsStrucure = self.WeightsStrucure, ModifiedWeights = ModifiedWeights)\r\n self.KerasModels.set_weights(UnflattenedWeights)\r\n test_on_batch = self.KerasModels.test_on_batch(X_train, y_train, sample_weight=None) # return ['loss', 'acc']\r\n return test_on_batch[1]", "def 
evaluate_prediction(self):\n\n # ratio_train = self.evaluate_data(self.train_x, self.train_y)\n ratio_test = self.evaluate_data(self.test_x, self.test_y)\n\n print(\"\\n*NAIVE BAYES:\")\n # print(\"Test1: {}%\".format(ratio_dev*100))\n print(\"Test: {} %\".format(ratio_test*100))", "def evaluate(self, test):\n self.logger.info(\"Testing model over test set\")\n metrics = self.run_evaluate(test)\n msg = \" - \".join([\"{} {:04.2f}\".format(k, v)\n for k, v in metrics.items()])\n self.logger.info(msg)\n return metrics", "def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass", "def evaluate_model(model, testset):\n\n # Sort data by top level label to ease inspection\n testset = testset.sort_using_layer(-1, reverse=True)\n\n # Feed the samples to the model to obtain each layers' activations\n v = testset.get_layer(0)\n hs = model.transform(v)[1:]\n\n # Read model weights\n ws = [params['w'] for params in model.parameters]\n del params\n\n # Take the (hidden) labels from the data set\n ls = testset.get_layers()[1:]\n\n # In each layer, reorder and invert neurons to match best with the labels\n for i in range(len(ls)):\n hs[i], ws[i] = align_with_labels(ls[i], hs[i], ws[i])\n del i\n\n # Measure correlations, etcetera\n metrics = compare(ls, hs)\n\n # Simply return a dict with all used variables\n return locals()", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def test_model (self, text_test, labels_test):\n print(classification_report(labels_test, self.classify(text_test)))", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def score_one(self, test_data):\n\n\t\ttest_in, test_labels = self._split_inputs_outputs(test_data)\n\t\tcorrect = 0\n\t\ttotal = 0\n\n\t\tfor i, test_input in enumerate(test_in):\n\t\t\tprediction = self.model.predict(test_input.reshape(1,-1))\n\t\t\tif prediction[0] == test_labels[i]:\n\t\t\t\tcorrect+=1\n\t\t\ttotal+=1\n\t\treturn float(correct)/total", "def test(self, test=False): \n if test == True:\n if os.path.exists(self.student_save_path):\n checkpoint = torch.load(self.student_save_path, map_location=self.device)\n else:\n raise ValueError('No file with the pretrained model selected')\n\n 
self.student_model.load_state_dict(checkpoint)\n self.student_model.eval()\n\n running_acc = 0\n with torch.no_grad():\n for data, label in self.testloader:\n data, label = data.to(self.device), label.to(self.device)\n\n student_logits, *student_activations = self.student_model(data)\n\n running_acc += utils.accuracy(student_logits.data, label)\n\n print(f\"Test accuracy: {running_acc / len(self.testloader)}\")\n return running_acc / len(self.testloader)", "def test(self, test_fn, eval_metrics):\n # Load gold and predict\n X, Y = self.load_dataset(test_fn)\n y = self.model.predict(X)\n\n # Get most probable predictions and flatten\n Y = RNN_model.consolidate_labels(self.transform_output_probs(Y).flatten())\n y = RNN_model.consolidate_labels(self.transform_output_probs(y).flatten())\n\n # Run evaluation metrics and report\n # TODO: is it possible to compare without the padding?\n ret = []\n for (metric_name, metric_func) in eval_metrics:\n ret.append((metric_name, metric_func(Y, y)))\n logging.debug(\"calculating {}\".format(ret[-1]))\n\n for (metric_name, metric_val) in ret:\n logging.info(\"{}: {:.4f}\".format(metric_name,\n metric_val))\n return Y, y, ret", "def testModel( self, classTest, classPred):", "def test(model, test_loader, loss_function, device):\n\n model.eval()\n test_loss, correct = 0, 0\n\n with torch.no_grad():\n for data, target in test_loader:\n target = target.float().unsqueeze(dim=-1).to(device)\n output = model(data.to(device))\n pred = sigmoid2predictions(output)\n test_loss += loss_function(output, target).sum().item()\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n print('...validation: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))", "def evaluate(self, test_set, predicted_values, certainty):\r\n\r\n if self.classification_type == \"classification\":\r\n self.classification_evaluation(test_set, predicted_values, certainty)\r\n elif self.classification_type == \"regression\":\r\n self.regression_evaluation(test_set, predicted_values)", "def evaluate(model, train_corpus, test_coprus, vocab=idx2word,\r\n num_docs_test=num_docs_test, tc=tc, td=td,\r\n eval_batch_size=_eval_batch_size,\r\n vocab_size=vocab_size,\r\n bow_norm=bow_norm):\r\n\r\n model.eval() # set model in evaluation mode\r\n with torch.no_grad():\r\n indices = torch.split(torch.tensor(range(num_docs_test)), eval_batch_size)\r\n\r\n ## get \\beta here\r\n beta = model.get_beta()\r\n\r\n ### do dc and tc here\r\n acc_loss = 0\r\n cnt = 0\r\n\r\n for idx, ind in enumerate(indices):\r\n data_batch = get_batch(test_corpus, ind, vocab_size, device)\r\n sums = data_batch.sum(1).unsqueeze(1)\r\n if bow_norm:\r\n normalized_data_batch = data_batch / sums\r\n else:\r\n normalized_data_batch = data_batch\r\n\r\n ## get theta\r\n theta, _ = model.get_theta(normalized_data_batch)\r\n ## get prediction loss\r\n res = torch.mm(theta, beta)\r\n preds = torch.log(res)\r\n recon_loss = -(preds * data_batch).sum(1)\r\n loss = recon_loss / sums.squeeze()\r\n loss = loss.mean().item()\r\n acc_loss += loss\r\n cnt += 1\r\n\r\n # Calculate final loss\r\n cur_loss = acc_loss / cnt\r\n ppl_dc = round(math.exp(cur_loss), 1)\r\n print('Eval Doc Completion PPL: {}'.format(ppl_dc))\r\n\r\n if tc or td: # calculate topic coherence or topic diversity\r\n beta = beta.data.cpu().numpy()\r\n if tc:\r\n print('Computing topic coherence...')\r\n utils.get_topic_coherence(beta, train_corpus, vocab)\r\n if td:\r\n print('Computing topic diversity...')\r\n utils.get_topic_diversity(beta, 25)\r\n return ppl_dc", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def score(self, X, y):\n X_pp = self.preprocessor.transform(X)\n # Score the model on the data here\n return(self.estimator.score(X_pp, y))", "def evaluate(model, eval_data, num_labels): \n # Turn on the evaluation state to ignore dropouts\n model.eval()\n results = [predict(model, x) for x, y in eval_data]\n f1_score, accuracy = get_metrics(np.array([y for x, y in eval_data]), results, num_labels)\n return f1_score, accuracy", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. 
Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def test(self,test_fn, eval_metrics):\n # Load gold and predict\n X, Y = self.load_dataset(test_fn)\n y = self.model.predict(X)\n\n # Get most probable predictions and flatten\n Y = RNNOIE_model.consolidate_labels(self.transform_output_probs(Y).flatten())\n y = RNNOIE_model.consolidate_labels(self.transform_output_probs(y).flatten())\n\n # Run evaluation metrics and report\n # TODO: is it possible to compare without the padding?\n ret = []\n for (metric_name, metric_func) in eval_metrics:\n ret.append((metric_name, metric_func(Y, y)))\n # logging.debug(\"calculating {}\".format(ret[-1]))\n\n for (metric_name, metric_val) in ret:\n logging.info(\"{}: {:.4f}\".format(metric_name,\n metric_val))\n return Y, y, ret", "def train(self):\n\t\t# Helper: Early stopping.\n\t\tearly_stopper = EarlyStopping(patience=2, verbose = 1)\n\t\tself.model.fit(data.x_train, data.y_train,\n\t\t\t\t\t\tbatch_size=data.batch_size,\n\t\t\t\t\t\tepochs=10000, # using early stopping, so no real limit\n\t\t\t\t\t\tverbose=1,\n\t\t\t\t\t\tvalidation_split=0.05,\n\t\t\t\t\t\tcallbacks=[early_stopper])\n\n\t\tscore = self.model.evaluate(data.x_test, data.y_test, verbose=1)\n\n\t\treturn score[1] # 1 is accuracy. 
0 is loss.", "def test_model(test_loader):\n device = get_device()\n model = load_model(\"./model.pth\")\n correct = 0\n total = 0\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(test_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # Forward through model\n outputs = model(inputs)\n\n # Take softmax and convert to predicted labels\n preds = F.softmax(outputs, dim=1)\n preds = torch.argmax(preds, 1)\n #logging.info(\"{}, {}\".format(preds, labels))\n correct += torch.sum(preds == labels.data).item()\n total += inputs.size(0)\n\n logging.info(\"Test Accuracy: {}%\".format(round(100 * (correct / total), 3)))", "def test_DNN(self, X_test, Y_test):\n if self.trained:\n return accuracy_score(self.predict(X_test), Y_test)", "def test(model, args, test_loader):\n with torch.no_grad():\n model.eval()\n test_loss = 0\n correct = 0\n # Data and target are a single pair of images and labels.\n for data, target in tqdm(test_loader, desc='Batching Test Data'):\n if args.cuda:\n data, target = data.cuda(), target.cuda()\n pred, tloss = make_prediction(data, target)\n test_loss += tloss\n correct += pred.eq(target.data.view_as(pred)).cpu().sum()\n\n test_loss /= len(test_loader.dataset)\n uf.box_print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))", "def get_scores(self):\n return SklearnModel.evaluate_no_ground_truth_classifier_metrics(self.X_test, self.predictions)", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def score(self, target_text, prediction_text):\n bleu_score = sacrebleu.corpus_bleu([prediction_text], [[target_text]],\n smooth_method=\"exp\",\n smooth_value=0.0,\n force=False,\n lowercase=False,\n tokenize=\"intl\",\n use_effective_order=False)\n return {\"bleu\": BleuScore(bleu_score.score)}", "def test():\n listpost,listclass = bayes.loaddataset()\n myvocablist = bayes.createlist(listpost)\n tmatrix = list()\n for doc in listpost:\n\t vec = bayes.word2vec(myvocablist,doc)\n\t tmatrix.append(vec)\n p0,p1,pa = bayes.train(tmatrix,listclass)\n testdoc1 = ['love','my','dalmation']\n testvec1 = bayes.word2vec(myvocablist,testdoc1)\n print testdoc1,'classify as :',bayes.classify(testvec1,p0,p1,pa)\n testdoc2 = ['stupid','love']\n testvec2 = bayes.word2vec(myvocablist,testdoc2)\n print testdoc2,'classify as :',bayes.classify(testvec2,p0,p1,pa)", "def calculate_bleu(lightning_model: pl.LightningModule, config: Namespace) -> float:\n\n valid_en = load_file(os.path.join(config.directory, 'valid_en.txt'))\n valid_ru = load_file(os.path.join(config.directory, 'valid_ru.txt'))\n\n predicted_texts = list()\n\n for i_batch in tqdm(range(math.ceil(len(valid_en) / 
config.batch_size)),\n desc='Inference', disable=not config.verbose):\n\n batch = valid_en[i_batch * config.batch_size:(i_batch + 1) * config.batch_size]\n tokenized_batch = lightning_model.sequence2sequence_preparer.source_tokenize(batch)\n translated_batch = lightning_model.model.generate(tokenized_batch)\n predicted_texts_batch = lightning_model.sequence2sequence_preparer.target_language_tokenizer.decode_batch(\n translated_batch)\n predicted_texts.extend(predicted_texts_batch)\n\n tokenized_predicted = [word_tokenize(sample) for sample in predicted_texts]\n tokenized_target = [[word_tokenize(sample)] for sample in valid_ru]\n\n score = corpus_bleu(tokenized_target, tokenized_predicted)\n\n return score", "def evaluate(model, df_result, label='test'):\n\n y_true = df_result['RUL']\n y_hat = df_result['y_hat']\n df_result['breakdown'].replace(0, False, inplace=True) # rsf only takes true or false\n df_result['breakdown'].replace(1, True, inplace=True) # rsf only takes true or false\n\n mse = mean_squared_error(y_true, y_hat)\n rmse = np.sqrt(mse)\n variance = r2_score(y_true, y_hat)\n\n # the concordance index (CI) is interested on the order of the predictions, not the predictions themselves\n # CI can only be measured between individual samples where a censoring or failure event occurred\n # https://medium.com/analytics-vidhya/concordance-index-72298c11eac7#:~:text=The%20concordance%20index%20or%20c,this%20definition%20mean%20in%20practice\n df_result_grouped = df_result.groupby('unit num').last()\n breakdown = df_result_grouped['breakdown']\n y_true = df_result_grouped['RUL']\n y_hat = df_result_grouped['y_hat']\n ci_sk = ci_scikit(breakdown, y_true, y_hat)[0]\n score = nasaScore(y_true, y_hat) # score should be based on the last instance\n # print(f'Number of concordant pairs (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[1]}')\n # print(f'Number of discordant pairs (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[2]}')\n # print(f'Number of pairs having tied estimated risks (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[3]}')\n # print(f'Number of comparable pairs sharing the same time (scikit-survival): {ci_scikit(breakdown, y_true, y_hat)[4]}')\n print('{} set RMSE:{:.2f}, Score:{:.2f}, CI(scikit):{:.4f}, R2:{:.2f}'.format(label, rmse, score, ci_sk, variance))\n result = [model, label, rmse, score, ci_sk, variance]\n return result", "def test(self, verbose=False):\n\t\tif not self.trained: self.train()\n\t\tloss = self.compute_loss(self.w0, self.w, 'test')\n\t\tprint('Mean log loss of TEST data:', loss)", "def eval_performance(weights, test_y, test_x):\n y_predicted = predict_labels(weights, test_x)\n accuracy = len(y_predicted[y_predicted == test_y]) / len(y_predicted)\n return accuracy", "def scoring_function(self, model, y_true, y_predicted_probability):", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n 
print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def score_dataset(X_train, X_valid, y_train, y_valid):\r\n model = RandomForestRegressor(n_estimators=100, random_state=0)\r\n model.fit(X_train, y_train)\r\n preds = model.predict(X_valid)\r\n score = mean_absolute_error(y_valid, preds)\r\n return score", "def evaluate(\n self,\n test_data=None,\n print_report=True,\n save_path=\"ktrain_classification_report.csv\",\n class_names=[],\n ):\n return self.validate(\n val_data=test_data,\n print_report=print_report,\n save_path=save_path,\n class_names=class_names,\n )", "def test(classifier, data, labels):\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": data},\n y=labels,\n num_epochs=1,\n shuffle=False)\n eval_results = classifier.evaluate(input_fn=eval_input_fn)\n eval_results[\"F-Score\"] = 2 * eval_results[\"precision\"] * eval_results[\"recall\"] / (eval_results[\"precision\"] + eval_results[\"recall\"])\n# print(eval_results)\n return eval_results", "def test(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, _ = self._get_smi_dl(phase=\"test\", shuffle=False)\n test_loader = tqdm(self.test_loader, desc='testing...')\n\n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.to(self.device)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].to(self.device)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1:\n # 
overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n\n test_loss += batch_loss\n test_loader.set_description(f\"testing...loss={test_loss / epoch_test_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size # self.test_size\n\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n message = f\"{self.args.expt_name}\\n\"\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def test(self, test_X, test_y):\n if self.feat_sel:\n test_X = self.skb.transform(test_X)\n predicted = self.clf.predict_proba(test_X)[:, 1]\n return roc_auc_score(test_y, predicted), average_precision_score(test_y, predicted)", "def evaluate(model, test_files):\n print(\"Running predictions.\")\n models = load_model(model)\n predictions = predict(models, test_files)\n\n # # write predictions to file\n # write_predictions(\"evaluate_out.json\",predictions)\n evaluate_individual(predictions, test_files, models)\n evaluate_overall(predictions)", "def train_and_test_model(self, X_train, y_train, X_test, y_test):\n\n\t\t# Fit 
the classification model on the whole training set (as opposed to cross-validation)\n\t\t# print(\"Y TRAIN: \", y_train[:10])\n\t\t# print(\"x TRAIN: \", X_train[:10])\n\t\tself.classifier.fit(X_train, y_train)\n\t\ty_train_predicted = self.classifier.predict(X_train)\n\t\tprint(\"np.mean Accuracy TRAINING: %s\" % np.mean(y_train_predicted == y_train))\n\n\t\t''' Predict the outcome on the test set\n\t\t\tNote that the clf classifier has already been fit on the training data.\n\t\t'''\n\t\ty_predicted = self.classifier.predict(X_test)\n\n\t\tprint(\"%.2f seconds: Finished training the model and predicting class labels for the test set\" % time.process_time())\n\n\t\t# Simple evaluation using numpy.mean\n\t\t# print(\"np.mean Accuracy: %s\" % np.mean(y_predicted == y_test))\n\n\t\t# Log the classification report\n\t\t# print(\"Classification report:\\n%s\" % metrics.classification_report(y_test, y_predicted))\n\n\t\t# The confusion matrix\n\t\t# confusion_matrix = metrics.confusion_matrix(y_test, y_predicted)\n\t\t# print(\"Confusion matrix:\\n%s\" % confusion_matrix)", "def test_model(model, dset_loader):\n model.train(False)\n\n running_corrects = 0\n\n for inputs, labels in dset_loader:\n # wrap them in Variable\n# inputs, labels = Variable(inputs.cuda()), \\\n# Variable(labels.cuda())\n\n # forward\n outputs = model.forward_prediction(inputs)\n _, preds = torch.max(outputs.data, 1)\n\n running_corrects += torch.sum(preds == labels.data)\n\n return running_corrects/(len(dset_loader) * dset_loader.batch_size)", "def evaluate(self, featureset):\r\n #sequence, tag = featureset\r\n gs, labels = [], []\r\n for s, t in featureset:\r\n gs.append(t)\r\n label = self.tagger.choose_tag(s)\r\n labels.append(label)\r\n print (t, label)\r\n\r\n assert(len(gs) == len(labels))\r\n self.write_to_file(labels)\r\n words = self.tagger.test(self.r.test_sents, word=True)\r\n print (accuracy_score(gs, labels))", "def finetune_and_evaluate_model(model: BertForSequenceClassification, dataset: SentenceComplexityDataset):\n if test_split_ratio:\n train_ds, test_ds = get_train_test_split(dataset, test_split_ratio)\n train_ft_ds = SentenceComplexityFinetuningDataset(train_ds)\n test_ft_ds = SentenceComplexityFinetuningDataset(test_ds)\n else:\n train_ft_ds = SentenceComplexityFinetuningDataset(dataset)\n test_ft_ds = None\n\n training_args = TrainingArguments(\"finetune_trainer\",\n evaluation_strategy=\"epoch\",\n logging_strategy=\"epoch\",\n per_device_train_batch_size=16,\n per_device_eval_batch_size=16)\n\n trainer = Trainer(model=model, args=training_args, train_dataset=train_ft_ds, eval_dataset=test_ft_ds)\n trainer.train()\n trainer.evaluate()\n\n model.save_pretrained(FINETUNED_BERT_MODEL_PATH)", "def evaluate(self, x_test, y_test):\n result = {}\n results = []\n # Prepare test\n x_test = [gensim.utils.simple_preprocess(text) for text in x_test]\n x_test = keras.preprocessing.sequence.pad_sequences(\n self.tokenizer.texts_to_sequences(x_test),\n maxlen=self.k_max_sequence_len)\n\n # Predict\n confidences = self.k_model.predict(x_test, verbose=1)\n\n y_pred_1d = []\n\n for confidence in confidences:\n idx = np.argmax(confidence)\n y_pred_1d.append(self.label_encoder.classes_[idx])\n\n y_pred_bin = []\n for i in range(0, len(results)):\n y_pred_bin.append(1 if y_pred_1d[i] == y_test[i] else 0)\n\n # Classification report\n result[\"CLASSIFICATION_REPORT\"] = classification_report(y_test, y_pred_1d, output_dict=True)\n\n # Confusion matrix\n result[\"CONFUSION_MATRIX\"] = confusion_matrix(y_test, 
y_pred_1d)\n\n # Accuracy\n result[\"ACCURACY\"] = accuracy_score(y_test, y_pred_1d)\n\n return result", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n return compute_rmse(y_pred, y_test)", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def score(self, x_test, y_test, regressor=False):\n if regressor:\n r2_scores = []\n mse_scores = []\n for model in self.list_of_models:\n predictions = model.predict(x_test)\n self.r2_scores.append(r2_score(y_test, predictions))\n self.mse_scores.append(mean_squared_error(y_test, predictions))\n self.print_results(regressor=True)\n else:\n self.f1_scores = []\n self.recall_scores = []\n self.precision_scores = []\n self.accuracy_scores = []\n for model in self.list_of_models:\n predictions = model.predict(x_test)\n self.f1_scores.append(f1_score(y_test, predictions))\n self.recall_scores.append(recall_score(y_test, predictions))\n self.precision_scores.append(precision_score(y_test, predictions))\n self.accuracy_scores.append(accuracy_score(y_test, predictions))\n self.print_results()", "def evaluate_model(model, model_name, X_train, Y_train, X_test, ground_truth):\n\tprint(\"\t\tModel [\" + model_name + \"]\")\n\tmodel.fit(X_train, Y_train)\n\tY_pred = model.predict(X_test).astype(int)\n\tregression = np.sqrt(metrics.mean_squared_error(ground_truth, Y_pred))\n\treturn regression", "def train_model_and_score(X,y_train):\n scaler = MinMaxScaler()\n X_scaled = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n #split train/test\n x_train,x_test,y_train,y_test = train_test_split(X_scaled,y_train,test_size=0.33,random_state =42)\n\n #train\n model.fit(x_train,y_train)\n\n #evaluation\n sc = model.score(x_test,y_test), model.score(x_train,y_train)\n\n print(sc)\n\n return model,sc" ]
[ "0.7001942", "0.69130796", "0.68518585", "0.68095434", "0.6744196", "0.6740567", "0.6694026", "0.6630554", "0.6620787", "0.66182154", "0.65482765", "0.6532654", "0.65137815", "0.6487489", "0.64665717", "0.64459497", "0.6421892", "0.64201504", "0.6412638", "0.64110035", "0.63942903", "0.6382537", "0.6382537", "0.6382537", "0.63816804", "0.63794154", "0.63794154", "0.63704807", "0.634472", "0.63323927", "0.63196415", "0.6313083", "0.6271839", "0.62642115", "0.62637067", "0.62541246", "0.62538886", "0.6244691", "0.62422884", "0.62359506", "0.6235761", "0.6228438", "0.6206299", "0.62006325", "0.6195615", "0.61927575", "0.61924094", "0.6179245", "0.61744034", "0.61737514", "0.6168375", "0.61648667", "0.6164147", "0.61615646", "0.61594087", "0.61543655", "0.61506027", "0.61506027", "0.6142563", "0.61321723", "0.61174446", "0.611123", "0.6110901", "0.6110876", "0.61004895", "0.60906315", "0.60806584", "0.6074597", "0.6073108", "0.6070868", "0.6070582", "0.60704386", "0.6059216", "0.60535353", "0.6042663", "0.60378677", "0.6026182", "0.6022291", "0.6019815", "0.60173535", "0.6011375", "0.60099816", "0.6005306", "0.6003325", "0.6002375", "0.6000931", "0.5999676", "0.59963673", "0.5995895", "0.59851104", "0.5980041", "0.5979232", "0.5979142", "0.5976978", "0.5965908", "0.5962811", "0.5958374", "0.59574485", "0.5957191", "0.5957172" ]
0.6284775
32
Selftest function will try to connect to the LDAP instance. Fail if any exceptions are raised.
def selftest_function(opts): domains_list = get_domains_list(opts) ldap = LDAPDomains(opts) state = "success" reason = "N/A" domain = "N/A" conn = "" for domain_name in domains_list: try: """ If labels are given to the servers in the app.config `domain_name` will start with 'fn_ldap_utilities:' else if labels are not given then `domain_name` will equal 'fn_ldap_utilites'. If `domain_name` contains ':' then a labels have been given to the servers and `domain` will be set to the label given to the server else if `domain_name` does not contain ':' then servers have not been labled and `domain` will be set to `domain_name` which will equal 'fn_ldap_utilities'. """ domain = domain_name[domain_name.index(":")+1:] if ":" in domain_name else domain_name # Instansiate helper (which gets appconfigs from file) helper = LDAPUtilitiesHelper(ldap.ldap_domain_name_test(domain, domains_list)) options = opts.get(domain_name, {}) log.info(f"Verifying app.config values for {str(options.get('ldap_server'))} config section") # Instansiate LDAP Server and Connection conn = helper.get_ldap_connection() # Bind to the connection log.info("Verifying LDAP connection...") conn.bind() log.info("Test was successful\n") except Exception as err: state = "failure" reason = err break finally: # Unbind connection if conn: conn.unbind() if state == "success": return {"state": state} return { "state": state, "reason": reason, "domain": domain }
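The record above pairs the query with a selftest that drives the connect/bind/unbind cycle through app-specific helpers (`LDAPDomains`, `LDAPUtilitiesHelper`) whose internals are not shown here. Purely as a point of reference, a stripped-down version of the same pattern written directly against the ldap3 library could look like the sketch below; the function name, the parameters, and the assumption that an ldap3-style client sits underneath are illustrative and not taken from the record.

# Minimal sketch, assuming an ldap3-style client; host, port, bind DN and
# password are placeholder parameters, not values from any app.config.
from ldap3 import Server, Connection, ALL


def ldap_selftest(host, port, bind_dn, password, use_ssl=False):
    """Return {"state": "success"} when a bind succeeds, else the failure reason."""
    conn = None
    try:
        server = Server(host, port=port, use_ssl=use_ssl, get_info=ALL)
        conn = Connection(server, user=bind_dn, password=password,
                          raise_exceptions=True)
        # Bind to verify the credentials and the network path to the server.
        conn.bind()
        return {"state": "success"}
    except Exception as err:
        # Any failure (socket, TLS, invalid credentials) is reported as a reason.
        return {"state": "failure", "reason": str(err)}
    finally:
        # Unbind only if the connection object was created and actually bound.
        if conn is not None and conn.bound:
            conn.unbind()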
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
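The metadata block above tags this record for the triplet objective, listing (query, document, negatives) as the fields that form each training triple. As an illustration only of how such a triple is typically consumed — the dump itself specifies no training code, and the encoder, margin value, and use of cosine similarity below are assumptions — a contrastive step over one record might look like:

# Minimal sketch, assuming a text encoder that maps a list of strings to a
# (batch, dim) tensor; encoder, margin and similarity choice are assumptions.
import torch
import torch.nn.functional as F


def triplet_margin_step(encoder, query, document, negatives, margin=0.2):
    """Triplet-margin loss for one (query, document, negatives) record."""
    q = F.normalize(encoder([query]), dim=-1)        # (1, dim) anchor
    pos = F.normalize(encoder([document]), dim=-1)   # (1, dim) positive document
    neg = F.normalize(encoder(negatives), dim=-1)    # (n_neg, dim) negatives

    sim_pos = (q * pos).sum(dim=-1)                  # (1,) cosine similarity
    sim_neg = neg @ q.squeeze(0)                     # (n_neg,) cosine similarities

    # Each negative should score at least `margin` below the positive document.
    return torch.clamp(margin - (sim_pos - sim_neg), min=0.0).mean()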
[ "def test_simple_auth_error(self):\n client = LDAPClient(self.url)\n client.set_credentials(\"SIMPLE\", (\"cn=wrong\", \"wronger\"))\n self.assertRaises(bonsai.AuthenticationError, client.connect)", "def connect(self):\n conf = self.conf\n\n if not conf.uris or not conf.base:\n raise ConfigError('Base DN and LDAP URI(s) must be provided.', 1)\n\n if conf.tls_require_cert is not None:\n if conf.tls_require_cert not in [ldap.OPT_X_TLS_DEMAND, ldap.OPT_X_TLS_HARD]:\n print(BAD_REQCERT_WARNING, file=sys.stderr)\n # this is a global option!\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, conf.tls_require_cert)\n\n if conf.cacert_dir:\n # this is a global option!\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, conf.cacert_dir)\n\n if not conf.referrals:\n # this is a global option!\n ldap.set_option(ldap.OPT_REFERRALS, 0)\n\n # NOTE: The uri argument is passed directly to the underlying openldap\n # library that allows multiple URIs separated by a space for failover.\n self._conn = conn = ldap.initialize(' '.join(conf.uris))\n try:\n conn.protocol_version = conf.ldap_version\n conn.network_timeout = conf.bind_timeout\n conn.timeout = conf.search_timeout\n\n if conf.sasl == 'GSSAPI':\n self._bind_sasl_gssapi()\n return\n\n if conf.ssl == 'start_tls' and conf.ldap_version >= 3:\n conn.start_tls_s()\n\n if conf.bind_dn and conf.bind_pass:\n self._bind(conf.bind_dn, conf.bind_pass)\n except ldap.SERVER_DOWN:\n raise LDAPConnectionError('Can\\'t contact LDAP server.', 3)", "def setUp(self):\n curdir = os.path.abspath(os.path.dirname(__file__))\n self.cfg = configparser.ConfigParser()\n self.cfg.read(os.path.join(curdir, 'test.ini'))\n self.ipaddr = self.cfg[\"SERVER\"][\"hostip\"]\n self.url = \"ldap://%s:%s/ou=nerdherd,%s?%s?%s\" % \\\n (self.cfg[\"SERVER\"][\"hostip\"], self.cfg[\"SERVER\"][\"port\"],\n self.cfg[\"SERVER\"][\"basedn\"], self.cfg[\"SERVER\"][\"search_attr\"],\n self.cfg[\"SERVER\"][\"search_scope\"])\n self.host = \"ldap://%s\" % self.cfg['SERVER']['hostname']\n self.basedn = self.cfg[\"SERVER\"][\"basedn\"]\n client = LDAPClient(self.url)\n client.set_credentials(\"SIMPLE\", (self.cfg[\"SIMPLEAUTH\"][\"user\"],\n self.cfg[\"SIMPLEAUTH\"][\"password\"]))\n client.auto_page_acquire = False\n self.conn = client.connect()\n self.async_conn = LDAPConnection(client, True)", "def test_auth_test(self):\n backend = LdapBackend()\n backend.authenticate(None, username=\"apple\", password=\"ffffff\")", "def connect(self):\n if self._ignore_cert:\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)\n if self._ignore_referrals:\n ldap.set_option(ldap.OPT_REFERRALS, ldap.OPT_OFF)\n LOG.debug(\"LDAP connecting to %s\", self._url)\n self._server = ldap.initialize(self._url, bytes_mode=False)\n self._bind_to_service()", "def test_init(self):\n self._set_args(log_path=None)\n ldap = Ldap()", "def test_ldap(self):\n results = self.sync.ldap.conn.search_s('ou=example,o=test', ldap.SCOPE_ONELEVEL, '(cn=*)')\n self.assertEquals(self.ldapobj.methods_called(), ['initialize', 'simple_bind_s', 'search_s'])\n self.assertEquals(sorted(results), sorted([self.manager, self.alice]))", "def test_wrong_conn_param(self):\n self.assertRaises(TypeError, lambda: LDAPConnection(\"wrong\"))\n self.assertRaises(TypeError, lambda: LDAPConnection(LDAPClient(), 1))", "def connect(self, conf):\n\n uri = '%s://%s:%d' % (conf['ldap_protocol'],conf['ldap_server'],\n conf['ldap_server_port'])\n\n connection = ldap.initialize(uri)\n if conf['ldap_tls']:\n connection.start_tls_s()\n return connection", "def 
test_verify_active_directory_works_after_failover_with_new_system_dataset(driver):\n pass", "def connect(self, trace_level=2, trace_file=sys.stdout):\n logger.info(\"connect to %s\" % self.server)\n # blind trust all certs for test purpose.\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)\n\n conn = ldap.initialize(uri=self.server.uri, trace_level=trace_level, trace_file=trace_file)\n\n self._conn = conn", "def test_functionality(self):\n currentTime = datetime.now().strftime('%y%m%d%H%M%S') \n \n self.dirType = \"Microsoft Active Directory\"\n self.dirName = \"TestDirectory\" + currentTime\n self.dirUname = \"[email protected]\"\n self.dirPass = \"hcl@1234\"\n self.host = \"172.31.62.1\"\n self.port = \"389\"\n self.protocol = \"Plain\"\n self.baseDN = \"CN=Users,DC=ess,DC=delllabs,DC=net\"\n self.dirFilter = \"objectClass=*\"\n self.uNameAttr = \"samAccountName\"\n self.lNameAttr = \"givenName\"\n self.fNameAttr = \"sn\"\n self.emailAttr = \"email\"\n self.dirSource = self.dirName\n self.searchTerm = \"TestAutowithuser\"\n self.importRole = \"Read Only\"\n \n self.browserObject = globalVars.browserObject\n \n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def _ldap_server_connect():\n ldap_server = current_app.config['LDAP_SERVER']\n ldap_port = int(current_app.config['LDAP_PORT'])\n ldap_use_tls = current_app.config['LDAP_USE_TLS']\n ldap_key_path = current_app.config['LDAP_KEY_PATH']\n ldap_sa_bind_dn = current_app.config['LDAP_SA_BIND_DN']\n ldap_sa_password = current_app.config['LDAP_SA_PASSWORD']\n\n tls = Tls(validate=ssl.CERT_NONE, local_private_key_file=ldap_key_path)\n\n if ldap_use_tls:\n server = Server(ldap_server, ldap_port, tls=tls, use_ssl=True)\n else:\n server = Server(ldap_server, ldap_port)\n\n conn = Connection(server, ldap_sa_bind_dn, ldap_sa_password, auto_bind=True)\n\n return conn", "def test_bind_gssapi_error(self):\n if \"GSSAPIAUTH\" not in self.cfg:\n self.skipTest(\"GSSAPI authentication is not set.\")\n if not bonsai.has_krb5_support():\n self.skipTest(\"Module doesn't have KRB5 support.\")\n if sys.platform == \"darwin\":\n self.skipTest(\"Kerberos is not available on Mac.\")\n if (\"realm\" not in self.cfg[\"GSSAPIAUTH\"]\n or self.cfg[\"GSSAPIAUTH\"][\"realm\"] == \"None\"):\n self.skipTest(\"Realm is not set.\")\n client = LDAPClient(self.url)\n client.set_credentials(\"GSSAPI\", (self.cfg[\"GSSAPIAUTH\"][\"user\"],\n self.cfg[\"GSSAPIAUTH\"][\"password\"],\n self.cfg[\"GSSAPIAUTH\"][\"realm\"],\n None))\n self.assertRaises(bonsai.AuthenticationError, client.connect)", "def test_search_ldapdn(self):\n ldap_dn = LDAPDN(self.basedn)\n obj = self.conn.search(ldap_dn, 1)\n self.assertIsNotNone(obj)", "def test_init_defaults(self):\n self._set_args(log_path=None,\n state='present',\n username='myBindAcct',\n password='myBindPass',\n server='ldap://example.com:384',\n search_base='OU=Users,DC=example,DC=com',\n role_mappings={'.*': ['storage.monitor']},\n )\n\n ldap = Ldap()", "def reconnect(func):\n\n @wraps(func)\n def _reconnect(self, *args, **kwargs):\n \"\"\"\n Inner wrap function to reconnect on failure\n \"\"\"\n try:\n return func(self, *args, **kwargs)\n except ldap.LDAPError:\n self.connect()\n return func(self, *args, **kwargs)\n\n return _reconnect", "def setUp(self):\n # patch the logging module\n self.patcher_logging = patch.object(ldap, 'logging')\n mock_logging = self.patcher_logging.start()\n mock_logging.getLogger.return_value = Mock(\n spec=['info', 'warning', 'error', 'debug'])\n 
self.mock_logger = mock_logging.getLogger.return_value\n\n # prepare the configuration values\n self.patcher_config = patch.object(ldap, 'CONF')\n self.mock_config = self.patcher_config.start()\n self.test_conf = {\n 'auth': {\n 'ldap': {\n 'host': 'foo.com',\n 'username': 'userfoo',\n 'password': 'foopwd',\n 'user_base': 'ou=base,o=foo.com',\n 'user_filter': '(objectclass=Person)',\n 'user_attributes': {\n 'title': 'title',\n },\n 'group_filter': '(cn=foo-users)',\n 'group_base': 'ou=foogroup,o=foo.com',\n 'group_membership_attr': 'uniquemember',\n }\n }\n }\n self.mock_config.get_config.return_value = self.test_conf\n\n # prepare the mocks for ldap3 library\n self.patcher_ldap3 = patch.object(ldap, 'ldap3')\n self.mock_ldap3 = self.patcher_ldap3.start()\n self.mock_ldap3.Server.return_value = sentinel.server_obj\n self.mock_ldap3.NONE = sentinel.ldap3_none\n self.mock_conn = MagicMock(\n spec_set=['bind', 'search', 'response', 'result', '__exit__',\n '__enter__']\n )\n self.mock_conn.__enter__.return_value = self.mock_conn\n self.mock_ldap3.Connection.return_value = self.mock_conn", "def _reconnect(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except ldap.LDAPError:\n self.connect()\n return func(self, *args, **kwargs)", "def ldap_login(self, username, password):\n if settings.LDAP_USERS and username not in settings.LDAP_USERS:\n logging.warning(f\"User {username} not allowed for LDAP login\")\n return False\n LDAP_SERVER = settings.LDAP_SERVER\n # Create fully qualified DN for user\n LDAP_DN = settings.LDAP_LOGIN_DN.replace(\"{username}\", username)\n logger.debug(f\"LDAP dn: {LDAP_DN}\")\n # disable certificate check\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)\n\n # specify certificate dir or file\n if settings.LDAP_CERT_DIR:\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, settings.LDAP_CERT_DIR)\n if settings.LDAP_CERT_FILE:\n ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, settings.LDAP_CERT_FILE)\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n ldap_client.set_option(ldap.OPT_REFERRALS, 0)\n # perform a synchronous bind to test authentication\n ldap_client.simple_bind_s(LDAP_DN, password)\n logger.info(f\"User '{username}' successfully authenticated via LDAP\")\n ldap_client.unbind_s()\n return True\n except (ldap.INVALID_CREDENTIALS, ldap.NO_SUCH_OBJECT):\n ldap_client.unbind()\n logger.warning(\"LDAP: wrong username or password\")\n except ldap.SERVER_DOWN:\n logger.warning(\"LDAP server not available\")\n except ldap.LDAPError as e:\n if isinstance(e, dict) and \"desc\" in e:\n logger.warning(f\"LDAP error: {e['desc']}\")\n else:\n logger.warning(f\"LDAP error: {e}\")\n return False", "def connect_to_ldap(self, ldap_protocol, ldap_host, ldap_port,\n username, password, method='BASIC'):\n self.logger.debug(\"Attempting to connect to LDAP...\")\n\n try:\n conn_string = \"{}://{}:{}\".format(ldap_protocol,\n ldap_host,\n ldap_port)\n\n self.logger.debug(\"Connection string: {0}\".format(conn_string))\n\n self.ldap_conn = ldap.initialize(conn_string)\n self.ldap_conn.set_option(ldap.OPT_REFERRALS, 0)\n\n # If a username and password is provided, we assume\n # SASL's DIGESTMD5 authentication method.\n self.logger.debug(\"Auth using method: {0}\".format(method))\n if method == \"DIGESTMD5\":\n if username and password:\n self.logger.debug((\"Username and password provided,\" +\n \" attempting DIGEST MD5 connection.\"))\n auth_tokens = ldap.sasl.digest_md5(username, password)\n self.ldap_conn.sasl_interactive_bind_s(\"\", 
auth_tokens)\n else:\n raise PSQLAuthnzLDAPException(\n (\"A username and password must supplied \" +\n \"for DIGESTMD5 authentication.\")\n )\n else:\n if username and password:\n self.logger.debug(\n (\"\"\"\n Username and password provided,\n attempting simple bind connection.\n \"\"\")\n )\n self.ldap_conn.simple_bind_s(username, password)\n else:\n self.logger.debug(\n (\"No username and password provided, \" +\n \"attempting anonymous connection.\")\n )\n self.ldap_conn.simple_bind_s()\n\n except Exception as e:\n logging.error(unicode(e.message).encode('utf-8'))\n raise PSQLAuthnzLDAPException()", "def is_ldap_up(host, port):\n conn = ldap.initialize(f'ldap://{host}:{port}')\n conn.simple_bind_s(LDAP_BINDDN, LDAP_SECRET)\n\n # The OpenLDAP server is pretty quick to start up but it can still be building the indices\n # or computing the memberOf property. So check and wait until that's done before we let the\n # tests proceed, otherwise we get all kinds of crazy errors.\n # conn.search returns either True or False, depending on if the query succeeded or not. As\n # long as the query doesn't succeed we're still starting up.\n res = conn.search_s('dc=planetexpress,dc=com', ldap.SCOPE_BASE, '(objectclass=*)')\n return res", "def test_auth_conn_exception(self):\n # prepare search operation\n self.mock_conn.search.return_value = True\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n 'title': 'Job title',\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n self.mock_conn.response.__len__.return_value = 1\n\n # simulate ldap3 Connection failing\n empty_exc = Exception('Empty password')\n self.mock_ldap3.Connection.side_effect = [self.mock_conn, empty_exc]\n\n # validate result\n ldap_manager = ldap.MANAGER()\n self.assertIs(None, ldap_manager.authenticate('baruser', ''))\n\n # validate behavior\n self.mock_conn.bind.assert_not_called()\n self.mock_logger.debug.assert_called_with(\n 'User %s bind failed, debug info:',\n fake_resp['dn'],\n exc_info=empty_exc)", "def __init__(self, ldap_configs):\n assert isinstance(ldap_configs, dict)\n try:\n self.base_dn = ldap_configs['basedn']\n self.host = ldap_configs['host']\n self.port = ldap_configs['port']\n self.user = ldap_configs['user']\n self.password = ldap_configs['password']\n self.timeout = ldap_configs['timeout']\n except KeyError as ex:\n raise IOError(\"Can not find %s in %s, aborting\" % (ex, ldap_configs))\n self.connection = None\n self.connection = self.connect()\n # log.debug(\"Created new LDAPConnection instance %s\" % self)", "def test_search_fail(self):\n # make search fail\n self.mock_conn.search.return_value = False\n ldap_manager = ldap.MANAGER()\n self.assertIs(None, ldap_manager.authenticate('baruser', 'barpwd'))\n\n # validate behavior\n self.mock_ldap3.Server.assert_called_with(\n self.test_conf['auth']['ldap']['host'], port=636, use_ssl=True,\n get_info=sentinel.ldap3_none)\n self.mock_ldap3.Connection.assert_called_with(\n sentinel.server_obj,\n self.test_conf['auth']['ldap']['username'],\n self.test_conf['auth']['ldap']['password'],\n read_only=True,\n receive_timeout=10\n )\n _, kwargs = self.mock_conn.search.call_args\n self.assertEqual(\n kwargs['search_base'], self.test_conf['auth']['ldap']['user_base'])\n self.assertEqual(\n kwargs['search_filter'], '(&(mail=baruser)(objectclass=Person))')\n for item in ['mail', 'cn', 'title']:\n if item not in kwargs['attributes']:\n 
raise AssertionError('User attribute {} missing'.format(item))", "def init_ldap(username, password, server, port=389, DEBUG=False):\r\n if DEBUG:\r\n warning('trying to connect to %s:%d as %s\\n'%(server, port, username))\r\n ldapurl = \"ldap://%s:%d\"%(server, port)\r\n\r\n if DEBUG:\r\n ldap.set_option(ldap.OPT_DEBUG_LEVEL,255)\r\n ldapmodule_trace_level = 1\r\n else:\r\n ldapmodule_trace_level = 0\r\n ldapmodule_trace_file = sys.stderr\r\n l = ldap.initialize(ldapurl, trace_level=ldapmodule_trace_level, trace_file=ldapmodule_trace_file)\r\n l.set_option(ldap.OPT_REFERRALS, 0)\r\n l.set_option(ldap.OPT_PROTOCOL_VERSION, 3)\r\n\r\n if DEBUG:\r\n l.set_option( ldap.OPT_DEBUG_LEVEL, 255 )\r\n l.bind_s(username, password, ldap.AUTH_SIMPLE)\r\n \r\n return l", "def connect(self, bind=True):\n try:\n self.ldapserv = ldap.open(self.ldapHost)\n except ldap.LDAPError, error:\n return {'bool': False, 'message': error}\n\n # Bind with authentification\n if(bind):\n return self.bind()\n\n else:\n return {'bool': True}", "def test_auth_bind_exception(self):\n # prepare search operation\n self.mock_conn.search.return_value = True\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n 'title': 'Job title',\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n self.mock_conn.response.__len__.return_value = 1\n\n # simulate ldap3 bind failing\n empty_exc = Exception('Empty password')\n self.mock_conn.bind.side_effect = empty_exc\n\n # validate result\n ldap_manager = ldap.MANAGER()\n self.assertIs(None, ldap_manager.authenticate('baruser', ''))\n\n # validate behavior\n self.mock_conn.bind.assert_called_with()\n self.mock_logger.debug.assert_called_with(\n 'User %s bind failed, debug info:',\n fake_resp['dn'],\n exc_info=empty_exc)", "def test_make_configuration(self):\n data = dict(log_path=None,\n state='present',\n username='myBindAcct',\n password='myBindPass',\n server='ldap://example.com:384',\n search_base='OU=Users,DC=example,DC=com',\n role_mappings={'.*': ['storage.monitor']},\n )\n\n self._set_args(**data)\n ldap = Ldap()\n expected = dict(id='default',\n bindLookupUser=dict(user=data['username'],\n password=data['password'], ),\n groupAttributes=['memberOf'],\n ldapUrl=data['server'],\n names=['example.com'],\n searchBase=data['search_base'],\n roleMapCollection=[{\"groupRegex\": \".*\",\n \"ignoreCase\": True,\n \"name\": \"storage.monitor\"\n }\n ],\n userAttribute='sAMAccountName'\n )\n\n actual = ldap.make_configuration()\n self.maxDiff = None\n self.assertEqual(expected, actual)", "def openldap(docker_ip, docker_services):\n host, port = docker_ip, docker_services.port_for('openldap', 389)\n docker_services.wait_until_responsive(\n timeout=600, pause=10,\n check=lambda: is_ldap_up(host, port))\n\n global LDAP_HOST\n global LDAP_PORT\n\n LDAP_HOST = host\n LDAP_PORT = port\n\n return None", "def test_004_connect(self):\n HEADING()\n self.db.connect()\n\n result = True\n assert result", "def test_connectFailure(self):\n db = Database.TestDB(self.mktemp())\n # Make _db_init fail\n db._db_init = lambda: 1 / 0\n self.assertFalse(db.initialized)\n try:\n yield db.open()\n except:\n pass\n self.assertFalse(db.initialized)\n self.assertEquals(db.pool, None)", "def test_bind_fail(self):\n # prepare search operation\n self.mock_conn.search.return_value = True\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 
'Baruser'],\n 'title': 'Job title',\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n self.mock_conn.response.__len__.return_value = 1\n # make bind fail\n self.mock_conn.bind.return_value = False\n\n # validate result\n ldap_manager = ldap.MANAGER()\n self.assertIs(None, ldap_manager.authenticate('baruser', 'barpwd'))\n\n # validate behavior\n self.mock_conn.bind.assert_called_with()\n self.mock_logger.debug.assert_called_with(\n 'User %s bind failed: %s', fake_resp['dn'], self.mock_conn.result)", "def test_missing_ldap_login_():\n login_inputs = {\"id\": \"\", \"password\": \"\"}\n expected = {\"message\": \"Incorrect username or password.\", \"code\": 400}\n\n test_invalid_auth_inputs(\n login_inputs=login_inputs,\n expected_result=expected[\"message\"],\n expected_status_code=expected[\"code\"],\n )", "def test_connect_invalid_string(self):\n with pytest.raises(ValueError):\n DatabaseDriver.connect('not a valid connect string')", "def test_connect_53371():\n with patch(\n \"socket.getaddrinfo\",\n autospec=True,\n side_effect=socket.gaierror(\"[Errno 11004] getaddrinfo failed\"),\n ):\n rtn = win_network.connect(\"test-server\", 80)\n assert rtn\n assert not rtn[\"result\"]\n assert (\n rtn[\"comment\"]\n == \"Unable to connect to test-server (unknown) on tcp port 80\"\n )", "def test_connection():\r\n try:\r\n connect()\r\n except:\r\n pass\r\n print ('Unable to connect.')\r\n else:\r\n main()", "def __init__(self):\n #Iotlab PROD LDAP parameters\n self.ldapserv = None\n ldap_config = LdapConfig()\n self.config = ldap_config\n self.ldapHost = ldap_config.LDAP_IP_ADDRESS\n self.ldapPeopleDN = ldap_config.LDAP_PEOPLE_DN\n self.ldapGroupDN = ldap_config.LDAP_GROUP_DN\n self.ldapAdminDN = ldap_config.LDAP_WEB_DN\n self.ldapAdminPassword = ldap_config.LDAP_WEB_PASSWORD\n self.ldapPort = ldap.PORT\n self.ldapVersion = ldap.VERSION3\n self.ldapSearchScope = ldap.SCOPE_SUBTREE", "def testInvalidConnectionParameters(self):\r\n \r\n self._configuration.serverUri = _SERVER_DOWN_URI\r\n self.assertRaises(PersistenceError, self._ldapPrincipalSearcher.searchPrincipal,\r\n _VALID_USER_GROUP_QUERY, constants.SEARCH_MODE_GROUP_ONLY)", "def test_mysql_connect_fail(self):\n if _is_backend_avail('mysql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")", "def test_adtls(self):\n for domain in config.ADW2K12_DOMAINS:\n principal = '%s@%s' % (config.ADW2k12_USER1, domain)\n\n testflow.step(\"Login as user %s\", config.ADW2k12_USER1)\n users.loginAsUser(\n principal,\n self.conf['authn_name'],\n config.ADW2k12_USER_PASSWORD,\n True,\n )\n\n testflow.step(\n \"Testing connection with user %s\", config.ADW2k12_USER1\n )\n assert common.connectionTest(), \"User %s can't login.\" % principal", "def test_wrong_add_param(self):\n def close_conn():\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n LDAPConnection(cli).add(bonsai.LDAPEntry(\"cn=dummy\"))\n self.assertRaises(ClosedConnection, close_conn)\n self.assertRaises(TypeError, lambda: self.conn.add(\"wrong\"))", "def __init__(self, user='', password='', host='ldap.csh.rit.edu', base=USERS, app=False, objects=False, debug=False):\n self.host = host\n self.base = base\n self.objects = objects\n self.debug = debug\n\n # Configure the LDAP server\n tls = ldap.Tls(validate=ssl.CERT_NONE, version=ssl.PROTOCOL_TLSv1)\n self.ldap_server = ldap.Server(self.host, use_ssl=True, tls=tls)\n\n if user == '':\n # No user specified, use Kerberos via 
SASL/GSSAPI to bind\n self.ldap_conn = ldap.Connection(self.ldap_server, authentication=ldap.SASL, sasl_mechanism='GSSAPI')\n else:\n # Use simple authentication\n if app:\n # Use the APPS base rather than USERS or whatever was passed\n self.base = APPS\n\n # Construct user's distinguished name\n ldap_user_dn = 'uid={},{}'.format(user, self.base)\n\n # Set up the connection\n self.ldap_conn = ldap.Connection(self.ldap_server, user=ldap_user_dn, password=password)\n\n # Attempt to bind\n try:\n self.ldap_conn.bind()\n except ldap.LDAPException as e:\n print(\"Unable to bind to LDAP: \" + str(e))\n if self.debug:\n print(\"[DEBUG] Connection details: \" + str(self.ldap_conn))", "def _try_connect(self):\n tuya = TuyaApi()\n try:\n tuya.init(\n self._username,\n self._password,\n self._country_code,\n self._platform,\n self._region,\n )\n except (TuyaAPIRateLimitException, TuyaNetException, TuyaServerException):\n return RESULT_CONN_ERROR\n except TuyaAPIException:\n return RESULT_AUTH_FAILED\n\n return RESULT_SUCCESS", "def test_domain_list_fails(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._fail_domain_list = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def test_auth_no_group(self):\n # that causes a search without verifying group membership\n del self.test_conf['auth']['ldap']['group_filter']\n\n # prepare search mock\n self.mock_conn.search.return_value = True\n # bind operation\n self.mock_conn.bind.return_value = True\n # response to the search and bind calls\n self.mock_conn.response.__len__.return_value = 1\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n 'title': 'Job title',\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n\n # perform action\n ldap_manager = ldap.MANAGER()\n\n # validate response\n check_resp = {\n 'login': fake_resp['attributes']['mail'][0],\n 'fullname': fake_resp['attributes']['cn'][0],\n 'title': fake_resp['attributes']['title'],\n }\n self.assertEqual(\n check_resp, ldap_manager.authenticate('baruser', 'barpwd'))\n\n # validate behavior\n _, kwargs = self.mock_conn.search.call_args\n self.assertEqual(\n kwargs['search_base'], self.test_conf['auth']['ldap']['user_base'])\n self.assertEqual(\n kwargs['search_filter'], '(&(mail=baruser)(objectclass=Person))')\n for item in ['mail', 'cn', 'title']:\n if item not in kwargs['attributes']:\n raise AssertionError('User attribute {} missing'.format(item))\n\n self.mock_ldap3.Connection.assert_any_call(\n sentinel.server_obj,\n self.test_conf['auth']['ldap']['username'],\n self.test_conf['auth']['ldap']['password'],\n read_only=True,\n receive_timeout=10\n )\n self.mock_ldap3.Connection.assert_any_call(\n sentinel.server_obj,\n fake_resp['dn'],\n 'barpwd',\n read_only=True,\n receive_timeout=10\n )\n self.mock_conn.bind.assert_called_with()", "def test_auth_with_group_fail(self):\n # prepare search mock\n self.mock_conn.search.side_effect = [True, False]\n # response to the first search\n self.mock_conn.response.__len__.return_value = 1\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n 'title': 'Job title',\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n 
self.mock_conn.response.__getitem__.return_value = fake_resp\n\n # perform action\n ldap_manager = ldap.MANAGER()\n\n # validate result\n self.assertEqual(None, ldap_manager.authenticate('baruser', 'barpwd'))\n\n # validate behavior - since most behavior is already checked by the\n # positive test we just check the difference\n self.mock_logger.warning.assert_called_with(\n 'user %s not member of allowed group(s)', 'baruser')", "def check_credentials(username, password):\n LDAP_SERVER = 'ldap://172.24.1.102:389'\n # fully qualified AD user name\n LDAP_USERNAME = '%s' % username\n # your password\n LDAP_PASSWORD = password\n base_dn = 'DC=LGE,DC=NET'\n ldap_filter = 'userPrincipalName=%s' % username\n attrs = ['memberOf']\n\n #print \"entered username : %s \" % username\n #print \"entered password : %s \" % password\n try:\n # build a client\n ldap_client = ldap.initialize(LDAP_SERVER)\n #print ldap_client\n # perform a synchronous bind\n ldap_client.set_option(ldap.OPT_REFERRALS,0)\n ldap_client.simple_bind_s(LDAP_USERNAME, LDAP_PASSWORD)\n except ldap.INVALID_CREDENTIALS:\n ldap_client.unbind()\n return 'Wrong username or password'\n except ldap.SERVER_DOWN:\n return 'AD server not available'\n # all is well\n ldap_client.unbind()\n return \"success\"", "def create_ldap_connection(server):\n ldap_con=ldap.initialize(ldap_server)\n return ldap_con", "def connectionTest(configFile):\n config = loadConfigurationFile(configFile)\n wmInit = WMInit() \n print(\"Checking default database connection...\", end=' ')\n\n if not hasattr(config, \"CoreDatabase\"):\n print(\"skipped.\")\n return\n\n (dialect, junk) = config.CoreDatabase.connectUrl.split(\":\", 1)\n socket = getattr(config.CoreDatabase, \"socket\", None)\n\n try:\n wmInit.setDatabaseConnection(dbConfig = config.CoreDatabase.connectUrl,\n dialect = dialect,\n socketLoc = socket)\n except Exception as ex:\n msg = \"Unable to make connection to using \\n\"\n msg += \"parameters provided in %s\\n\" % config.CoreDatabase.connectUrl \n msg += str(ex)\n print(msg)\n raise ex\n\n print(\"ok.\")\n return", "def init_ldaps(username, password, server, port=636, DEBUG=False):\r\n if DEBUG:\r\n warning('trying to connect to %s:%d as %s\\n\\tcertfile:%s\\n'%(server, port, username, data.certfile))\r\n ldapurl = \"ldaps://%s:%d\"%(server, port)\r\n\r\n ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_DEMAND)\r\n ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, data.certfile)\r\n if DEBUG:\r\n ldap.set_option(ldap.OPT_DEBUG_LEVEL,255)\r\n ldapmodule_trace_level = 1\r\n else:\r\n ldapmodule_trace_level = 0\r\n ldapmodule_trace_file = sys.stderr\r\n l = ldap.initialize(ldapurl, trace_level=ldapmodule_trace_level, trace_file=ldapmodule_trace_file)\r\n l.set_option(ldap.OPT_REFERRALS, 0)\r\n l.set_option(ldap.OPT_PROTOCOL_VERSION, 3)\r\n l.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND)\r\n l.set_option( ldap.OPT_X_TLS_DEMAND, True )\r\n if DEBUG:\r\n l.set_option( ldap.OPT_DEBUG_LEVEL, 255 )\r\n l.set_option(ldap.OPT_X_TLS_CACERTFILE, data.certfile)\r\n l.bind_s(username, password, ldap.AUTH_SIMPLE)\r\n \r\n return l", "def ldap_connect(ldap_server, bind_user, bind_password):\n\n srv = ldap3.Server(ldap_server, get_info='ALL', mode='IP_V4_PREFERRED', use_ssl=LDAP_SSL)\n try:\n conn = ldap3.Connection(srv, auto_bind=True, authentication=ldap3.NTLM, user=bind_user, password=bind_password)\n except LDAPSocketOpenError as conn_soc_err:\n logger.critical(f'{conn_soc_err.__str__()}')\n raise SystemExit(f'ERROR: {conn_soc_err.__str__()}')\n except 
ConnectionError as conn_err:\n logger.critical(f'{conn_err.__str__()}')\n raise SystemExit(f'ERROR: {conn_err.__str__()}')\n return conn", "def setUp(self):\n config = SelectLsstImagesTask.ConfigClass()\n try:\n DbAuth.username(config.host, str(config.port)),\n except RuntimeError as e:\n reason = \"Warning: did not find host=%s, port=%s in your db-auth file; or %s \" \\\n \"skipping unit tests\" % \\\n (config.host, str(config.port), e)\n raise unittest.SkipTest(reason)", "def connect_samdb_ex(samdb_url, lp=None, session_info=None, credentials=None,\n flags=0, ldb_options=None, ldap_only=False):\n sam_db = connect_samdb(samdb_url, lp, session_info, credentials,\n flags, ldb_options, ldap_only)\n # fetch RootDse\n res = sam_db.search(base=\"\", expression=\"\", scope=ldb.SCOPE_BASE,\n attrs=[\"*\"])\n return (sam_db, res[0])", "def testProblemOnPerformingQuery(self):\r\n \r\n self.assertRaises(PersistenceError, self._ldapPrincipalSearcher.searchPrincipal,\r\n _PROBLEM_ON_QUERY, constants.SEARCH_MODE_GROUP_ONLY)", "def test_connect_enable_iam_auth_error() -> None:\n connect_string = \"my-project:my-region:my-instance\"\n # create mock instance with enable_iam_auth=False\n instance = MockInstance(enable_iam_auth=False)\n mock_instances = {}\n mock_instances[connect_string] = instance\n # init Connector\n connector = Connector()\n with patch.dict(connector._instances, mock_instances):\n # try to connect using enable_iam_auth=True, should raise error\n pytest.raises(\n ValueError,\n connector.connect,\n connect_string,\n \"pg8000\",\n enable_iam_auth=True,\n )\n # remove mock_instance to avoid destructor warnings\n connector._instances = {}", "def test_connect(self):\n port = socket_any_family()\n port.bind((\"\", 0))\n port.listen(3)\n\n clientSSL = Connection(Context(SSLv23_METHOD), socket(port.family))\n clientSSL.connect((loopback_address(port), port.getsockname()[1]))\n # XXX An assertion? Or something?", "def search(self, bind, filterstr, attrlist=None,\n scope=ldap.SCOPE_SUBTREE, retries=5):\n for i in range(retries):\n try:\n self.__bind__()\n self.connection.search(bind, scope,\n filterstr=filterstr, attrlist=attrlist)\n result = self.connection.result(timeout=10)\n log.info('Success')\n return result\n except ldap.SERVER_DOWN, err:\n self.connection = self.__connect__()\n log.error('Error: %s' % (err))\n if i != retries:\n log.info('Retrying LDAP search')\n return False", "def test_connect_refused(self):\n client = socket_any_family()\n context = Context(SSLv23_METHOD)\n clientSSL = Connection(context, client)\n # pytest.raises here doesn't work because of a bug in py.test on Python\n # 2.6: https://github.com/pytest-dev/pytest/issues/988\n try:\n clientSSL.connect((loopback_address(client), 1))\n except error as e:\n exc = e\n assert exc.args[0] == ECONNREFUSED", "def test_connect():\n\n\tprint(\"Testing connecting to the server\")\n\ttry:\n\t\twith socket.socket() as s:\n\t\t\ts.connect((\"localhost\", 8000))\n\t\tprint(\"Connection attempt succeeded.\")\n\t\treturn None\n\texcept socket.error:\n\t\treturn \"Server didn't answer on localhost port 8000. 
Is it running?\"", "def setup_method(self, method):\n\n connect('authserver-db-test', host='mongomock://localhost', alias='test')", "def more_than_one_user_directory(self):\n message = \"DB::Exception: Duplicate storage type 'ldap' at user_directories\"\n servers = {\n \"openldap1\": {\n \"host\": \"openldap1\", \"port\": \"389\", \"enable_tls\": \"no\",\n \"auth_dn_prefix\": \"cn=\", \"auth_dn_suffix\": \",ou=users,dc=company,dc=com\"\n },\n \"openldap2\": {\n \"host\": \"openldap2\", \"port\": \"636\", \"enable_tls\": \"yes\",\n \"auth_dn_prefix\": \"cn=\", \"auth_dn_suffix\": \",ou=users,dc=company,dc=com\",\n \"tls_require_cert\": \"never\"\n }\n }\n users = [\n {\"server\": \"openldap1\", \"username\": \"user1\", \"password\": \"user1\", \"login\": True},\n {\"server\": \"openldap2\", \"username\": \"user2\", \"password\": \"user2\", \"login\": True}\n ]\n role = f\"role_{getuid()}\"\n entries = [\n ([\"openldap1\"], [(role,)]),\n ([\"openldap2\"], [(role,)])\n ]\n\n with ldap_servers(servers):\n with rbac_roles(role) as roles:\n config = create_entries_ldap_external_user_directory_config_content(entries)\n\n with ldap_external_user_directory(server=None, roles=None, restart=True, config=config):\n with When(f\"I login as {users[0]['username']} authenticated using openldap1\"):\n current().context.node.query(f\"SELECT 1\",\n settings=[(\"user\", users[0][\"username\"]), (\"password\", users[0][\"password\"])])\n\n with And(f\"I login as {users[1]['username']} authenticated using openldap2\"):\n current().context.node.query(f\"SELECT 1\",\n settings=[(\"user\", users[1][\"username\"]), (\"password\", users[1][\"password\"])])", "def _try_connect(self):\n try:\n return mysql.connect(\n host=self.host,\n database=self.database,\n user=self.user,\n passwd=self.password\n )\n except Error as e:\n raise ConnectionError(f\"Could not connect to {self.connection_string}\") from e", "def test_loginWithoutPortal(self):\n self.server.portal = None\n def login():\n d = self.client.login(b'testuser', b'wrong-password')\n d.addBoth(self._cbStopClient)\n\n d1 = self.connected.addCallback(strip(login)).addErrback(self._ebGeneral)\n d2 = self.loopback()\n d = defer.gatherResults([d1, d2])\n return d.addCallback(self._cbTestFailedLogin)", "def test_database_connection(self):\n\t\t\n\t\tself.assertTrue(database.connect())", "def test_connection_failure(aggregator, check, bad_instance):\n instance_tags = [\"supervisord_server:travis\"]\n with pytest.raises(Exception):\n check.check(bad_instance)\n aggregator.assert_service_check(\"supervisord.can_connect\", status=check.CRITICAL, tags=instance_tags, count=1)", "def test_use_certificate_uninitialized(self, ctx_or_conn):\n with pytest.raises(Error):\n ctx_or_conn.use_certificate(X509())", "def test_connection_failure(aggregator):\n\n gitlab = GitlabCheck('gitlab', BAD_CONFIG['init_config'], {})\n\n try:\n gitlab.check(BAD_CONFIG['instances'][0])\n except Exception:\n pass\n else:\n assert False, \"Gitlab should not be able to connect to this URL\"\n\n # We should get only one failed service check, the first\n aggregator.assert_service_check(\n 'gitlab.{}'.format(GitlabCheck.ALLOWED_SERVICE_CHECKS[0]),\n status=GitlabCheck.CRITICAL,\n tags=['gitlab_host:{}'.format(HOST), 'gitlab_port:1234'] + CUSTOM_TAGS,\n count=1,\n )", "def test(a1000):\n try:\n a1000.test_connection()\n return 'ok'\n except Exception as e:\n return_error(str(e))", "def test_auth(self, conn: ldap3.Connection, auth_user_dn: str, password: str) -> bool:\n try:\n auth_bound = 
conn.rebind(user=auth_user_dn, password=password)\n except ldap3.core.exceptions.LDAPBindError:\n auth_bound = False\n finally:\n conn.unbind()\n return auth_bound", "def test_startconnector_with_noretry_on_con_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n\n # It takes a moment to stop the service after a connection failure\n while True:\n ssRet = yield self.service_status(localConfig.id)\n if ssRet != 1:\n break;\n else:\n time.sleep(1)\n\n self.assertEqual(0, ssRet)\n\n yield self.stop(localConfig.id)", "def test_mysql_connect_fail(self):\n if test_migrations._is_backend_avail(\n 'mysql', 'kickstand_cifail', self.PASSWD, self.DATABASE):\n self.fail(\"Shouldn't have connected\")", "def test_missingPortal(self):\n self.server.challengers[b'LOGIN'] = imap4.LOGINCredentials\n\n cAuth = imap4.LOGINAuthenticator(b'testuser')\n self.client.registerAuthenticator(cAuth)\n\n self.server.portal = None\n\n def auth():\n return self.client.authenticate(b'secret')\n\n d = self.connected.addCallback(strip(auth))\n d.addErrback(self.assertClientFailureMessage,\n b\"Temporary authentication failure\")\n d.addCallbacks(self._cbStopClient, self._ebGeneral)\n\n return defer.gatherResults([d, self.loopback()])", "def _test_connection(self, connection_string):\n try:\n engine = create_engine(connection_string)\n connection = engine.connect()\n connection.close()\n return True\n except Exception as e:\n if options.debug:\n logging.exception(\"Database connection failed: %s\" % e)\n return False", "async def test_on_connect_failed(hass: HomeAssistant, side_effect, error) -> None:\n flow_result = await hass.config_entries.flow.async_init(\n DOMAIN,\n context={\"source\": SOURCE_USER, \"show_advanced_options\": True},\n )\n\n with PATCH_GET_HOST, patch(\n \"homeassistant.components.asuswrt.bridge.AsusWrtLegacy\"\n ) as asus_wrt:\n asus_wrt.return_value.connection.async_connect = AsyncMock(\n side_effect=side_effect\n )\n asus_wrt.return_value.async_get_nvram = AsyncMock(return_value={})\n asus_wrt.return_value.is_connected = False\n\n result = await hass.config_entries.flow.async_configure(\n flow_result[\"flow_id\"], user_input=CONFIG_DATA\n )\n assert result[\"type\"] == data_entry_flow.FlowResultType.FORM\n assert result[\"errors\"] == {\"base\": error}", "def test_connection(self):\n self._bind_to_service()", "def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())", "def test_connect_fail(self, req):\n req.side_effect = ks_exc.ConnectFailure()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls do\n # work\n req.reset_mock()\n self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def connect(self):\n\t\tself._entity_server_connection.attempt_connection()", "def test_basic_login(self):\n c = Client()\n c.login(username='a', password='123456')", "def test_connection(self):\n r = main.List.connection()\n self.assertTrue(r.ping(), \"Connection failed.\")", "async def test_client_connection_error(hass: HomeAssistant, mock_daikin) -> None:\n config_entry = MockConfigEntry(\n domain=DOMAIN,\n unique_id=MAC,\n data={CONF_HOST: HOST, KEY_MAC: MAC},\n )\n config_entry.add_to_hass(hass)\n\n mock_daikin.factory.side_effect = ClientConnectionError\n await hass.config_entries.async_setup(config_entry.entry_id)\n 
await hass.async_block_till_done()\n\n assert config_entry.state == ConfigEntryState.SETUP_RETRY", "def test_ApiConnectionWillAuthenticate_InvalidCredentials_Unsuccessfully(self):\n connection = ApiConnection(self.userId, \"\")\n self.assertFalse(connection.connected())", "def test_start_sameconnector_twice_with_noreconnecting_on_failure(self):\n\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.reconnectOnConnectionFailure = False\n yield self.add(localConfig)\n yield self.start(localConfig.id)\n startRet = yield self.start(localConfig.id)\n\n self.assertEqual(True, startRet)\n\n yield self.stopall()\n\n # Give a grace time for stopping\n yield waitFor(0.2)", "def connect(host='localhost', port=21050, database=None, timeout=None,\n use_ssl=False, ca_cert=None, auth_mechanism='NOSASL', user=None,\n password=None, kerberos_service_name='impala', use_ldap=None,\n ldap_user=None, ldap_password=None, use_kerberos=None,\n protocol=None, krb_host=None, use_http_transport=False,\n http_path='', auth_cookie_names=None, http_cookie_names=None,\n retries=3, jwt=None, user_agent=None):\n # pylint: disable=too-many-locals\n if use_kerberos is not None:\n warn_deprecate('use_kerberos', 'auth_mechanism=\"GSSAPI\"')\n if use_kerberos:\n auth_mechanism = 'GSSAPI'\n\n if use_ldap is not None:\n warn_deprecate('use_ldap', 'auth_mechanism=\"LDAP\"')\n if use_ldap:\n auth_mechanism = 'LDAP'\n\n if auth_mechanism:\n auth_mechanism = auth_mechanism.upper()\n else:\n auth_mechanism = 'NOSASL'\n\n if auth_mechanism not in AUTH_MECHANISMS:\n raise NotSupportedError(\n 'Unsupported authentication mechanism: {0}'.format(auth_mechanism))\n\n if auth_mechanism == 'JWT':\n if jwt is None:\n raise NotSupportedError(\"JWT authentication requires specifying the 'jwt' argument\")\n if not use_http_transport:\n raise NotSupportedError('JWT authentication is only supported for HTTP transport')\n if not use_ssl:\n warn_nontls_jwt()\n if user is not None or ldap_user is not None:\n raise NotSupportedError(\"'user' argument cannot be specified with '{0}' authentication\".format(auth_mechanism))\n if password is not None or ldap_password is not None:\n raise NotSupportedError(\"'password' argument cannot be specified with '{0}' authentication\".format(auth_mechanism))\n else:\n if jwt is not None:\n raise NotSupportedError(\"'jwt' argument cannot be specified with '{0}' authentication\".format(auth_mechanism))\n\n if ldap_user is not None:\n warn_deprecate('ldap_user', 'user')\n user = ldap_user\n\n if ldap_password is not None:\n warn_deprecate('ldap_password', 'password')\n password = ldap_password\n\n if protocol is not None:\n if protocol.lower() == 'hiveserver2':\n warn_protocol_param()\n else:\n raise NotSupportedError(\n \"'{0}' is not a supported protocol; only HiveServer2 is \"\n \"supported\".format(protocol))\n\n if auth_cookie_names is not None and http_cookie_names is None:\n warn_deprecate('auth_cookie_names', 'http_cookie_names')\n http_cookie_names = auth_cookie_names\n elif http_cookie_names is None:\n # Set default value as the list of HTTP cookie names used by Impala and Hive.\n http_cookie_names = ['impala.auth', 'impala.session.id', 'hive.server2.auth']\n\n service = hs2.connect(host=host, port=port,\n timeout=timeout, use_ssl=use_ssl,\n ca_cert=ca_cert, user=user, password=password,\n kerberos_service_name=kerberos_service_name,\n auth_mechanism=auth_mechanism, krb_host=krb_host,\n use_http_transport=use_http_transport,\n http_path=http_path,\n 
http_cookie_names=http_cookie_names,\n retries=retries,\n jwt=jwt, user_agent=user_agent)\n return hs2.HiveServer2Connection(service, default_db=database)", "def invalid_server(self):\n servers = {\n \"openldap1\": {\n \"host\": \"openldap1\", \"port\": \"389\", \"enable_tls\": \"no\",\n \"auth_dn_prefix\": \"cn=\", \"auth_dn_suffix\": \",ou=users,dc=company,dc=com\"\n },\n }\n user = {\"server\": \"openldap1\", \"username\": \"user1\", \"password\": \"user1\", \"login\": True}\n role = f\"role_{getuid()}\"\n\n entries = [\n ([\"openldap2\"], [(role,)])\n ]\n\n with ldap_servers(servers):\n with rbac_roles(role) as roles:\n config = create_entries_ldap_external_user_directory_config_content(entries)\n with ldap_external_user_directory(server=None, roles=None, restart=True, config=config):\n with When(f\"I login as {user['username']} and execute query\"):\n current().context.node.query(\"SELECT 1\",\n settings=[(\"user\", user[\"username\"]), (\"password\", user[\"password\"])],\n exitcode=4, message=\"DB::Exception: user1: Authentication failed: password is incorrect or there is no user with such name.\")", "def test_auth_with_group(self):\n # prepare search mock\n self.mock_conn.search.return_value = True\n # bind operation\n self.mock_conn.bind.return_value = True\n # response to the search and bind calls\n self.mock_conn.response.__len__.return_value = 1\n fake_resp = {\n 'attributes': {\n 'mail': ['[email protected]'],\n 'cn': ['Bar User', 'Baruser'],\n 'title': 'Job title',\n },\n 'type': 'searchResEntry',\n 'dn': 'uid=000000000,c=de,ou=base,o=foo.com',\n }\n self.mock_conn.response.__getitem__.return_value = fake_resp\n\n # perform action\n ldap_manager = ldap.MANAGER()\n\n # validate response\n check_resp = {\n 'login': fake_resp['attributes']['mail'][0],\n 'fullname': fake_resp['attributes']['cn'][0],\n 'title': fake_resp['attributes']['title'],\n }\n self.assertEqual(\n check_resp, ldap_manager.authenticate('baruser', 'barpwd'))\n\n # validate behavior\n ldap_conf = self.test_conf['auth']['ldap']\n\n search_filter = '(&{group_filter}({member_attr}={user_dn}))'.format(\n group_filter=ldap_conf['group_filter'],\n member_attr=ldap_conf['group_membership_attr'],\n user_dn=fake_resp['dn']\n )\n self.mock_conn.search.assert_called_with(\n search_base=ldap_conf['group_base'],\n search_filter=search_filter,\n attributes=[ldap_conf['group_membership_attr']]\n )\n self.mock_ldap3.Connection.assert_any_call(\n sentinel.server_obj,\n ldap_conf['username'],\n ldap_conf['password'],\n read_only=True,\n receive_timeout=10\n )\n self.mock_ldap3.Connection.assert_any_call(\n sentinel.server_obj,\n fake_resp['dn'],\n 'barpwd',\n read_only=True,\n receive_timeout=10\n )\n self.mock_conn.bind.assert_called_with()", "def test_connect(self):\n self.conn = SolrConnection(SOLR_HTTP)\n\n try:\n self.conn.conn.request(\"GET\", SOLR_PATH)\n except socket.error, e:\n self.fail(\"Connection to %s failed\" % (SOLR_HTTP))\n\n status = self.conn.conn.getresponse().status\n self.assertEquals(status, 302, \"Expected FOUND (302), got: %d\" % status)", "def connect_samdb_env(env_url, env_username, env_password, lp=None):\n samdb_url = env_get_var_value(env_url)\n creds = credentials.Credentials()\n if lp is None:\n # guess Credentials parameters here. 
Otherwise workstation\n # and domain fields are NULL and gencache code segfalts\n lp = param.LoadParm()\n creds.guess(lp)\n creds.set_username(env_get_var_value(env_username))\n creds.set_password(env_get_var_value(env_password))\n return connect_samdb(samdb_url, credentials=creds, lp=lp)", "def test_wrong_search_param(self):\n def close_conn():\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n LDAPConnection(cli).search()\n def missing_scope():\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n LDAPConnection(cli).open().search()\n def wrong():\n cli = LDAPClient(\"ldap://%s\" % self.ipaddr)\n LDAPConnection(cli).open().search(\"\", 0, 3)\n self.assertRaises(ClosedConnection, close_conn)\n self.assertRaises(ValueError, missing_scope)\n self.assertRaises(TypeError, wrong)", "def test_unexpectedLoginFailure(self):\n\n class UnexpectedException(Exception):\n \"\"\"\n An unexpected exception.\n \"\"\"\n\n class FailingChecker:\n \"\"\"\n A credentials checker whose L{requestAvatarId} method\n raises L{UnexpectedException}.\n \"\"\"\n credentialInterfaces = (IUsernameHashedPassword,\n IUsernamePassword)\n\n def requestAvatarId(self, credentials):\n raise UnexpectedException(\"Unexpected error.\")\n\n realm = TestRealm()\n portal = Portal(realm)\n portal.registerChecker(FailingChecker())\n self.server.portal = portal\n\n self.server.challengers[b'LOGIN'] = loginCred = imap4.LOGINCredentials\n\n verifyClass(IChallengeResponse, loginCred)\n\n cAuth = imap4.LOGINAuthenticator(b'testuser')\n self.client.registerAuthenticator(cAuth)\n\n def auth():\n return self.client.authenticate(b'secret')\n\n def assertUnexpectedExceptionLogged():\n self.assertTrue(self.flushLoggedErrors(UnexpectedException))\n\n d1 = self.connected.addCallback(strip(auth))\n d1.addErrback(self.assertClientFailureMessage,\n b\"Server error: login failed unexpectedly\")\n d1.addCallback(strip(assertUnexpectedExceptionLogged))\n d1.addCallbacks(self._cbStopClient, self._ebGeneral)\n d = defer.gatherResults([self.loopback(), d1])\n return d", "def run_connection(self, url_string , auth = False):\n\t\tresponse = self.client.get(url_string)\n\t\tif auth == True:\n\t\t\tself.assertEqual(response.status_code, 404)\n\n\t\tself.assertEqual(response.status_code, 200 , url_string + ' is not connecting properly')\n\n\t\tif auth == True:\n\t\t\tmy_admin = HelperFunctions().create_superuser()\n\t\t\t\n\t\t\tself.client.login(my_admin.user , my_admin.password)", "def test_failure(self):\n \n result = self.authenticator.authenticate(\n username=u'thruflo', \n password=u'wrong'\n )\n self.assertTrue(result is None)", "def test_postgresql_connect_fail(self):\n if _is_backend_avail('postgresql', user=\"openstack_cifail\"):\n self.fail(\"Shouldn't have connected\")", "async def test_connect_error_with_no_server(event_loop):\n client = SMTP(hostname=\"127.0.0.1\", port=65534, loop=event_loop)\n\n with pytest.raises(SMTPConnectError):\n await client.connect(timeout=0.1)", "def test_connect(rgd):\n assert rgd.connected is True", "def test_not_in_domain(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Throw exception, need to clear internal cached host in driver\n self._empty_domain_list = True\n self.driver._vgc_host = None\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def test_loginException(self):\n\n class UnexpectedException(Exception):\n \"\"\"\n An unexpected exception.\n \"\"\"\n\n def raisesUnexpectedException(user, passwd):\n raise 
UnexpectedException(\"Whoops\")\n\n self.server.authenticateLogin = raisesUnexpectedException\n\n def login():\n return self.client.login(b'testuser', b'password-test')\n\n d1 = self.connected.addCallback(strip(login))\n\n d1.addErrback(self.assertClientFailureMessage, b\"Server error: Whoops\")\n\n @d1.addCallback\n def assertErrorLogged(_):\n self.assertTrue(self.flushLoggedErrors(UnexpectedException))\n\n d1.addErrback(self._ebGeneral)\n d1.addBoth(self._cbStopClient)\n\n d2 = self.loopback()\n d = defer.gatherResults([d1, d2])\n return d.addCallback(self._cbTestFailedLogin)", "def connect() -> None:\n # attempt to load a database extension\n global _db_ext\n _db_ext = extensions.database_extension()\n if _db_ext is None:\n # The fallback gets implemented via the default_cap\n # dict defined in _capability()\n log.debug(\"Using internal database module.\")\n _db_ext = dict()\n\n # Tell everyone whether we're threadsafe\n global threadsafe\n threadsafe = _capability(\"reentrant\")\n\n # If fetch the database config, if present\n if \"database\" in config.config:\n database_config = config.config[\"database\"]\n else:\n database_config = dict()\n\n # Call the connect function from the database extension (or fallback)\n func = _capability(\"connect\")\n if func is None:\n func = _connect\n db = func(config=database_config)\n\n database_proxy.initialize(db)\n\n if isinstance(db, (pw.MySQLDatabase, pw.PostgresqlDatabase)):\n db.field_types[\"enum\"] = \"enum\"\n EnumField.native = True\n else:\n EnumField.native = False", "def test_is_embedded_fail(self):\n\n self._set_args()\n with self.assertRaises(AnsibleFailJson):\n with mock.patch(self.REQ_FUNC, side_effect=Exception):\n ldap = Ldap()\n ldap.is_embedded()" ]
[ "0.7001593", "0.6606929", "0.65416896", "0.6496196", "0.64913446", "0.6485445", "0.6349957", "0.61097586", "0.6085969", "0.60859203", "0.6060914", "0.60118145", "0.6002777", "0.5980626", "0.59615344", "0.59526885", "0.59469163", "0.5924804", "0.5921622", "0.5895566", "0.587629", "0.58682793", "0.58007044", "0.57908666", "0.577932", "0.5769446", "0.57501197", "0.57329315", "0.5731093", "0.5695066", "0.56914645", "0.5673622", "0.56612766", "0.56569314", "0.5598953", "0.55770206", "0.55464315", "0.5545359", "0.5526642", "0.55110335", "0.54875326", "0.5470346", "0.5454395", "0.5449834", "0.54497135", "0.5428271", "0.53936887", "0.5367894", "0.53556186", "0.5335071", "0.53345215", "0.53285205", "0.52940905", "0.52920115", "0.5287828", "0.5258298", "0.52298063", "0.52191794", "0.52171665", "0.5188204", "0.5179434", "0.5176004", "0.5167627", "0.51662576", "0.51566356", "0.51510197", "0.5146276", "0.5145231", "0.51398695", "0.5126293", "0.5123235", "0.5116139", "0.51066464", "0.5104939", "0.51044303", "0.5089065", "0.50786054", "0.5076322", "0.50706965", "0.5065588", "0.5060677", "0.5057366", "0.5057072", "0.50569314", "0.50539523", "0.50514686", "0.5050429", "0.5048826", "0.50449157", "0.5043606", "0.5038079", "0.50377184", "0.5033669", "0.5024428", "0.50163573", "0.50156313", "0.5013186", "0.5001939", "0.49897075", "0.49866524" ]
0.6377324
6
Processes a list of splits by modifying any positions as needed.
def handle_splits(self, splits):
        total_leftover_cash = 0

        for instrument, ratio in splits:
            if instrument in self.positions:
                self._dirty_stats = True

                # Make the position object handle the split. It returns the
                # leftover cash from a fractional share, if there is any.
                position = self.positions[instrument]
                leftover_cash = position.handle_split(instrument, ratio)
                total_leftover_cash += leftover_cash

        return total_leftover_cash
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self, splits, catchall=False):\r\n raise NotImplementedError()", "def _setup_splits(self):\n #ntot = self.reredux_conf['nperfile']\n ntot = self.reredux_conf['Ngals']\n npersplit = self.runconf['nper']\n\n self.beglist, self.endlist = get_splits(ntot, npersplit)", "def addSplit(self):\n pass", "def split(self, place_leaf_splitted):\n raise NotImplementedError", "def split(self, X):", "def _splitPoints(self, points, split):\n # validate split\n if not split:\n return [points]\n\n # complete split with adding start and end frames\n if split[0] != 0:\n split.insert(0, 0)\n\n if split[-1] != len(points):\n split.append(len(points))\n\n # make sure split is sorted and doesn't contain any duplicates\n split = list(set(split))\n split.sort()\n\n # split range for looping\n splitA = split[:-1]\n splitB = split[1:]\n\n # get lists\n return [points[a:b + 1] for a, b in zip(splitA, splitB)]", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def split(self):\n st = time()\n tokens = self._build_args.tokens\n\n for token_split in IStorage._tokens_partitions(tokens, config.min_number_of_tokens,\n config.number_of_partitions):\n storage_id = uuid.uuid4()\n log.debug('assigning to %s %d tokens', str(storage_id), len(token_split))\n new_args = self._build_args._replace(tokens=token_split, storage_id=storage_id)\n self.__class__._store_meta(new_args)\n\n yield self.__class__.build_remotely(new_args)\n log.debug('completed split of %s in %f', self.__class__.__name__, time() - st)", "def nextSplit(self):\n pass", "def place_at_splits(data):\n groups = defaultdict(list)\n for runner_idx, runner in enumerate(data):\n splits = runner['splits']\n for split in splits:\n split['runner_idx'] = runner_idx\n groups[split['split_dist']].append(split)\n\n ranks = []\n srt_keys = sorted(groups, key=groups.get)\n for key in srt_keys:\n group = groups[key]\n srt_group = sorted(group, key=lambda t: t['split_mins'])\n ranked_group = []\n for rank, split in enumerate(srt_group):\n split['rank'] = rank\n ranked_group.append(split)\n ranks.append(ranked_group)\n\n return data, ranks", "def unsplit(self, variant_groups):\n for vargroup in variant_groups:\n self.variant_list.extend(vargroup.variant_list)\n self.pos = min([var.start for var in self.variant_list])\n self.end = max([var.end for var in self.variant_list])", "def go(self):\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n for isplit,fof_split in enumerate(fof_splits):\n logger.info('%s %s' % (isplit,fof_split))\n self._write_split(isplit, fof_split)", "def process(lines):\n lines = list(map(_clean, lines))\n # lines = list(map(_split, lines))\n return lines", "def split(self):\n if self.split_level <= 0:\n self.doSplit = False\n return []\n else:\n vel_polar = cart_to_polar(self.pos[1])\n\n ang1 = vel_polar[1] + math.pi / 4\n ang2 = vel_polar[1] - math.pi / 4\n\n vel1 = polar_to_cart( [vel_polar[0] * 1.5, ang1] )\n vel2 = polar_to_cart( [vel_polar[0] * 1.5, ang2] )\n\n offset1 = polar_to_cart( [self.radius, ang1] )\n offset2 = polar_to_cart( [self.radius, ang2] )\n\n pos1 = self.pos[0] + offset1\n pos2 = self.pos[0] + offset2\n\n return [ Asteroid([pos1, vel1], [self.ang[0] + 0.01, self.ang[1] * 1.5],\n scale = self.scale / 1.4,\n color_idx = self.color_idx,\n doSplit = True,\n split_level = self.split_level - 1\n ),\n Asteroid([pos2, 
vel2], [self.ang[0] - 0.01, -self.ang[1] * 1.5],\n scale = self.scale / 1.4,\n color_idx = self.color_idx,\n doSplit = True,\n split_level = self.split_level - 1\n )\n ]", "def spliter(temp,split1,split2):\n for x in range(len(temp)):\n if x<len(temp)/2:\n split1.append(temp[x])\n else:\n split2.append(temp[x])", "def splits(self) -> List[int]:\n if self._splits is None:\n self.RefreshStats()\n return self._splits", "def split(\n items: typing.List[typing.Any],\n sizes: typing.List[float],\n random_state: int = 42,\n stratify: typing.Sequence[typing.Hashable] = None,\n group: typing.Sequence[typing.Hashable] = None,\n preserve: typing.Sequence[typing.Optional[int]] = None,\n) -> typing.Sequence[typing.Any]:\n splits: typing.List[typing.List[typing.Any]] = [[] for _ in range(len(sizes))]\n if group is None:\n group = list(range(len(items)))\n if stratify is None:\n stratify = [0] * len(items)\n if preserve is not None:\n assert len(items) == len(\n preserve\n ), \"When preserve is provided, it must be the same length as items.\"\n for item, preserveIdx in zip(items, preserve):\n if preserveIdx is not None:\n splits[preserveIdx].append(item)\n ideal_counts = [s * len(items) for s in sizes]\n items, stratify, group = [\n [\n entry\n for entry, preserveIdx in zip(current_list, preserve)\n if preserveIdx is None\n ]\n for current_list in [items, stratify, group]\n ]\n if len(items) == 0:\n # There's nothing left to split.\n return splits\n # Rebalance sizes so that we shuffle the remaining\n # items into the splits to try and match the originally\n # desired sizes.\n offsets = [\n max(target - len(split), 0) for split, target in zip(splits, ideal_counts)\n ]\n sizes = [offset / sum(offsets) for offset in offsets]\n assert (\n 0.99 < sum(sizes) < 1.01\n ), f\"The sizes must add up to 1.0 (they added up to {sum(sizes)}).\"\n assert len(group) == len(items), \"group must be the same length as the collection.\"\n assert len(stratify) == len(\n items\n ), \"stratify must be the same length as the collection.\"\n rng = np.random.default_rng(seed=random_state)\n grouped = [\n {**dict(zip([\"idxs\", \"stratifiers\"], zip(*grouper))), \"group\": g}\n for g, grouper in groupby_unsorted(\n list(zip(range(len(stratify)), stratify)),\n key=lambda v: typing.cast(typing.Sequence[typing.Hashable], group)[v[0]],\n )\n ]\n hashes = {\n h: list(g)\n for h, g in groupby_unsorted(\n grouped, key=lambda g: hash(tuple(set(g[\"stratifiers\"])))\n )\n }\n for subgroups in hashes.values():\n for a, u in zip(\n rng.choice(len(sizes), size=len(subgroups), p=sizes),\n subgroups,\n ):\n splits[a].extend(items[idx] for idx in u[\"idxs\"])\n return splits", "def fillBestSplitsInDataByInfoGainIntoDict(self, data, structure, colName, numOfSplits, splitsList, indexToInsert):\n if len(data) <= 0 or numOfSplits <= 0:\n return []\n colIndex = structure[colName]['index']\n split = self.findBestSplitInDataByInfoGain(data, structure, colName)\n if str(indexToInsert) in splitsList:\n splitsList[str(indexToInsert)] += [split]\n else:\n splitsList[str(indexToInsert)] = [split]\n indexToInsert, numOfSplits = indexToInsert + 1, numOfSplits - 1\n\n if split:\n newDataBellowSplit = list(filter(lambda y: float(y[colIndex]) <= split[0], data))\n newDataAboveSplit = list(filter(lambda y: float(y[colIndex]) > split[0], data))\n self.fillBestSplitsInDataByInfoGainIntoDict(newDataBellowSplit, structure, colName, numOfSplits, splitsList, indexToInsert)\n self.fillBestSplitsInDataByInfoGainIntoDict(newDataAboveSplit, structure, colName, numOfSplits, 
splitsList, indexToInsert)", "def setSplit(self,split):\n self.split=split", "def split_train(splits, val_size, groups=None, **kwargs):\n new_splits = []\n for train_val, test in splits:\n sub_groups = None if groups is None else groups[train_val]\n train, val = train_test_split_groups(\n train_val, val_size=val_size, groups=sub_groups, **kwargs) if val_size > 0 else (train_val, [])\n new_splits.append([train, val, test])\n return new_splits", "def process(self, lists, subqueries):\n pass", "def split_chunks(item_list, num_items_in_list):\n for item in range(0, len(item_list), num_items_in_list):\n # Create an index range for item_list of num_items_in_list items:\n yield item_list[item:item + num_items_in_list]", "def split(self, num_or_size_splits, shuffle=False):\n raise NotImplementedError", "def split(self, user, number=2, piece='a', comment=None, force_refresh=True):\n if comment is None:\n comment = 'Split sample into {0} pieces'.format(number)\n\n process = Process.objects.create(title='Split Sample',\n comment=comment,\n user=user,\n type_id='split-process')\n nodes = []\n\n branch = self.get_piece(piece)\n for i in range(number):\n if i == 0:\n new_piece = piece\n else:\n new_piece = self._get_next_piece()\n # Note: Issue #248 in django-mptt causes the tree to not be properly\n # updated when inserting objects if parent is set. Workaround\n # is to set parent_id instead. This fixes methods such as\n # MPTTModel.get_descendants(). Since you are referencing an\n # object that has changed in the database (process_tree),\n # the lft and rght items are not updated properly. Workarounds\n # include manually updating the root node or requerying for\n # the sample object which will force a refresh.\n nodes.append(self._insert_node(process, new_piece, i + 1, branch))\n if force_refresh: # workaround to force the root node to update\n self.refresh_tree()\n return nodes", "def set_split(self):\n #Regular expressions; try 1 first, then 2, etc.\n rex1 = re.compile('F?LD')\n rex2 = re.compile('[LF]?LQ')\n \n #For regular expression, check if there is a match that is >10 AA from the end\n if re.search(rex1, self.sequence) and len(re.split(rex1, self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex1.finditer(self.sequence)][-1]\n# end += 16 #TODO why +15/16?\n elif re.search(rex2, self.sequence) and len(re.split(rex2,self.sequence)[-1]) > 10:\n start, end = [m.span() for m in rex2.finditer(self.sequence)][-1]\n# end += 15\n else:\n self.split_index = -1\n self.core = self.sequence\n self.leader = ''\n return\n self.split_index = end\n self.leader = self.sequence[:end]\n self.core = self.sequence[end:]", "def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]", "def split_all(self):\n for domino in self.dominoes[:]:\n self.split(domino)", "def apply_split(\n sid: str,\n old_shares: int,\n new_shares: int\n ) -> list[dict[str, Union[str, int]]]:\n params = {\n \"sid\": sid,\n \"old_shares\": old_shares,\n \"new_shares\": new_shares\n }\n response = houston.patch(\"/blotter/positions\", params=params)\n houston.raise_for_status_with_json(response)\n return response.json()", "def process_commands():\n # Parse and handle each different command\n args = parse_arguments()\n\n pdfsplit.pdf_split(args.file, args.pages, args.offset)", "def make_splits(self):\n # produce fold/portion splits of the training indexes: these output indexes to the tr. 
indexes themselves\n if self.folds is not None:\n meta_trainval_idx = kfold_split(self.train_idx, self.folds, self.seed, self.labels, self.label_info)\n elif self.portion is not None:\n meta_trainval_idx = portion_split(self.train_idx, self.portion, self.seed, self.labels, self.label_info)\n else:\n meta_trainval_idx = [(np.arange(len(self.train_idx)), np.arange(0, dtype=np.int32))]\n # \"dereference\" the metaindexes to point to the data themselves\n self.trainval_idx = []\n for (tidx, vidx) in meta_trainval_idx:\n self.trainval_idx.append((self.train_idx[tidx], self.train_idx[vidx]))", "def do_split(self, segs, segs_tips, segs_undecided, segs_adjacency,\n segs_distances, iseg, new_tips):\n seg = segs[iseg]\n # restrict distance matrix to points in segment\n if not isinstance(self.Dchosen, data_graph.OnFlySymMatrix):\n Dseg = self.Dchosen[np.ix_(seg, seg)]\n else:\n Dseg = self.Dchosen.restrict(seg)\n # given the three tip points and the distance matrix detect the\n # branching on the segment, return the list ssegs of segments that\n # are defined by splitting this segment\n result = self._do_split(Dseg, new_tips, seg, segs_tips)\n ssegs, ssegs_tips, ssegs_adjacency, trunk = result\n # map back to global indices\n for iseg_new, seg_new in enumerate(ssegs):\n ssegs[iseg_new] = seg[seg_new]\n ssegs_tips[iseg_new] = seg[ssegs_tips[iseg_new]]\n # remove previous segment\n segs.pop(iseg)\n segs_tips.pop(iseg)\n # insert trunk at same position\n segs.insert(iseg, ssegs[trunk])\n segs_tips.insert(iseg, ssegs_tips[trunk])\n # append other segments\n segs += [seg for iseg, seg in enumerate(ssegs) if iseg != trunk]\n segs_tips += [seg_tips for iseg, seg_tips in enumerate(ssegs_tips) if iseg != trunk]\n if len(ssegs) == 4:\n # insert undecided cells at same position\n segs_undecided.pop(iseg)\n segs_undecided.insert(iseg, True)\n # correct edges in adjacency matrix\n n_add = len(ssegs) - 1\n new_shape = (segs_distances.shape[0] + n_add, segs_distances.shape[1] + n_add)\n # segs_distances.resize() throws an error!\n segs_distances_help = segs_distances.copy()\n segs_distances = np.zeros((new_shape))\n segs_distances[np.ix_(range(segs_distances_help.shape[0]),\n range(segs_distances_help.shape[1]))] = segs_distances_help\n segs_distances = self.adjust_adjacency(iseg, n_add,\n segs,\n segs_tips,\n segs_adjacency,\n segs_distances)\n segs_undecided += [False for i in range(n_add)]\n # need to return segs_distances as inplace formulation doesn't work\n return segs_distances", "def _score_tokens(self, tokens: [str], positions: {str: [int]}, weights: {str: int}) -> None:\n for i, token in enumerate(tokens):\n positions[token].append(i)\n self._increment_token_weight(weights, token=token)", "def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + 
chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()", "def split_list(items, pred):\n\n thisresult = []\n results = [thisresult]\n for i in items:\n thisresult.append(i)\n if pred(i):\n thisresult = []\n results.append(thisresult)\n return results", "def change_list_elements(operatorsList, numbersList, result, pos):\n operatorsList.pop(pos)\n numbersList.pop(pos+1)\n numbersList[pos] = result\n return operatorsList, numbersList", "def split_list_by(lst, sepfunc, includesep):\n\tblocks = []\n\tblock = []\n\tfor elem in lst:\n\t\tif sepfunc(elem):\n\t\t\tif includesep:\n\t\t\t\tblock.append(elem)\n\t\t\tblocks.append(block)\n\t\t\tblock = []\n\t\telse:\n\t\t\tblock.append(elem)\n\tif len(block):\n\t\tblocks.append(block)\n\treturn blocks", "def fillDictWithBestValueSplitsOfDataByGini(self, data, structure, colIndex, numOfSplits, splitsList, indexToInsert):\n if len(data) <= 0 or numOfSplits <= 0:\n return []\n split = self.findBestValueSplitByGini(data, structure, colIndex)\n if str(indexToInsert) in splitsList:\n splitsList[str(indexToInsert)] += [split]\n else:\n splitsList[str(indexToInsert)] = [split]\n indexToInsert, numOfSplits = indexToInsert + 1, numOfSplits - 1\n\n if split:\n newDataBellowSplit = list(filter(lambda y: float(y[colIndex]) <= split[0], data))\n newDataAboveSplit = list(filter(lambda y: float(y[colIndex]) > split[0], data))\n 
self.fillDictWithBestValueSplitsOfDataByGini(newDataBellowSplit, structure, colIndex, numOfSplits, splitsList, indexToInsert)\n self.fillDictWithBestValueSplitsOfDataByGini(newDataAboveSplit, structure, colIndex, numOfSplits, splitsList, indexToInsert)", "def _parse_split_dims(tree, split_dims, level=0, parent=None, max_level=7,\n split_positions=None):\n if level == max_level:\n # this is leaf tree, and split_dim=-1.\n return\n\n if tree.lesser is not None:\n _parse_split_dims(tree.lesser, split_dims, level=level+1, parent=tree,\n max_level=max_level, split_positions=split_positions)\n else:\n # This will happen when the point is overlapped and `split_dim==-1`,\n # in this case just use parent `tree`.\n # print('tree.lesser is None, level {}'.format(level))\n _parse_split_dims(tree, split_dims, level=level+1, parent=tree,\n max_level=max_level, split_positions=split_positions)\n if tree.greater is not None:\n _parse_split_dims(tree.greater, split_dims, level=level+1, parent=tree,\n max_level=max_level, split_positions=split_positions)\n else:\n # This will happen when the point is overlapped and `split_dim==-1`,\n # in this case just use parent `tree`.\n # print('[WARNING] tree.greater is None, level {}'.format(level))\n _parse_split_dims(tree, split_dims, level=level+1, parent=tree,\n max_level=max_level, split_positions=split_positions)\n\n if level < max_level:\n split_dim = tree.split_dim\n if split_dim == -1:\n # since we repeated premature leafs, we get invalid splits\n # in this case just use the parents.\n # This case happen when the points are overlapped.\n # print('split_dim is -1 at level', level)\n split_dim = parent.split_dim if (parent.split_dim > -1) else 0\n split_dims[level].append(split_dim)\n if split_positions is not None:\n split = tree.split\n if split_dim == -1:\n split = parent.split if (parent.split_dim > -1) else 0\n split_positions[level].append(split)", "def split(self, reads_in_first_split=None):\n split_index = 0\n self.out_fns = []\n writer = open(self._out_fn(split_index), 'w')\n self.out_fns.append(self._out_fn(split_index))\n if reads_in_first_split is None:\n reads_in_first_split = self.reads_per_split\n\n io_format = 'fastq' if self.is_fq else 'fasta'\n reader = SeqIO.parse(open(self.input_fa_or_fq), io_format)\n for ridx, r in enumerate(reader):\n if ((split_index == 0 and ridx == reads_in_first_split) or\n (split_index > 0 and ridx % self.reads_per_split == 0)) \\\n and ridx != 0:\n split_index += 1\n writer.close()\n writer = open(self._out_fn(split_index), 'w')\n self.out_fns.append(self._out_fn(split_index))\n SeqIO.write(r, writer, io_format)\n writer.close()\n return list(self.out_fns)", "def priority_split(text, *splitters):\n present = [s for s in splitters if s in text]\n # fall back to non-present splitter; ensures we have a splitter\n splitters = present + list(splitters)\n splitter = splitters[0]\n return [seg.strip() for seg in text.split(splitter) if seg.strip()]", "def split_POS_tag(data_dir, split_sum, split_count):\n with open(data_dir + \"pos_tag.json\") as pos_tag:\n pos_data = json.load(pos_tag)\n\n for i in range(0, split_sum, split_count):\n pos_data_split = pos_data[i:i+split_count]\n split_dir = data_dir + \"pos_tag_\"+ str(i) + \"_\" + str(i+split_count) + \".json\"\n with open(split_dir, \"w\") as out_file:\n json.dump(pos_data_split, out_file, indent=4)", "def split_bygeom(self, iterable, geom_getter=lambda x: x.geom):\n points, linestrings, multipoints, multilinestrings = [], [], [], []\n\n for x in iterable:\n geom = 
geom_getter(x)\n if geom is None:\n pass\n elif isinstance(geom, GeometryCollection):\n # Duplicate object, shapefile do not support geometry collections !\n subpoints, sublines, pp, ll = self.split_bygeom(geom, geom_getter=lambda geom: geom)\n if subpoints:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiPoint(subpoints, srid=geom.srid)\n multipoints.append(clone)\n if sublines:\n clone = x.__class__.objects.get(pk=x.pk)\n clone.geom = MultiLineString(sublines, srid=geom.srid)\n multilinestrings.append(clone)\n elif isinstance(geom, Point):\n points.append(x)\n elif isinstance(geom, LineString):\n linestrings.append(x)\n else:\n raise ValueError(\"Only LineString and Point geom should be here. Got %s for pk %d\" % (geom, x.pk))\n return points, linestrings, multipoints, multilinestrings", "def detect_splits(self):\n logg.info(' abstracted graph will have {} nodes'.format(self.n_splits+1))\n indices_all = np.arange(self.X.shape[0], dtype=int)\n segs = [indices_all]\n if False: # this is safe, but not compatible with on-the-fly computation\n tips_all = np.array(np.unravel_index(np.argmax(self.Dchosen), self.Dchosen.shape))\n else:\n if self.iroot is not None:\n tip_0 = np.argmax(self.Dchosen[self.iroot])\n else:\n tip_0 = np.argmax(self.Dchosen[0]) # just a random index, here fixed to \"0\"\n tips_all = np.array([tip_0, np.argmax(self.Dchosen[tip_0])])\n # we keep a list of the tips of each segment\n segs_tips = [tips_all]\n if self.clusters_precomputed_names:\n self.segs_names_original = [', '.join(self.clusters_precomputed_names)]\n segs_undecided = [True]\n segs_adjacency = [[]]\n segs_distances = np.zeros((1, 1))\n segs_adjacency_nodes = [{}]\n # logg.info(' do not consider groups with less than {} points for splitting'\n # .format(self.min_group_size))\n for ibranch in range(self.n_splits):\n if self.clusters == 'unconstrained_segments':\n iseg, new_tips = self.select_segment(segs, segs_tips, segs_undecided)\n if iseg == -1:\n logg.info('... partitioning converged')\n break\n logg.info('... 
branching {}:'.format(ibranch + 1),\n 'split group', iseg)\n segs_distances = self.do_split(segs, segs_tips,\n segs_undecided,\n segs_adjacency,\n segs_distances,\n iseg, new_tips)\n else:\n logg.msg(' split', ibranch + 1, v=4)\n stop, segs_distances = self.do_split_constrained(segs, segs_tips,\n segs_adjacency,\n segs_adjacency_nodes,\n segs_distances)\n if stop: break\n\n # segments\n self.segs = segs\n self.segs_tips = segs_tips\n self.segs_sizes = []\n for iseg, seg in enumerate(self.segs): self.segs_sizes.append(len(seg))\n\n # the full, unscaled adjacency matrix\n self.segs_adjacency_full_attachedness = 1/segs_distances\n # if self.attachedness_measure == 'connectedness':\n # norm = np.sqrt(np.multiply.outer(self.segs_sizes, self.segs_sizes))\n # self.segs_adjacency_full_attachedness /= norm\n self.segs_adjacency_full_confidence, self.segs_adjacency_tree_confidence \\\n = self.compute_adjacency_confidence(\n self.segs_adjacency_full_attachedness,\n segs_adjacency,\n self.tree_based_confidence)\n np.fill_diagonal(self.segs_adjacency_full_attachedness, 0)", "def parse_split(self, response):\n # save full HTML content for this race's split data\n if self.save_html:\n race_file_name = f'{response.meta[\"round_file_name\"]}-race_{response.meta[\"race_number\"]}-splits'\n save_raw_html(html_content=response.body, file_name=race_file_name, split=True)\n\n # extract split times and positions for each athlete on each lap\n athlete_names = response.css('tr.tablehead th[scope=\"col\"]::text')[1:].getall()\n laps = response.css('tr[class*=tablecol]')\n\n num_laps = len(laps)\n split_data = {\n \"season\": [response.meta[\"season_title\"]] * num_laps,\n \"competition\": [response.meta[\"competition_title\"]] * num_laps,\n \"event\": [response.meta[\"event_title\"]] * num_laps,\n \"instance_of_event_in_competition\": [response.meta[\"instance_of_event_in_competition\"]] * num_laps,\n \"gender\": [response.meta[\"event_gender\"]] * num_laps,\n \"round\": [response.meta[\"round_title\"]] * num_laps,\n \"race\": [response.meta[\"race_number\"]] * num_laps\n }\n col_ids = list()\n for start_position in range(1, MAX_ATHLETES_IN_RACE + 1):\n col_id = f'START_POS_{str(start_position)}'\n if start_position <= len(athlete_names):\n col_ids.append(col_id)\n split_data[f'{col_id} POSITION'] = [np.nan] * num_laps\n split_data[f'{col_id} LAP TIME'] = [np.nan] * num_laps\n split_data[f'{col_id} ELAPSED TIME'] = [np.nan] * num_laps\n\n for lap_index, lap in enumerate(laps):\n for athlete_col, col_id in zip(lap.css('td')[1:], col_ids):\n athlete_position = athlete_col.css('td span::text').get()\n athlete_position_cleaned = athlete_position.strip('[]') if athlete_position is not None else np.nan\n split_data[f'{col_id} POSITION'][lap_index] = athlete_position_cleaned\n\n laptime_field = athlete_col.css('td::text').getall()\n if len(laptime_field):\n both_times = regex_replace(laptime_field[1]).strip(')').split('(')\n else:\n both_times = [np.nan, np.nan]\n split_data[f'{col_id} LAP TIME'][lap_index] = parse_time_string(both_times[1])\n split_data[f'{col_id} ELAPSED TIME'][lap_index] = parse_time_string(both_times[0])\n\n save_parsed_data(df=pd.DataFrame(split_data), file_path=SPLITS_FILE)", "def mapped_split(reddit_dir, data_set_name, mapped_col, result_col, num_splits):\n\n table_files = os.listdir(os.path.join(reddit_dir, data_set_name))\n args_list = [\n (reddit_dir, data_set_name, table_fname, mapped_col, result_col, num_splits)\n for table_fname in table_files\n ]\n\n process = psutil.Process(os.getpid())\n 
logger.debug(\"PID: %d, Memory usage: %.1f GB\" % (process.pid, process.memory_info().rss / 1e9))\n\n logger.debug(\"PID: %d, forking...\" % process.pid)\n pool = mp.Pool(pool_size)\n pool.map(unpack_mapped_split_core, args_list)", "def split_list(self):\n wanted_parts = self.args.ncore\n alist = glob.glob(self.args.input + '*.root')\n length = len(alist)\n return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]\n for i in range(wanted_parts)]", "def process_data(self):\r\n \r\n self.processed_data = dict()\r\n for split,text_data_ in self.text_data.items():\r\n y = text_data_[self.target_col].values\r\n print(\"Vectorizing for split: \"+split)\r\n x = np.array([self.vectorizer(x_) for x_ in text_data_['Text']])\r\n \r\n self.processed_data[split] = {'x':x,'y':y}\r\n \r\n self.set_split(self.split_)", "def chunk(self, shape, split) -> NotImplementedError:\n raise NotImplementedError()", "def split(features, groundtruths, n_split):\n\n if n_split == 1:\n return features, groundtruths\n\n tags = list(set(groundtruths))\n new_index = {}\n for tag in tags:\n new_index[tag] = []\n for index, gt in enumerate(groundtruths):\n new_index[gt].append(index)\n new_feats = []\n new_gts = []\n for i in range(0, n_split):\n indexes = []\n for tag in tags:\n ref = len(new_index[tag])/n_split\n indexes.append(new_index[tag][ref*i:ref*(i+1)])\n \"\"\"\n ..todo:: manage multiple tags!\n \"\"\"\n indexes = indexes[0] + indexes[1]\n # print(features[:5])\n # print(len(indexes))\n # print(len(indexes[0]))\n # print(len(indexes[1]))\n # sys.exit()\n indexes.sort()\n new_gts.append([groundtruths[j] for j in indexes])\n new_feats.append([features[j] for j in indexes])\n return new_feats, new_gts", "def split(self, params):\n\n if \"train_df\" in params.keys():\n self.df = params[\"train_df\"]\n if \"test_df\" in params.keys():\n self.df = pd.concat([self.df, params[\"test_df\"]])\n if \"n_splits\" in params.keys():\n self.n_splits = params[\"n_splits\"]\n if \"shuffle\" in params.keys():\n self.shuffle = params[\"shuffle\"]\n if \"random_state\" in params.keys():\n self.random_state = params[\"random_state\"]\n\n self.__validate_input()\n\n n_samples = num_of_samples(self.df)\n\n if self.n_splits > n_samples:\n raise ValueError(\n f\"Cannot have number of splits {self.n_splits} > number of\"\n f\" samples {n_samples}\"\n )\n\n indices = np.arange(n_samples)\n for test_indices in self.__iter_test_indices(n_samples):\n train_indices = indices[np.logical_not(test_indices)]\n test_indices = indices[test_indices]\n yield train_indices, test_indices", "def convert_split(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.input(\"AxisTensor\")\n if axis:\n axis = g.get_node(axis[0])\n axis, infered = try_infer_value(axis, g.get_params())\n if infered:\n axis = axis.tolist()[0]\n else:\n axis = op.attr(\"axis\")\n\n sections = op.input(\"SectionsTensorList\")\n if sections:\n tmp_section = []\n for i in sections:\n i = g.get_node(i)\n i, infered = try_infer_value(i, g.get_params())\n if infered:\n i = i.tolist()\n else:\n raise ValueError(\"Dynamic Split not yet supported.\")\n tmp_section.extend(i)\n sections = tmp_section\n else:\n sections = op.attr(\"sections\")\n if sections:\n indices = []\n split_index = 0\n for i in sections[:-1]:\n if i == -1:\n input_shape = infer_shape(x)[axis]\n i = input_shape - np.sum(sections) - 1\n split_index += i\n indices.append(split_index)\n else:\n indices = op.attr(\"num\")\n\n out = _op.split(x, indices, axis)\n for i, out_i in enumerate(out):\n 
g.add_node(op.output(\"Out\")[i], out_i)", "def smart_split(strokes):\n\n splited = []\n for stroke in strokes:\n splited += stroke.split_non_differentiable_points()\n return splited", "def split(items):\n return {\n \"class\": \"split\",\n \"items\": items\n }", "def partition_processor(partitionlinechunks):\n\n model_pipe_object = joblib.load(SparkFiles.get(\"mmp_phase1_D2.clf\"))\n\n def set_predictions(x):\n segment = model_pipe_object.predict_proba(x)\n return segment\n\n df_with_nan = build_dataframe(partitionlinechunks)\n df_with_newline = df_with_nan.replace(u\"NULL\", pd.np.nan)\n behaviour_df = df_with_newline.replace(u\"\\\\N\", pd.np.nan)\n predictions_ser = set_predictions(behaviour_df)\n\n predictions_list = [value for value in [zip(predictions_ser.index, predictions_ser.loc[:,'A'], predictions_ser.loc[:,'Y'], predictions_ser.loc[:,'segment'], predictions_ser.loc[:,'model_version'])]]\n return iter(predictions_list)", "def split(self, smiles):\n splitted_smiles = []\n for j, k in enumerate(smiles):\n if j == 0:\n if k.isupper() and smiles[j + 1].islower() and smiles[j + 1] != \"c\":\n splitted_smiles.append(k + smiles[j + 1])\n else:\n splitted_smiles.append(k)\n elif j != 0 and j < len(smiles) - 1:\n if k.isupper() and smiles[j + 1].islower() and smiles[j + 1] != \"c\":\n splitted_smiles.append(k + smiles[j + 1])\n elif k.islower() and smiles[j - 1].isupper() and k != \"c\":\n pass\n else:\n splitted_smiles.append(k)\n\n elif j == len(smiles) - 1:\n if k.islower() and smiles[j - 1].isupper() and k != \"c\":\n pass\n else:\n splitted_smiles.append(k)\n return splitted_smiles", "def _postprocess(\n self,\n result: List[str],\n eojeols: List[str],\n poses: List[str],\n ):\n token_indices = []\n temp_group = []\n for i, res in enumerate(result):\n if (\"<\" in res) or (\">\" in res):\n continue\n if not temp_group:\n temp_group.append(i)\n else:\n if i == (temp_group[-1] + 1):\n temp_group.append(i)\n else:\n token_indices.append(temp_group)\n temp_group = [i]\n token_indices.append(temp_group)\n\n lucrative = 0\n for i, li_index in enumerate(token_indices):\n if poses:\n eojeol = eojeols[i].split(\"+\")\n pos = poses[i].split(\"+\")\n tagged = []\n for e, p in zip(eojeol, pos):\n tagged.append(f\"{e}/{p}\")\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [\"+\".join(tagged)]\n else:\n result[li_index[0] - lucrative:li_index[-1] + 1 -\n lucrative] = [eojeols[i]]\n lucrative += len(li_index) - 1\n\n return result", "def split(self, moves):\r\n children = []\r\n while not moves.empty():\r\n move_pos = moves.get()[1]\r\n children.append((self.make_move(move_pos.pos, self.to_move), move_pos))\r\n\r\n return children", "def split(self):\n\t\tif self.size > 1:\n\t\t\tfor _ in range(2):\n\t\t\t\tasteroid = Asteroid(self.position, self.create_asteroid_callback, self.size-1)\n\t\t\t\tself.create_asteroid_callback(asteroid)", "def splitting_coefficients(filepath, split_scheme):\n infile = open(filepath, 'r')\n lines = infile.readlines()\n infile.close()\n\n if split_scheme == 'LF2':\n coeffs = lines[6].strip().split(', ')\n stages = lines[9].strip().split(', ')\n a1 = eval(lines[14][lines[14].find('=')+1:].strip())\n a2 = eval(lines[15][lines[15].find('=')+1:].strip())\n b1 = eval(lines[16][lines[16].find('=')+1:].strip())\n b2 = eval(lines[17][lines[17].find('=')+1:].strip())\n\n number_of_stages = dict(a = 2, b = 2)\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order, number_of_stages = number_of_stages,\n a = [None, a1, a2],\n b = [None, 
b1, b2])\n\n elif split_scheme == 'Y4':\n coeffs = lines[23].strip().split(', ')\n stages = lines[26].strip().split(', ')\n a1 = eval(lines[31][lines[31].find('=')+1:].strip())\n a2 = eval(lines[32][lines[32].find('=')+1:].strip())\n a3 = eval(lines[33][lines[33].find('=')+1:].strip())\n a4 = eval(lines[34][lines[34].find('=')+1:].strip())\n b1 = eval(lines[36][lines[36].find('=')+1:].strip())\n b2 = eval(lines[37][lines[37].find('=')+1:].strip())\n b3 = eval(lines[38][lines[38].find('=')+1:].strip())\n b4 = eval(lines[39][lines[39].find('=')+1:].strip())\n\n number_of_stages = dict(a = 4, b = 4)\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order, number_of_stages = number_of_stages,\n a = [None, a1, a2, a3, a4],\n b = [None, b1, b2, b3, b4])\n\n elif split_scheme == 'O6-4':\n coeffs = lines[45].strip().split(', ')\n stages = lines[48].strip().split(', ')\n a1 = eval(lines[53][lines[53].find('=')+1:].strip())\n a2 = eval(lines[54][lines[54].find('=')+1:].strip())\n a3 = eval(lines[55][lines[55].find('=')+1:].strip())\n a4 = eval(lines[56][lines[56].find('=')+1:].strip())\n b1 = eval(lines[58][lines[58].find('=')+1:].strip())\n b2 = eval(lines[59][lines[59].find('=')+1:].strip())\n b3 = eval(lines[60][lines[60].find('=')+1:].strip())\n b4 = eval(lines[61][lines[61].find('=')+1:].strip())\n\n number_of_stages = dict(a = 4, b = 4)\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order, number_of_stages = number_of_stages,\n a = [None, a1, a2, a3, a4],\n b = [None, b1, b2, b3, b4])\n\n elif split_scheme == 'O11-6':\n coeffs = lines[67].strip().split(', ')\n coeffs += lines[68].strip().split(', ')\n\n stages = lines[71].strip().split(', ')\n stages += lines[72].strip().split(', ')\n\n a1 = eval(lines[78][lines[78].find('=')+1:].strip())\n a2 = eval(lines[79][lines[79].find('=')+1:].strip())\n a3 = eval(lines[80][lines[80].find('=')+1:].strip())\n a4 = eval(lines[81][lines[81].find('=')+1:].strip())\n a5 = eval(lines[82][lines[82].find('=')+1:].strip())\n a6 = eval(lines[83][lines[83].find('=')+1:].strip())\n b1 = eval(lines[85][lines[85].find('=')+1:].strip())\n b2 = eval(lines[86][lines[86].find('=')+1:].strip())\n b3 = eval(lines[87][lines[87].find('=')+1:].strip())\n b4 = eval(lines[88][lines[88].find('=')+1:].strip())\n b5 = eval(lines[89][lines[89].find('=')+1:].strip())\n b6 = eval(lines[90][lines[90].find('=')+1:].strip())\n\n number_of_stages = dict(a = 6, b = 6)\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order, number_of_stages = number_of_stages,\n a = [None, a1, a2, a3, a4, a5, a6],\n b = [None, b1, b2, b3, b4, b5, b6])\n\n elif split_scheme == 'O14-6':\n coeffs = lines[96].strip().split(', ')\n coeffs += lines[97].strip().split(', ')\n coeffs += lines[98].strip().split(', ')\n\n stages = lines[101].strip().split(', ')\n stages += lines[102].strip().split(', ')\n stages += lines[103].strip().split(', ')\n\n a1 = eval(lines[110][lines[110].find('=')+1:].strip())\n a2 = eval(lines[111][lines[111].find('=')+1:].strip())\n a3 = eval(lines[112][lines[112].find('=')+1:].strip())\n a4 = eval(lines[113][lines[113].find('=')+1:].strip())\n a5 = eval(lines[114][lines[114].find('=')+1:].strip())\n a6 = eval(lines[115][lines[115].find('=')+1:].strip())\n a7 = eval(lines[116][lines[116].find('=')+1:].strip())\n a8 = eval(lines[117][lines[117].find('=')+1:].strip())\n b1 = eval(lines[119][lines[119].find('=')+1:].strip())\n b2 = eval(lines[120][lines[120].find('=')+1:].strip())\n b3 = 
eval(lines[121][lines[121].find('=')+1:].strip())\n b4 = eval(lines[122][lines[122].find('=')+1:].strip())\n b5 = eval(lines[123][lines[123].find('=')+1:].strip())\n b6 = eval(lines[124][lines[124].find('=')+1:].strip())\n b7 = eval(lines[125][lines[125].find('=')+1:].strip())\n\n number_of_stagess = dict(a = 8, b = 7)\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order, number_of_stages = number_of_stages,\n a = [None, a1, a2, a3, a4, a5, a6, a7, a8],\n b = [None, b1, b2, b3, b4, b5, b6, b7])\n\n return splitting", "def mapped_split(reddit_directory, data_set_name, mapped_col, result_col, num_splits):\n\n table_files = os.listdir(os.path.join(reddit_directory, data_set_name))\n args_list = [\n (reddit_directory, data_set_name, table_fname, mapped_col, result_col, num_splits)\n for table_fname in table_files\n ]\n\n pool = mp.Pool(pool_size)\n pool.map(unpack_mapped_split_core, args_list)", "def _split_generators(self, dl_manager):\n if isinstance(self.config.data_files, (str, list, tuple)):\n # Handle case with only one split\n files = self.config.data_files\n if isinstance(files, str):\n files = [files]\n return [\n nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={\"files\": files})\n ]\n else:\n # Handle case with several splits and a dict mapping\n splits = []\n for split_name in [nlp.Split.TRAIN, nlp.Split.VALIDATION, nlp.Split.TEST]:\n if split_name in self.config.data_files:\n files = self.config.data_files[split_name]\n if isinstance(files, str):\n files = [files]\n splits.append(\n nlp.SplitGenerator(name=split_name, gen_kwargs={\"files\": files})\n )\n return splits", "def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()", "def split_list(cmdline, has_options):\n token_list = []\n in_positional_params = False if has_options else True\n for token in cmdline:\n if in_positional_params or token == \"--\":\n token_list.append(token)\n in_positional_params = True\n elif token[0] != '-': # then it is a value\n token_list.append(token)\n elif token.startswith(\"--\"):\n token_list.extend(Splitter._handle_long_form(token))\n else:\n token_list.extend(Splitter._handle_short_form(token))\n return Stack(token_list)", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def split_and_load(batch, ctx_list):\n num_ctx = len(ctx_list)\n new_batch = []\n for i, data in enumerate(batch):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n new_batch.append(new_data)\n return new_batch", "def doTitleSplits(self, title, splitter):\n largetTextLen = 0\n largeTextIndex = 0\n titlePieces = splitter.split(title)\n \n # find the largest title piece\n for i in range(len(titlePieces)):\n current = titlePieces[i]\n if len(current) > largetTextLen:\n largetTextLen = len(current)\n 
largeTextIndex = i\n \n # replace content\n title = titlePieces[largeTextIndex]\n return TITLE_REPLACEMENTS.replaceAll(title).strip()", "def split_and_load(batch, ctx_list):\n new_batch = []\n for i, data in enumerate(batch):\n if isinstance(data, (list, tuple)):\n new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]\n else:\n new_data = [data.as_in_context(ctx_list[0])]\n new_batch.append(new_data)\n return new_batch", "def split_list(list_in,number_of_pieces):\n output_length = len(list_in) / number_of_pieces\n output = []\n piece = []\n counter = 0\n for list_item in list_in:\n counter += 1\n piece.append(list_item)\n if counter >= output_length:\n output.append(piece)\n counter = 0\n piece = []\n # Make sure nothing is missed\n if len(piece) > 0:\n output.append(piece)\n return output", "def SetSplitValues(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_SplitCurve_SetSplitValues(self, *args)", "def encode_splits(data):\n lookup = {'train': 0, 'val': 1, 'test': 2}\n return [lookup[datum['split']] for datum in data]", "def process(self, data, output, processes, process):\n slice_list = du.get_grouped_slice_list(data, self.get_filter_frame_type(), self.get_max_frames())\n self._process_chunks(slice_list, data, output, len(processes), process)", "def split_simplified_json_acceptor_dataset(dataset: SimpleJsonAcceptorDataset, split_list):\n import numpy as np\n # create a list of lengths [0.1, 0.4, 0.5] -> [100, 500, 1000(=len_data)]\n split_list = np.multiply(np.cumsum(split_list), len(dataset)).astype(\"int\").tolist()\n # list of shuffled indices to sample randomly\n shuffled_idx = list(range(len(dataset)))\n shuffle(shuffled_idx)\n # split the data itself\n new_data = [[] for _ in range(len(split_list))]\n for sub_data_idx, (start, end) in enumerate(zip([0] + split_list[:-1], split_list)):\n for i in range(start, end):\n new_data[sub_data_idx].append(dataset.__getitem__(shuffled_idx[i]))\n # create sub sets\n sub_datasets = []\n for i in range(len(new_data)):\n ready_dict = {\n \"_idx_to_chr\": dataset._idx_to_chr,\n \"_chr_embed\": dataset._chr_embed,\n \"data\": new_data[i]\n }\n sub_datasets.append(SimpleJsonAcceptorDataset(dataset._size, ready=ready_dict))\n return sub_datasets", "def submitlist(jb, ls):\n segstart, segend = calculatestartend(ls) # Get the segment id for the current segment\n seg = None\n opp = None\n with jb.lock: # Lock the segments dictionary\n segments = jb.segments\n if segstart in segments:\n seg, opp = segments.pop(segstart, None)\n elif segend in segments:\n seg, opp = segments.pop(segend, None)\n if seg:\n segments.pop(opp)\n else:\n segments[segstart] = (ls, segend)\n segments[segend] = (ls, segstart)\n if seg:\n reqq.put((\"merge\", (ls, seg)), )", "def split_data_set(reddit_path, data_set_name, on, num_splits, target_directories, map_columns=None):\n targets = {}\n for i in range(num_splits):\n targets[i] = os.path.join(target_directories[i], data_set_name)\n mkdir(targets[i])\n\n full_sub_data_path = os.path.join(reddit_path, data_set_name)\n data_files = map(lambda f: os.path.join(full_sub_data_path, f), os.listdir(full_sub_data_path))\n args_list = [(on, table_file, targets, num_splits, map_columns) for table_file in data_files]\n\n pool = mp.Pool(pool_size)\n pool.map(unpack_split_file_with_map, args_list)", "def split_shards(original_list, split_fractions):\n\n assert np.isclose(\n sum(split_fractions), 1.0\n ), f\"Split fractions do not sum to 1: {sum(split_fractions)}\"\n\n original_list = [str(x) for x in 
sorted(original_list)]\n\n sublists = []\n prev_index = 0\n for weight in split_fractions:\n next_index = prev_index + int(round((len(original_list) * weight), 0))\n sublists.append(original_list[prev_index:next_index])\n prev_index = next_index\n\n assert sum([len(x) for x in sublists]) == len(original_list), \"Split size mismatch\"\n\n if not all(len(x) > 0 for x in sublists):\n logger.warning(\"Unexpected shard distribution encountered - trying to fix this\")\n if len(split_fractions) == 3:\n if len(sublists[0]) > 2:\n sublists[0] = original_list[:-2]\n sublists[1] = original_list[-2:-1]\n sublists[2] = original_list[-1:]\n else:\n raise ValueError(\n f\"Not enough shards (#{len(original_list)}) for new distribution\"\n )\n\n elif len(split_fractions) == 2:\n sublists[0] = original_list[:-1]\n sublists[1] = original_list[-1:]\n else:\n raise ValueError\n logger.warning(f\"New shard split: {sublists}\")\n\n if len(sublists) != 3:\n logger.warning(\"No test shards specified\")\n sublists.append(None)\n\n return sublists", "async def update_split(self, name, delta, items=None):\n\n # Generates list\n splits_list = await self._get_all_splits()\n\n # Gets index of name based on fuzzy search match\n index = await self._get_name_index(name, splits_list)\n\n # If index is valid, adds provided value to splits\n if index == -1:\n return None\n prev_val = int(splits_list[index][1])\n new_val = prev_val + delta\n await self._set_split(index + 1, new_val)\n\n # If item provided, appends item to end of item list\n if items is not None:\n item_list = self.sheet.cell(index + 1, 3).value\n if len(item_list) != 0:\n new_item_list = item_list + \", \" + items\n else: \n new_item_list = items\n self.sheet.update_cell(index + 1, 3, new_item_list)\n\n # Returns both previous and new value\n return prev_val, new_val, splits_list[index][0]", "def generate_numerical_splits( records, index ):\n possible = {}\n for r in records:\n possible[ r.features[index] ] = True\n possible = possible.keys()\n splits = []\n\n for i in xrange(0, len(possible)-1):\n s = Split(is_numerical=True)\n s.set_numerical_range( possible[i] )\n s.place( records, index )\n splits.append( s )\n\n return splits", "def get_split_positions(read, min_gap):\n cigar = read.cigar\n # Cigar string is a list of tuples:\n if len(read.cigar) <= 1:\n return [] # no break points = empty list of break point positions\n\n ##\n # read has break points if cigar string is longer than 1\n\n # This is a list with the breakpoint tuples\n list_of_break_point_positions = []\n\n # set the current position on the genome\n if cigar[0][0] == 0:\n current_pos = int(read.positions[0])\n else:\n current_pos = int(read.positions[0]) - cigar[0][1]\n\n # Search for breakpoints in cigar and get the corresponding position on the genome\n\n i = 0\n for info_tuple in cigar:\n # If current segment in cigar string is aligned.\n if info_tuple[0] == 0:\n # Special case when at first segment:\n if i == 0 and cigar[1][1] >= min_gap: # first end-split\n list_of_break_point_positions.append((current_pos + info_tuple[1] , True))\n\n # Special case when at last segment:\n elif i == len(cigar) - 1 and cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n\n # Internal segments:\n elif cigar[i - 1][1] >= min_gap and cigar[i + 1][1] >= min_gap:\n if cigar[i - 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos, False))\n if cigar[i + 1][1] >= min_gap:\n list_of_break_point_positions.append((current_pos + info_tuple[1] - 1, True))\n i 
+= 1\n\n current_pos += info_tuple[1]\n\n return(list_of_break_point_positions)", "def split_sort_merge(items):\n # TODO: Running time: ??? Why and under what conditions?\n # TODO: Memory usage: ??? Why and under what conditions?\n # Split items list into approximately equal halves\n pivot = int(len(items)/2)\n first_half = items[:pivot]\n second_half = items[pivot:]\n # TODO: Sort each half using any other sorting algorithm\n while not is_sorted(first_half):\n bubble_sort(first_half)\n\n while not is_sorted(second_half):\n insertion_sort(second_half)\n # TODO: Merge sorted halves into one list in sorted order\n # Why does this mutate when we use list[:]\n items[:] = merge(first_half,second_half)", "def process_fix_list(fix_list, fixes):\r\n for line in fix_list:\r\n yield process_fix_line(line, fixes)", "def set_split(self, split, force=True):\n if(force or self.split != split):\n c = (0, 1)[split]\n self.debug_print('set split mode: %d' % c)\n r = self.send_com(0x0f, [c])\n if(r):\n self.split = split\n return r", "def split(self, stage, iterator, lengths, inner_to_outer=True):\n stage_id = self._resolve_stage_id(stage)\n\n self.state_object, res = _ffi_api.StateSplit(self.state_object, stage_id, iterator, lengths,\n inner_to_outer)\n return res", "def split(self):\n\n # FIXME: user should be able to change the default behavior of\n # this function (for instance user may require one filter not\n # to split the content of the input file and the same input \n # to be used by the next filter.\n \n utils.split_file(self.files['hit_ids'],\n self.files['input'],\n self.files['filtered_reads'],\n self.files['survived_reads'])", "def test_n_group_split(self):\n # Test 2 groups like HalfSplitter first\n hs = NGroupPartitioner(2)\n\n for isreversed, splitter in enumerate((hs, hs)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in hs.generate(self.data) ]\n self.assertTrue(len(splits) == 2)\n\n for i, p in enumerate(splits):\n self.assertTrue( len(p) == 2 )\n self.assertTrue( p[0].nsamples == 50 )\n self.assertTrue( p[1].nsamples == 50 )\n\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [5, 6, 7, 8, 9])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4])\n\n # check if it works on pure odd and even chunk ids\n moresplits = [ list(spl.generate(p)) for p in hs.generate(splits[0][0])]\n\n for split in moresplits:\n self.assertTrue(split[0] != None)\n self.assertTrue(split[1] != None)\n\n # now test more groups\n s5 = NGroupPartitioner(5)\n\n # get the splits\n for isreversed, s5splitter in enumerate((s5, s5)):\n if isreversed:\n spl = Splitter(attr='partitions', reverse=True)\n else:\n spl = Splitter(attr='partitions')\n splits = [ list(spl.generate(p)) for p in s5splitter.generate(self.data) ]\n\n # must have 10 splits\n self.assertTrue(len(splits) == 5)\n\n # check split content\n assert_array_equal(splits[0][1-isreversed].sa['chunks'].unique,\n [0, 1])\n assert_array_equal(splits[0][isreversed].sa['chunks'].unique,\n [2, 3, 4, 5, 6, 7, 8, 9])\n assert_array_equal(splits[1][1-isreversed].sa['chunks'].unique,\n [2, 3])\n assert_array_equal(splits[1][isreversed].sa['chunks'].unique,\n [0, 1, 4, 5, 6, 7, 8, 9])\n # ...\n 
assert_array_equal(splits[4][1-isreversed].sa['chunks'].unique,\n [8, 9])\n assert_array_equal(splits[4][isreversed].sa['chunks'].unique,\n [0, 1, 2, 3, 4, 5, 6, 7])\n\n\n # Test for too many groups\n def splitcall(spl, dat):\n return list(spl.generate(dat))\n s20 = NGroupPartitioner(20)\n self.assertRaises(ValueError,splitcall,s20,self.data)", "def split_data(data_dir, split_sum, split_count):\n with open(data_dir + \"data.json\") as raw_file:\n raw_data = json.load(raw_file)[\"list_string\"]\n\n raw_data_dict = {}\n raw_data_dict[\"pos\"] = \"\"\n raw_data_dict[\"negation_label\"] = 0\n raw_data_dict[\"error_label\"] = 0\n raw_data_dict[\"semantic_label\"] = 0\n\n for i in range(0, split_sum, split_count):\n raw_data_split = []\n raw_data_range = raw_data[i:i+split_count]\n\n for pos in raw_data_range:\n raw_data_dict[\"pos\"] = pos\n raw_data_split.append(raw_data_dict.copy())\n\n split_dir = data_dir + \"data_\" + str(i) + \"_\" + str(i+split_count) + \".json\"\n with open(split_dir, \"w\") as out_file:\n json.dump(raw_data_split, out_file, indent=4)", "def test_parse_split_index_ordering():\n index = [5, 37, 38, 56, 111] # test has max index 9999\n split = \"test\"\n kwargs = dict(epochs=1, batch_size=1, dataset_dir=DATASET_DIR, shuffle_files=False)\n ds = datasets.mnist(split=split, **kwargs)\n fixed_order = []\n for i, (x, y) in enumerate(ds):\n if i in index:\n fixed_order.append(x)\n if i >= max(index):\n break\n\n sliced_split = f\"{split}[{index}]\"\n ds = datasets.mnist(split=sliced_split, **kwargs)\n output_x = [x for (x, y) in ds]\n assert len(fixed_order) == len(output_x)\n for x_i, x_j in zip(fixed_order, output_x):\n assert (x_i == x_j).all()", "def split_sort_merge(items):\n # TODO: Split items list into approximately equal halves\n pivot = len(items) // 2\n # TODO: Sort each half using any other sorting algorithm\n # sort first half in-place (insertion sort)\n left = insertion_sort(items[:pivot])\n\n right = insertion_sort(items[pivot:])\n # TODO: Merge sorted halves into one list in sorted order\n # merge the two half list (merge function but this does this in-place)\n sorted_list = merge(left, right)\n # change the input items\n items[:] = sorted_list\n return items", "def split():\n str_list = sys.stdin.readlines()\n element_list = list()\n\n for line in str_list:\n element_list.extend(split_line(line.rstrip()))\n\n for element in element_list:\n print(element)", "def split_format(splits):\n fmt_splits = []\n for split_idx, (split_label, split_str, dist) in enumerate(splits):\n comps = split_str.split(':')\n # if some splits are missing data (from original NYC data file)\n if len(comps) == 3:\n comps = map(int, comps)\n split_mins = comps[0]*60 + comps[1] + comps[2]/60\n datum = {'split_label': split_label, 'split_str': split_str,\n 'split_mins': split_mins, 'split_idx': split_idx,\n 'split_dist': dist}\n else:\n datum = {'split_label': split_label, 'split_str': '--',\n 'split_mins': '--', 'split_idx': split_idx,\n 'split_dist': dist}\n fmt_splits.append(datum)\n\n return fmt_splits", "def splitVRdata(partdata, halodata, pids_halos, pids, coords, vels, nhalo, nsubhalo):\n\n\t# Arrays to hold different subsets of FOF group particles\n\tpids_background = np.array(pids_halos[:nhalo], dtype = 'object')\n\tcoords_background = np.empty(nhalo, dtype = 'object')\n\tvels_background = np.empty(nhalo, dtype = 'object')\n\tpids_sub = np.array(pids_halos[nhalo:], dtype = 'object')\n\tcoords_sub = np.empty(nsubhalo, dtype = 'object')\n\tvels_sub = np.empty(nsubhalo, dtype = 
'object')\n\n\t# Create analogues to pids_halos (i.e. array where each\n\t# entry is the coordinates and velocities for each particle\n\t# in that (sub)halo)\n\tpids_all = np.concatenate(pids_halos)\n\tpid_idx = np.argsort(pids)\n\tpid_sorted = pids[pid_idx]\n\tmatch_idx = np.searchsorted(pid_sorted, pids_all)\n\tidxs = pid_idx[match_idx]\n\tcoords_halos = coords[idxs]\n\tvels_halos = vels[idxs]\n\n\t# Indices that mark the first and last particle in each (sub)halo\n\tlinds = partdata['Offset'] + partdata['Offset_unbound']\n\tuinds = linds + partdata['Npart']\n\n\t# (Field) haloes\n\tfor ihalo in range(nhalo):\n\t\tcoords_background[ihalo] = np.array([coord for coord in coords_halos[linds[ihalo]:uinds[ihalo]]])\n\t\tvels_background[ihalo] = np.array([vel for vel in vels_halos[linds[ihalo]:uinds[ihalo]]])\n\n\t# Subhaloes\n\tfor isub in range(nhalo, nhalo + nsubhalo):\n\t\tidx = isub - nhalo\n\t\tcoords_sub[idx] = np.array([coord for coord in coords_halos[linds[isub]:uinds[isub]]])\n\t\tvels_sub[idx] = np.array([vel for vel in vels_halos[linds[isub]:uinds[isub]]])\n\n\t# Get PIDs of all subhaloes hosted by each field halo to create\n\t# arrays containing ALL particles in the FOF group\n\thostHaloID = halodata['hostHaloID']\n\tpids_fof = np.empty(nhalo, dtype = 'object')\n\tcoords_fof = np.empty(nhalo, dtype = 'object')\n\tvels_fof = np.empty(nhalo, dtype = 'object')\n\tfor ihalo in range(nhalo):\n\t\tsubs = np.where(hostHaloID == ihalo + 1)[0] - nhalo\n\t\tif subs.size > 0:\n\t\t\t#print(pids_sub[subs])\n\t\t\t#print(np.concatenate(pids_sub[subs]))\n\t\t\tpids_fof[ihalo] = np.concatenate((pids_background[ihalo], np.concatenate(pids_sub[subs])))\n\t\t\tcoords_fof[ihalo] = np.concatenate((coords_background[ihalo], np.concatenate(coords_sub[subs])))\n\t\t\tvels_fof[ihalo] = np.concatenate((vels_background[ihalo], np.concatenate(vels_sub[subs])))\n\t\telse: # This halo hosts no subhaloes\n\t\t\tpids_fof[ihalo] = pids_background[ihalo]\n\t\t\tcoords_fof[ihalo] = coords_background[ihalo]\n\t\t\tvels_fof[ihalo] = vels_background[ihalo]\n\n\t# Construct FOF components dictionary\n\tfofdata = {}\n\tfofdata['FOF/PIDs'] = pids_fof\n\tfofdata['FOF/Coordinates'] = coords_fof\n\tfofdata['FOF/Velocities'] = vels_fof\n\tfofdata['Background/PIDs'] = pids_background\n\tfofdata['Background/Coordinates'] = coords_background\n\tfofdata['Background/Velocities'] = vels_background\n\tfofdata['Satellite/PIDs'] = pids_sub\n\tfofdata['Satellite/Coordinates'] = coords_sub\n\tfofdata['Satellite/Velocities'] = vels_sub\n\n\treturn fofdata", "def split_dataset(self, split):\n trunk_pos_size = math.ceil((1 - split) * len(self.Pos))\n trunk_neg_size = math.ceil((1 - split) * len(self.Neg))\n trunk_num = int(1 / (1 - split))\n pos_temp = list()\n neg_temp = list()\n for index in range(trunk_num):\n pos_temp.append(self.Pos[index * trunk_pos_size:(index + 1) *\n trunk_pos_size])\n neg_temp.append(self.Neg[index * trunk_neg_size:(index + 1) *\n trunk_neg_size])\n self.test = pos_temp.pop(2) + neg_temp.pop(2)\n # self.train = [i for item in pos_temp + neg_temp for i in item]\n self.train = []\n for item in pos_temp + neg_temp:\n for i in item:\n self.train.append(i)\n\n random.shuffle(self.train)\n random.shuffle(self.test)", "def split(input, output, fields, delimiter, encoding, verbose, format_in, zipfile, gzipfile, chunksize, filter):\n if verbose:\n enableVerbose()\n options = {}\n options['delimiter'] = delimiter\n options['fields'] = fields\n options['output'] = output\n options['encoding'] = encoding\n 
options['format_in'] = format_in\n options['zipfile'] = zipfile\n options['gzipfile'] = gzipfile\n options['chunksize'] = chunksize\n options['filter'] = filter\n acmd = Selector()\n acmd.split(input, options)\n pass", "def _split_lines(self, lines, separator_marker):\n result = []\n current_group = []\n for line in lines:\n if re.match(rf'[^\\S\\n]*{separator_marker}\\w+(\\(.*\\))?:', line):\n if current_group:\n result.append(current_group)\n current_group = []\n current_group.append(line)\n if current_group:\n result.append(current_group)\n return result", "def split_data(basedir, data_split=0.80):\n manip = data_manipulator(basedir)\n manip.train_test_split(data_split=data_split)", "def split(self):\n left = BPlusNode(self.order)\n right = BPlusNode(self.order)\n mid = self.order // 2\n\n left.keys = self.keys[:mid]\n left.values = self.values[:mid]\n\n right.keys = self.keys[mid:]\n right.values = self.values[mid:]\n\n # When the node is split, set the parent key to the left-most key of the right child node.\n self.keys = [right.keys[0]]\n self.values = [left, right]\n self.leaf = False", "def make_splits(input_pkl, test_split=0.1, val_split=0.1):\n if (test_split > 1) or (val_split > 1) or (test_split + val_split > 1) or (test_split <= 0) or (val_split <= 0):\n logging.warning('Check the input for make splits, quitting')\n exit()\n\n main_dict = load_pickle(input_pkl)\n data, labels = main_dict['data'], main_dict['labels']\n idx_arr = np.random.choice(len(data), len(data))\n data, labels = data[idx_arr], labels[idx_arr]\n print(len(data[0][-1]))\n # Find the split sizes\n val_split = int(len(data) * val_split)\n test_split = val_split + int(len(data) * test_split)\n\n # Make and save the splits\n save_pickle({'data': data[:val_split], 'labels': labels[:val_split]}, 'data/val.pkl')\n save_pickle({'data': data[val_split:test_split], 'labels': labels[val_split:test_split]}, 'data/test.pkl')\n save_pickle({'data': data[test_split:], 'labels': labels[test_split:]}, 'data/train.pkl')", "def clean_split(x_split):\n clean_x_split = []\n\n undef_idx_of_jet_num = constant.UNDEF_IDX_OF_JET_NUM\n\n for idx, x in enumerate(x_split):\n undef_col_idx = undef_idx_of_jet_num[idx].copy()\n undef_col_idx.append(constant.JET_NUM_COL)\n new_x = np.delete(x, undef_col_idx, 1)\n clean_x_split.append(new_x)\n\n return clean_x_split", "def _split_to_wordpieces(self, tokens: List[str]) -> Tuple[List[str], List[int]]:\n bert_tokens = [] # Original tokens split into wordpieces.\n # Index of each wordpiece that starts a new token.\n token_start_indices = []\n for i, token in enumerate(tokens):\n # '+ 1' is because bert_tokens will be prepended by [CLS] token later.\n token_start_indices.append(len(bert_tokens) + 1)\n pieces = self._tokenizer.tokenize(token)\n bert_tokens.extend(pieces)\n return bert_tokens, token_start_indices", "def test_split(self):\n\n p1 = \"std::vector<char, std::allocator<char> >\"\n p2 = \"std::vector<int, std::allocator<int> >\"\n args_list = [\n \"const std::basic_string<char> &\", \"const int &\", \"const double &\"]\n\n for arg in args_list:\n\n li = [p1]\n name, args = declarations.templates.split(\n \"myClass0a<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass0a\")\n self.assertEqual(args, li)\n\n li = [p1, p2]\n name, args = declarations.templates.split(\n \"myClass0b<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass0b\")\n self.assertEqual(args, li)\n\n li = [p1, p2, p2]\n name, args = declarations.templates.split(\n \"myClass0c<\" + \", \".join(li) + 
\">\")\n self.assertEqual(name, \"myClass0c\")\n self.assertEqual(args, li)\n\n li = [p1 + \" (\" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass1<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass1\")\n self.assertEqual(args, li)\n\n li = [p1 + \" (\" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass2<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass2\")\n self.assertEqual(args, li)\n\n li = [p2 + \" (\" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass3<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass3\")\n self.assertEqual(args, li)\n\n li = [p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass4<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass4\")\n self.assertEqual(args, li)\n\n li = [\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\",\n p1]\n name, args = declarations.templates.split(\n \"myClass5<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass5\")\n self.assertEqual(args, li)\n\n li = [\n p1,\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass6<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass6\")\n self.assertEqual(args, li)\n\n li = [\n p2 + \" (\" + arg + \")\",\n p1,\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass7<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass7\")\n self.assertEqual(args, li)\n\n li = [\n p1,\n p2 + \" (\" + arg + \")\",\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\"]\n name, args = declarations.templates.split(\n \"myClass8<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass8\")\n self.assertEqual(args, li)\n\n li = [\n p2 + \" (\" + arg + \")\",\n p1 + \" (\" + arg + \", \" + arg + \")\",\n p1]\n name, args = declarations.templates.split(\n \"myClass9<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass9\")\n self.assertEqual(args, li)\n\n li = [\n p2 + \" (\" + arg + \")\",\n p1 + \" (\" + arg + \", \" + arg + \", \" + arg + \")\",\n p1,\n p2]\n name, args = declarations.templates.split(\n \"myClass10<\" + \", \".join(li) + \">\")\n self.assertEqual(name, \"myClass10\")\n self.assertEqual(args, li)", "def splitting_coefficients(filepath, split_scheme):\n infile = open(filepath, 'r')\n lines = infile.readlines()\n infile.close()\n\n if split_scheme == 'LF2':\n coeffs = lines[6].strip().split(', ')\n stages = lines[9].strip().split(', ')\n a1 = eval(lines[14][lines[14].find('=')+1:].strip())\n a2 = eval(lines[15][lines[15].find('=')+1:].strip())\n b1 = eval(lines[16][lines[16].find('=')+1:].strip())\n b2 = eval(lines[17][lines[17].find('=')+1:].strip())\n\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order,\n a = [None, a1, a2],\n b = [None, b1, b2])\n\n elif split_scheme == 'Y4':\n coeffs = lines[23].strip().split(', ')\n stages = lines[26].strip().split(', ')\n a1 = eval(lines[31][lines[31].find('=')+1:].strip())\n a2 = eval(lines[32][lines[32].find('=')+1:].strip())\n a3 = eval(lines[33][lines[33].find('=')+1:].strip())\n a4 = eval(lines[34][lines[34].find('=')+1:].strip())\n b1 = eval(lines[36][lines[36].find('=')+1:].strip())\n b2 = eval(lines[37][lines[37].find('=')+1:].strip())\n b3 = eval(lines[38][lines[38].find('=')+1:].strip())\n b4 = 
eval(lines[39][lines[39].find('=')+1:].strip())\n\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order,\n a = [None, a1, a2, a3, a4],\n b = [None, b1, b2, b3, b4])\n\n elif split_scheme == 'O6-4':\n coeffs = lines[45].strip().split(', ')\n stages = lines[48].strip().split(', ')\n a1 = eval(lines[53][lines[53].find('=')+1:].strip())\n a2 = eval(lines[54][lines[54].find('=')+1:].strip())\n a3 = eval(lines[55][lines[55].find('=')+1:].strip())\n a4 = eval(lines[56][lines[56].find('=')+1:].strip())\n b1 = eval(lines[58][lines[58].find('=')+1:].strip())\n b2 = eval(lines[59][lines[59].find('=')+1:].strip())\n b3 = eval(lines[60][lines[60].find('=')+1:].strip())\n b4 = eval(lines[61][lines[61].find('=')+1:].strip())\n\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order,\n a = [None, a1, a2, a3, a4],\n b = [None, b1, b2, b3, b4])\n\n elif split_scheme == 'O11-6':\n coeffs = lines[67].strip().split(', ')\n coeffs += lines[68].strip().split(', ')\n\n stages = lines[71].strip().split(', ')\n stages += lines[72].strip().split(', ')\n\n a1 = eval(lines[78][lines[78].find('=')+1:].strip())\n a2 = eval(lines[79][lines[79].find('=')+1:].strip())\n a3 = eval(lines[80][lines[80].find('=')+1:].strip())\n a4 = eval(lines[81][lines[81].find('=')+1:].strip())\n a5 = eval(lines[82][lines[82].find('=')+1:].strip())\n a6 = eval(lines[83][lines[83].find('=')+1:].strip())\n b1 = eval(lines[85][lines[85].find('=')+1:].strip())\n b2 = eval(lines[86][lines[86].find('=')+1:].strip())\n b3 = eval(lines[87][lines[87].find('=')+1:].strip())\n b4 = eval(lines[88][lines[88].find('=')+1:].strip())\n b5 = eval(lines[89][lines[89].find('=')+1:].strip())\n b6 = eval(lines[90][lines[90].find('=')+1:].strip())\n\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order,\n a = [None, a1, a2, a3, a4, a5, a6],\n b = [None, b1, b2, b3, b4, b5, b6])\n\n elif split_scheme == 'O14-6':\n coeffs = lines[96].strip().split(', ')\n coeffs += lines[97].strip().split(', ')\n coeffs += lines[98].strip().split(', ')\n\n stages = lines[101].strip().split(', ')\n stages += lines[102].strip().split(', ')\n stages += lines[103].strip().split(', ')\n\n a1 = eval(lines[110][lines[110].find('=')+1:].strip())\n a2 = eval(lines[111][lines[111].find('=')+1:].strip())\n a3 = eval(lines[112][lines[112].find('=')+1:].strip())\n a4 = eval(lines[113][lines[113].find('=')+1:].strip())\n a5 = eval(lines[114][lines[114].find('=')+1:].strip())\n a6 = eval(lines[115][lines[115].find('=')+1:].strip())\n a7 = eval(lines[116][lines[116].find('=')+1:].strip())\n a8 = eval(lines[117][lines[117].find('=')+1:].strip())\n b1 = eval(lines[119][lines[119].find('=')+1:].strip())\n b2 = eval(lines[120][lines[120].find('=')+1:].strip())\n b3 = eval(lines[121][lines[121].find('=')+1:].strip())\n b4 = eval(lines[122][lines[122].find('=')+1:].strip())\n b5 = eval(lines[123][lines[123].find('=')+1:].strip())\n b6 = eval(lines[124][lines[124].find('=')+1:].strip())\n b7 = eval(lines[125][lines[125].find('=')+1:].strip())\n\n order = dict(coeffs = coeffs, stages = stages)\n splitting = dict(order = order,\n a = [None, a1, a2, a3, a4, a5, a6, a7, a8],\n b = [None, b1, b2, b3, b4, b5, b6, b7])\n\n return splitting" ]
[ "0.69268346", "0.57384413", "0.5705373", "0.5634635", "0.56108665", "0.5545612", "0.5541551", "0.5517076", "0.5466351", "0.54659456", "0.54127634", "0.5412377", "0.53936285", "0.5370275", "0.53439856", "0.5326818", "0.53085357", "0.5286673", "0.5274086", "0.5254431", "0.52331835", "0.52204424", "0.5218673", "0.5206883", "0.51222473", "0.5118309", "0.5107839", "0.5102903", "0.51009226", "0.5100633", "0.5099031", "0.5096876", "0.5088275", "0.50685716", "0.50503767", "0.5049566", "0.50261265", "0.49981284", "0.49978057", "0.4968625", "0.4959227", "0.49566507", "0.49468875", "0.49343067", "0.49341", "0.49294484", "0.49266273", "0.49209282", "0.4911546", "0.49016136", "0.48959494", "0.48927212", "0.48923254", "0.4887497", "0.4883822", "0.48640972", "0.48603275", "0.48552135", "0.48505604", "0.48459035", "0.4838106", "0.4832643", "0.48325792", "0.48287618", "0.48287618", "0.48273134", "0.48261976", "0.4822733", "0.48217735", "0.48207107", "0.48159355", "0.48158434", "0.48118573", "0.48104915", "0.48048708", "0.4801693", "0.47966567", "0.47933576", "0.47912282", "0.47906086", "0.47871307", "0.47860488", "0.4785616", "0.4778195", "0.47766873", "0.47658893", "0.47582966", "0.47553197", "0.475216", "0.47509405", "0.47429204", "0.47420427", "0.47282583", "0.4718846", "0.4716042", "0.47084308", "0.4693666", "0.46912766", "0.46890935", "0.4688383" ]
0.6112161
1
Given a list of dividends whose ex_dates are all the next trading day, calculate and store the cash and/or stock payments to be paid on each dividend's pay date.
def earn_dividends(self, cash_dividends, stock_dividends): for cash_dividend in cash_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend # Store the earned dividends so that they can be paid on the # dividends' pay_dates. div_owed = self.positions[cash_dividend.instrument].earn_dividend( cash_dividend, ) try: self._unpaid_dividends[cash_dividend.pay_date].append(div_owed) except KeyError: self._unpaid_dividends[cash_dividend.pay_date] = [div_owed] for stock_dividend in stock_dividends: self._dirty_stats = True # only mark dirty if we pay a dividend div_owed = self.positions[ stock_dividend.instrument ].earn_stock_dividend(stock_dividend) try: self._unpaid_stock_dividends[stock_dividend.pay_date].append( div_owed, ) except KeyError: self._unpaid_stock_dividends[stock_dividend.pay_date] = [ div_owed, ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pay_dividends(self, next_trading_day):\n net_cash_payment = 0.0\n\n try:\n payments = self._unpaid_dividends[next_trading_day]\n # Mark these dividends as paid by dropping them from our unpaid\n del self._unpaid_dividends[next_trading_day]\n except KeyError:\n payments = []\n\n # representing the fact that we're required to reimburse the owner of\n # the stock for any dividends paid while borrowing.\n for payment in payments:\n net_cash_payment += payment['amount']\n\n # Add stock for any stock dividends paid. Again, the values here may\n # be negative in the case of short positions.\n try:\n stock_payments = self._unpaid_stock_dividends[next_trading_day]\n except KeyError:\n stock_payments = []\n\n for stock_payment in stock_payments:\n payment_instrument = stock_payment['payment_instrument']\n share_count = stock_payment['share_count']\n # note we create a Position for stock dividend if we don't\n # already own the instrument\n if payment_instrument in self.positions:\n position = self.positions[payment_instrument]\n else:\n position = self.positions[payment_instrument] = Position(\n payment_instrument,\n )\n\n position.amount += share_count\n\n return net_cash_payment", "def get_dividends(self, stock_list, start_date=None, end_date=None):\n df_dict = {}\n df_list = []\n file_in_path = [year.replace(\".csv\", \"\") for year in self.get_csv_in_path(self.dividend_eps_path)]\n if not start_date:\n start_date = file_in_path[0]\n if not end_date:\n end_date = file_in_path[-1]\n if start_date > end_date:\n return df_dict\n for year in range(int(start_date), int(end_date)+1):\n target_path = \"{}/{}.csv\".format(self.dividend_eps_path, year)\n df = pd.read_csv(target_path, index_col=\"名稱\")\n self.replace_nan_to_other(df, \"\")\n for stock in stock_list:\n pd_index = df.index.to_list()\n old_list = []\n if stock in pd_index:\n data = df.loc[stock]\n\n # print(\"日期 = {}\".format(data.get(\"除息交易日\")))\n if df_dict.get(stock):\n old_list = df_dict.get(stock)\n\n # check data is available\n dict = {}\n if data.get(\"現金股利\") != \"\":\n dict.update({\"除息交易日\": \"{}{}\".format(year, data.get(\"除息交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除息交易日') else \"\",\n \"現金股利\": data.get(\"現金股利\"),\n })\n if data.get(\"股票股利\") != \"\":\n dict.update({\"除權交易日\": \"{}{}\".format(year, data.get(\"除權交易日\").split(\"'\")[1].replace(\"/\", \"\")) if data.get('除權交易日') else \"\",\n \"股票股利\": data.get(\"股票股利\"),\n })\n if dict:\n old_list.append(dict)\n df_dict.update({stock: old_list})\n\n return df_dict", "def list_dividends(\n self,\n ticker: Optional[str] = None,\n ticker_lt: Optional[str] = None,\n ticker_lte: Optional[str] = None,\n ticker_gt: Optional[str] = None,\n ticker_gte: Optional[str] = None,\n ex_dividend_date: Optional[Union[str, date]] = None,\n ex_dividend_date_lt: Optional[Union[str, date]] = None,\n ex_dividend_date_lte: Optional[Union[str, date]] = None,\n ex_dividend_date_gt: Optional[Union[str, date]] = None,\n ex_dividend_date_gte: Optional[Union[str, date]] = None,\n record_date: Optional[Union[str, date]] = None,\n record_date_lt: Optional[Union[str, date]] = None,\n record_date_lte: Optional[Union[str, date]] = None,\n record_date_gt: Optional[Union[str, date]] = None,\n record_date_gte: Optional[Union[str, date]] = None,\n declaration_date: Optional[Union[str, date]] = None,\n declaration_date_lt: Optional[Union[str, date]] = None,\n declaration_date_lte: Optional[Union[str, date]] = None,\n declaration_date_gt: Optional[Union[str, date]] = None,\n declaration_date_gte: 
Optional[Union[str, date]] = None,\n pay_date: Optional[Union[str, date]] = None,\n pay_date_lt: Optional[Union[str, date]] = None,\n pay_date_lte: Optional[Union[str, date]] = None,\n pay_date_gt: Optional[Union[str, date]] = None,\n pay_date_gte: Optional[Union[str, date]] = None,\n frequency: Optional[Union[int, Frequency]] = None,\n cash_amount: Optional[float] = None,\n cash_amount_lt: Optional[float] = None,\n cash_amount_lte: Optional[float] = None,\n cash_amount_gt: Optional[float] = None,\n cash_amount_gte: Optional[float] = None,\n dividend_type: Optional[Union[str, DividendType]] = None,\n limit: Optional[int] = None,\n sort: Optional[Union[str, Sort]] = None,\n order: Optional[Union[str, Order]] = None,\n params: Optional[Dict[str, Any]] = None,\n raw: bool = False,\n options: Optional[RequestOptionBuilder] = None,\n ) -> Union[Iterator[Dividend], HTTPResponse]:\n url = \"/v3/reference/dividends\"\n\n return self._paginate(\n path=url,\n params=self._get_params(self.list_dividends, locals()),\n raw=raw,\n deserializer=Dividend.from_dict,\n options=options,\n )", "def calculate_payments(yearly_payments_percentage, cost_reductions,\n days_with_payments, days_for_discount_rate):\n\n return [period_payment(yearly_payments_percentage, ccr,\n days_with_payments[i], days_for_discount_rate[i])\n for i, ccr in enumerate(cost_reductions)]", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def _recompute(self):\n current_date = self.start_date\n self.quarterly_date_list = []\n self.daily_date_list = []\n while current_date <= self.end_date:\n current_quarter = get_quarter(current_date)\n current_year = current_date.year\n next_year, next_quarter = add_quarter(current_year, current_quarter)\n next_start_quarter_date = date(next_year, get_month(next_quarter),\n 1)\n\n days_till_next_quarter = (next_start_quarter_date -\n current_date).days\n days_till_end = (self.end_date - current_date).days\n if days_till_next_quarter <= days_till_end:\n current_start_quarter_date = date(current_year,\n get_month(current_quarter), 1)\n if current_start_quarter_date == current_date:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n elif days_till_next_quarter > self.balancing_point:\n self.quarterly_date_list.append(\n (current_year, current_quarter,\n lambda x: date(x['date_filed']) >= self.start_date))\n current_date = next_start_quarter_date\n else:\n while current_date < next_start_quarter_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)\n else:\n if days_till_end > self.balancing_point:\n if days_till_next_quarter - 1 == days_till_end:\n self.quarterly_date_list.append(\n (current_year, current_quarter, lambda x: True))\n current_date = next_start_quarter_date\n else:\n self.quarterly_date_list.append(\n (current_year, 
current_quarter,\n lambda x: date(x['date_filed']) <= self.end_date))\n current_date = self.end_date\n else:\n while current_date <= self.end_date:\n self.daily_date_list.append(current_date)\n current_date += timedelta(days=1)", "def running_total(date_list):\n return sum(d.price for d in date_list)", "def fill_prices_using_dates(ls_ls_prices, ls_ls_dates, ls_master_dates):\n dict_corrections = {}\n dict_errors = []\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n for day_ind, price in enumerate(ls_prices):\n if price != price:\n relative_day = 0\n while (day_ind + relative_day < len(ls_master_dates)-1) and\\\n (ls_ls_prices[indiv_ind][day_ind + relative_day] !=\\\n ls_ls_prices[indiv_ind][day_ind + relative_day]):\n relative_day += 1\n next_valid_date = ls_ls_dates[indiv_ind][day_ind + relative_day]\n # if next_valid_date is not None (end of series full of None)\n if next_valid_date and next_valid_date != '--':\n try:\n # could have bad info in date (check with regex?)\n next_valid_date_int = int(u'20%s%s%s' %(next_valid_date[6:],\n next_valid_date[3:5],\n next_valid_date[:2]))\n # next date must be the same or anterior to the current date\n if next_valid_date_int <= int(ls_master_dates[day_ind]):\n ls_ls_prices[indiv_ind][day_ind] = ls_ls_prices[indiv_ind][day_ind + relative_day]\n dict_corrections.setdefault(indiv_ind, []).append(day_ind)\n except:\n dict_errors.setdefault(indiv_ind, []).append(day_ind)\n return (ls_ls_prices, dict_corrections, dict_errors)", "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. 
To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def get_returns(self, start_date=None, end_date=None, stocks=None):\n if stocks is None:\n stocks = self.stocks\n\n if start_date is None:\n start_date = self.dates[0]\n\n if end_date is None:\n end_date = self.dates[-1]\n\n if type(end_date) is not datetime.datetime and type(end_date) is not pd.tslib.Timestamp:\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n\n if type(start_date) is not datetime.datetime and type(start_date) is not pd.tslib.Timestamp:\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n\n dates_to_check = self.dates[self.dates.index(start_date): self.dates.index(end_date) + 1]\n\n stock_money = []\n\n for date in dates_to_check:\n stock_money += [self.get_day_returns(stocks, date)]\n\n stock_money = pd.DataFrame({\"stock value\": stock_money}).set_index([self.dates])\n\n return_info = join_features(stock_money, self.cash)\n return_info['value'] = return_info['cash'] + return_info['stock value']\n\n return return_info", "def _get_financials_by_chunk(self, args):\n (istart, iend) = args\n comp_index = self.components.index\n # download financials\n browser=webdriver.Chrome()\n for sym in comp_index[istart:iend]:\n print('Chunk %s-%s: downloading financial data for %s' %(comp_index[istart], comp_index[iend], sym))\n stock = Symbol(sym)\n if 'Exchange' in self.components.columns:\n exch = self.components['Exchange'][sym]\n if type(exch) == pd.Series:\n # unexpected duplicates, e.g. 
AMOV\n exch = exch.iloc[0]\n if type(exch) == str:\n stock.exch = exch\n stock.get_financials(browser=browser)\n stock.save_financial_data()\n browser.quit()\n return", "def get_financials(self, update_list=True, sym_start=str(), sym_end=str(), num_procs=9):\n if self.components.empty or update_list:\n self.get_compo_list(update_list=True)\n # slice symbols\n comp_index = self.components.index\n istart = 0\n iend = len(comp_index)\n if len(sym_start) > 0 and sym_start in comp_index:\n istart = comp_index.get_loc(sym_start)\n if len(sym_end) > 0 and sym_end in comp_index:\n iend = comp_index.get_loc(sym_end)\n if istart > iend:\n (istart, iend) = (iend, istart) # make sure end is greater than start\n # download financials\n pool = mp.Pool(processes=num_procs)\n steps = np.round(np.linspace(istart, iend, num_procs+1)).astype(int)\n args = [(steps[i-1], steps[i]-1) for i in range(1,len(steps))]\n stats = pool.map(self._get_financials_by_chunk, args)\n return", "def period_payment(yearly_payments_percentage, client_cost_reduction,\n days_with_payments, days_for_discount_rate):\n\n yearly_payments_percentage = Fraction(str(yearly_payments_percentage))\n client_cost_reduction = Fraction(str(client_cost_reduction))\n\n if days_with_payments == 0:\n payments = Fraction(0)\n else:\n payments = Fraction(days_with_payments, days_for_discount_rate)\n return (yearly_payments_percentage * client_cost_reduction * payments)", "def get_prices(start, end):\n\n tickers = TICKERS # fetch tickers from config.py\n df_final = pd.DataFrame() # declared for merging purposes (inside loops)\n\n for ticker in tickers: # Loop over tickers to fetch individual price series\n\n r = requests.get(\"https://www.alphavantage.co/query?function=TIME_SERIES_DAILY_ADJUSTED&symbol=\" + ticker\n + \"&outputsize=full&apikey=\" + ALPHAVANTAGE_KEY)\n r_dict = r.json()\n\n dates = np.array([]) # this loop makes the index into an index of datetime objects. Note the format.\n for i in r_dict['Time Series (Daily)'].keys():\n datetime_obj = datetime.datetime.strptime(i, '%Y-%m-%d')\n dates = np.append(dates, datetime_obj)\n\n prices = np.array([]) # This loop extracts all prices and put them into an array\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['5. adjusted close']\n prices = np.append(prices, x)\n\n open_prices = np.array([]) # grab opening prices as well\n for i in r_dict['Time Series (Daily)']:\n x = r_dict['Time Series (Daily)'][i]['1. open']\n open_prices = np.append(open_prices, x)\n\n df = pd.DataFrame({ # This dataframe contains each individual stock\n 'Date': dates,\n str(ticker + '_' + 'adjclose'): prices,\n str(ticker + '_' + 'open'): open_prices\n })\n df = df.set_index('Date')\n\n df_final = pd.DataFrame(data=df_final,\n index=dates) # these few lines are for merging the individual dataframes\n df_final.index.name = 'Date'\n df_final = df.merge(df_final, left_index=True, right_index=True)\n\n for ticker in tickers: # convert to numeric values. 
Prices are just \"objects\"\n df_final[str(ticker + '_' + 'adjclose')] = pd.to_numeric(df_final[str(ticker + '_' + 'adjclose')])\n df_final[str(ticker + '_' + 'open')] = pd.to_numeric(df_final[str(ticker + '_' + 'open')])\n\n df_final = df_final.iloc[::-1]\n\n return df_final[start: end] # slice the dataframe at the end, only return the specified date-range.", "def compute_portvals(start_date, end_date, trades_df, start_val):\n # SETTING UP ORDERS DATAFRAME\n # Read orders file into a dataframe http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table \n orders = trades_df\n symbols = np.unique(orders['Symbol']).tolist() # List of all the symbols used in orders\n\n # SETTING UP PRICES DATAFRAME\n # Read in adjusted closing prices for given symbols, date range... drop non-trading days... add cash column\n dates = pd.date_range(start_date, end_date)\n prices = get_data(symbols, dates, addSPY=False).dropna()\n prices['cash'] = 1.00\n\n # SETTING UP TRADES DATAFRAME\n # Daily snapshot of portfolio changes (+ = Buy Order, - = Sell Order) with cash adjustments\n trades = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n trades['cash'] = 0.00\n\n for row_index, row in orders.iterrows():\n try:\n if row.Order == 'SELL':\n trades.ix[row.Date,row.Symbol] += (-1 * row.Shares) # Subtract ShareAmount for Sell \n trades.ix[row.Date,'cash'] += (row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Sell\n elif row.Order == 'BUY':\n trades.ix[row.Date,row.Symbol] += (row.Shares) # Add ShareAmount for Buy\n trades.ix[row.Date,'cash'] += (-1 * row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Buy\n else:\n print 'ERROR: order type not recognized, looking for BUY or SELL'\n except:\n print 'Unknown Error:'\n\n\n # SETTING UP HOLDINGS DATAFRAME \n # accumulating trades into holdings dataframe, snapshot of shares and cash for given day\n holdings = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n holdings['cash'] = 0.00\n holdings.ix[start_date,'cash'] = start_val # add starting cash value\n previous_row = holdings.iloc[0]\n for row_index, row in holdings.iterrows():\n holdings.ix[row_index] = previous_row + trades.ix[row_index] #previous day's value + trades\n previous_row = row\n\n #SETTING UP VALUES DATAFRAME\n # convert shares into their respective dollar amounts\n values = pd.np.multiply(holdings, prices)\n #DAILY VALUE OF THE PORTFOLIO\n portvals = values.sum(axis=1)\n return portvals", "def calculateDailyBill(service):\n bill = [] # initialize the empty list called bill, storing bill amount for each AC serviced for a particular day\n for service_ele in service:\n total = (service_ele[-1] + service_ele[-2]) * 1.05 # iterate the service list and sum up the labour_charge and cost replaced multiply 5% gov tax \n bill.append(total) # append the total amount value to bill list\n # service[service.index(service_ele)] = total # using index to locate the list element and add the total value to the last in each list element\n return bill", "def calc_price_for_period(prev_price):\n result = []\n for i in range(1, N+1):\n price = prev_price + calc_price_delta(prev_price, i)\n prev_price = price\n result.append(price)\n return result", "def get_day_returns(self, stocks=None, date=None):\n if stocks is None:\n stocks = self.stocks\n\n if date is None:\n date = self.date\n\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n stock_money = 0\n for stock in stocks:\n 
stock_day = self.stock_data[stock]\n # TODO find a better way than avging open and cloase\n stock_money += stock_day.position['Position'][date] *\\\n (stock_day.market['Close'][date] + stock_day.market['Open'][date])/2\n\n return stock_money", "def build_dividend_lists(portfolio_dict):\n # ETF dividend list\n dow_dividends = lookup_dividends(yf.Ticker(\"DIA\")) \n sp500_dividends = lookup_dividends(yf.Ticker(\"SPY\")) \n nasdaq_dividends = lookup_dividends(yf.Ticker(\"QQQ\")) \n totalmarket_dividends = lookup_dividends(yf.Ticker(\"VTI\")) \n \n # Portfolio dividends\n portfolio_dividend_dict = {}\n for key in portfolio_dict:\n portfolio_dividend_dict[key] = lookup_dividends(yf.Ticker(key))\n \n return (dow_dividends, sp500_dividends, nasdaq_dividends, totalmarket_dividends, portfolio_dividend_dict)", "def get_daily_percent_change(ticker_symbols_as_list):\n\tyahoo_finance_url = 'http://query.yahooapis.com/v1/public/yql'\n\n\t# when we make the request, pass along additional preferences, eg the SQL query\n\tticker_symbols_as_string = ','.join(ticker_symbols_as_list)\n\tdata = {'q': \"select Symbol, PercentChange from yahoo.finance.quotes where symbol in (%s)\" % ticker_symbols_as_string, \n\t'format': 'json',\n\t'diagnostics':'false',\n\t'env': 'http://datatables.org/alltables.env',}\n\n\tencoded_data = urllib.urlencode(data)\n\turl = \"%s?%s\" % (yahoo_finance_url, encoded_data)\n\n\tyahoo_response = urllib.urlopen(url).read()\n\tyahoo_json = json.loads(yahoo_response)\n\n\tdaily_percent_change_keyed_by_ticker_symbol = {}\n\n\tfor quote_result in yahoo_json['query']['results']['quote']:\n\n\t\tsymbol = quote_result['Symbol'] \n\t\tdaily_percent_change = quote_result['PercentChange'] \n\n\t\tif daily_percent_change is None:\n\t\t\tprint 'warning: no value found for percent change', symbol\n\t\t\tcontinue\n\t\t\t\n\t\t# we noticed that the percent change is often reported as a string like \"+14.35%\"...\n\t\t# let's get rid of the leading \"+\" and the trailing \"%\"\n\t\tif daily_percent_change.startswith('+'):\n\t\t\t# 'slice' the string (my_value[start_index:stop_index]); define the start index,\n\t\t\t# and in this case, no need to specify the end index\n\t\t\tdaily_percent_change = daily_percent_change[1:] \n\t\t\n\t\t# get rid of the trailing \"%\" \n\t\tif daily_percent_change.endswith('%'):\n\t\t\t# 'slice' the string. this time, no need to define the start index, but definiely define the end index\n\t\t\tdaily_percent_change = daily_percent_change[:-1]\n\n\t\tprint symbol, daily_percent_change\n\n\t\tdaily_percent_change_keyed_by_ticker_symbol[symbol] = daily_percent_change\n\n\treturn daily_percent_change_keyed_by_ticker_symbol", "def gains_btw_dates(self, date_ini='Ini', date_fin='today', pct=False):\n assert date_fin == 'today' or isinstance(date_fin, date), 'Error! You have to pass a datetime.date istance to date parameters.'\n assert date_ini == 'Ini' or isinstance(date_ini, date), 'Error! You have to pass a datetime.date istance to date parameters.'\n assert isinstance(pct, bool), 'Error! The pct parameter must be boolean.'\n if date_fin == 'today':\n date_fin = self.data.index[-1]\n if date_ini == 'Ini':\n date_ini = self.data.index[0]\n assert date_ini >= self.data.index[0], 'Error ! Invalid Initial Date'\n assert date_fin >= self.data.index[0], 'Error ! 
Invalid Final Date'\n date_fin = self._first_good_date(date_fin)\n if date_ini == self.data.index[0]:\n profit = self.data.loc[date_fin, 'Profit/Loss']\n else:\n #date_ini = self._first_good_date(self._first_good_date(date_ini) - timedelta(1))\n date_ini = self._first_good_date(date_ini - timedelta(1))\n profit = self.data.loc[date_fin, 'Profit/Loss'] - self.data.loc[date_ini, 'Profit/Loss']\n if pct:\n return round(profit / self.value(date_ini) * 100, 2)\n else:\n return round(profit, 2)", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def get_price_data(ticker, days_befoure):\r\n #config_file=raw_input('config file: ')\r\n config_file=\"d:/tmp/moex.json\" \r\n try:\r\n with open(config_file) as config_file: \r\n conn_data = json.load(config_file)\r\n except:\r\n print \"Error: Unable to read config file. \"\r\n sys.exit(1)\r\n\r\n username = conn_data['username']\r\n password = conn_data['password']\r\n my_config = Config(user=username, password=password, proxy_url='')\r\n\r\n my_auth = MicexAuth(my_config)\r\n date = datetime.datetime.now() - datetime.timedelta(days_befoure)\r\n \r\n #ticker = 'SBER' # for tesing...\r\n \r\n if my_auth.is_real_time():\r\n iss = MicexISSClient(my_config, my_auth, MyDataHandler, MyData)\r\n iss.get_history_securities('stock',\r\n 'shares',\r\n 'tqbr',\r\n ticker, \r\n date.strftime(\"%Y-%m-%d\")\r\n #here to be start end dates\r\n )\r\n #print iss.handler.data.history\r\n return iss.handler.data.as_dataframe()", "def calculate_prices(self, merged_data):\n calculated_prices = []\n for record in merged_data:\n prices_dict = dict()\n supplier_price_id = record.get('supplier_detail').get('identifier') # get the supplier price id\n session_id = record.get('supplier_transaction').get('session_id') # get the transaction session\n supplier_trans_fee_price = self.compute_fee_price(\n record) # Get the fee price for each transaction if needed\n supplier_trans_time_price = self.compute_time_price(\n record) # Get the time price for each transaction if needed\n supplier_trans_kwh_price = self.compute_kwh_price(record)\n total_price = supplier_trans_fee_price + supplier_trans_time_price + supplier_trans_kwh_price\n prices_dict.update({'fee_price': supplier_trans_fee_price,\n 'time_price': supplier_trans_time_price,\n 'kwh_price': supplier_trans_kwh_price,\n 'total_price': total_price,\n 'session_id': session_id,\n 'supplier_price_id': supplier_price_id})\n calculated_prices.append(prices_dict)\n\n return calculated_prices", "def compute_costs(timesheet, biller, date1=None, date2=None): \n # Slice\n f = slice_by_dates(timesheet, date1, date2)\n\n # Resample and add start/end dates\n if biller.freq is not None:\n freq = biller.freq\n f = timesheet.set_index('date')[['duration']].resample(freq).sum()\n f = f.reset_index()\n f['period'] = f['date'].map(lambda x: pd.Period(x, freq))\n f['start_date'] = f['period'].map(lambda x: x.start_time)\n f['end_date'] = f['period'].map(lambda x: x.end_time)\n else:\n start_date, end_date = f['date'].min(), f['date'].max()\n f['start_date'] = start_date\n f['end_date'] = end_date\n\n # Get bins for aggregating\n if biller.base_fee:\n bins = [0] + biller.bins\n else:\n bins = biller.bins\n\n def my_agg(group):\n d = OrderedDict()\n d['start_date'] = group['start_date'].iat[0]\n d['end_date'] = group['end_date'].iat[0]\n t = group['duration'].iat[0]\n d['duration'] = pd.Series(decompose(t, bins))\n c1 = d['duration'].cumsum().map(biller)\n c2 = c1.shift(1).fillna(0)\n cost = c1 - c2\n 
d['rate'] = cost/d['duration']\n d['cost'] = cost\n return pd.DataFrame(d)\n \n f = f.groupby('date').apply(my_agg\n ).reset_index().drop(['level_1', 'date'], axis=1)\n\n # Drop NaN rate items\n f = f.dropna(subset=['rate'])\n\n return f", "async def daily(self, ctx):\r\n # TODO: Asssess whether this can be cleaned up. \r\n # As it stands, very similar to inv()\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n stock = self.iex.get_held_stocks(db, company.id)\r\n inventory = []\r\n for s in stock:\r\n close = await self.get_latest_close(ctx, db, s.symbol)\r\n inventory.append([s.symbol, s.quantity, s.purchase_price, close.close, s.quantity*close.close - s.quantity*s.purchase_price ]) \r\n inv_df = pd.DataFrame(inventory, columns=['Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n inv_df['sign'] = np.where(inv_df['Current Value']>=0, '+', '-')\r\n inv_df['%'] = abs(((inv_df['Close'] - inv_df['Purchase Price']) / inv_df['Purchase Price']) * 100)\r\n inv_df['%'] = inv_df['%'].round(1)\r\n inv_df = inv_df.sort_values(['Symbol'])\r\n inv_df = inv_df[['sign', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value']]\r\n aggregated = tabulate(inv_df.values.tolist(), headers=['Δ', '%', 'Symbol', 'Quantity', 'Purchase Price', 'Close', 'Current Value'])\r\n await ctx.send(f'```diff\\n{aggregated}```')", "def get_adjusted_data(stockSymbol, df):\n\n events = ['SPLIT', 'BONUS']\n arr = ['Open Price', 'High Price', 'Low Price',\n 'Last Price', 'Close Price', 'Average Price']\n\n stockSymbol = stockSymbol.replace('&', '%26')\n\n if(df.empty):\n print(\"Please check data. Dataframe is empty\")\n return df\n\n df.index = pd.to_datetime(df.index)\n df.sort_index(inplace=True)\n\n try:\n df = df.drop(['Prev Close'], axis=1)\n except KeyError:\n pass\n\n for event in events:\n\n ratio, dates = scrape_bonus_splits(stockSymbol, event)\n for i in range(len(dates)):\n\n date = datetime.datetime.strptime(dates[i], '%d-%b-%Y')\n print(event, \" on : \", dates[i], \" and ratio is : \", ratio[i])\n\n changed_data = df.loc[df.index < date]\n same_data = df.loc[df.index >= date]\n\n for j in arr:\n\n try:\n changed_data.loc[:, j] = changed_data.loc[:, j]/ratio[i]\n except TypeError:\n pass\n\n df = pd.concat([changed_data, same_data])\n\n return df", "def YahooFinancials_Data(Ticker=[],Start='',End ='',Frequency ='daily'):\n\n\n \n import pandas as pd\n from yahoofinancials import YahooFinancials\n import datetime as dt \n \n Ticker = Ticker or input(\"Enter Tcikers separated by',': \").split(',')\n Start = Start or input(\"Enter Start Date separated by '-': \") or (dt.date.today()-\n dt.timedelta(1825)).strftime(\"%Y-%m-%d\")\n End = End or input(\"Enter End Date separated by '-': \") or (dt.date.today()).strftime(\"%Y-%m-%d\")\n Frequency = Frequency or input(\"Enter Frequency like 'daily','weekly': \") or 'daily'\n \n data = pd.DataFrame()\n for i in range(len(Ticker)):\n try:\n yahoo_financials = YahooFinancials(Ticker[i])\n Json_obj = yahoo_financials.get_historical_price_data(Start, End, Frequency)\n Ohlv = Json_obj[Ticker[i]]['prices']\n temp = pd.DataFrame(Ohlv)[[\"formatted_date\",\"adjclose\"]]\n temp.set_index(\"formatted_date\", inplace = True)\n temp = temp[~temp.index.duplicated(keep = 'first')]\n data[Ticker[i]] = temp['adjclose']\n \n except:\n print(f\"Unable to get the Data for: {Ticker[i]}\")\n continue\n \n return data", "def quant(date, bid, ask, voodoo):\n\n future = 200\n voodoo[:] = ask-bid\n for i in 
xrange(0, future):\n voodoo += (ask-bid + ask-bid + ask-bid + ask-bid\n +ask-bid + ask-bid + ask-bid + ask-bid\n ) / 8\n voodoo[:] = voodoo / future", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def get_energy_demand_values_day(weather_data, houses_list, houses_dict,\n energy_factor_types, energy_demands_types,\n load_curve_houses, load_profile_df,\n daily_energy_demand_houses):\n start = weather_data.index[0]\n while start < weather_data.index[-1]:\n end = start + pd.Timedelta('1 days')\n if logger.isEnabledFor(logging.INFO):\n print('\\rProgress: '+str(start), end='\\r') # print progress\n typtag = weather_data.loc[start]['typtag']\n for house_name in houses_list:\n house_type = houses_dict[house_name]['house_type']\n for i, energy_factor_type in enumerate(energy_factor_types):\n energy_demand_type = energy_demands_types[i]\n # Example: Q_Heiz_TT(t) = F_Heiz_TT(t) * Q_Heiz_TT\n load_curve_houses.loc[start:end, (house_name,\n energy_demand_type)] =\\\n load_profile_df.loc[start:end, (energy_factor_type,\n house_type)] *\\\n daily_energy_demand_houses.loc[(house_name,\n energy_demand_type), typtag]\n# print(load_curve_houses.loc[start:end])\n start = end\n\n if logger.isEnabledFor(logging.INFO):\n # overwrite last status with empty line\n print('\\r', end='\\r')\n\n return load_curve_houses", "def create_order(df_stock, df_signal, moneyness=('OTM', 'ITM'),\n cycle=0, strike=0, expire=(False, True)):\n symbol = df_stock.ix[df_stock.index.values[0]]['symbol']\n\n tb_closes = {\n stock.date.strftime('%Y-%m-%d'): np.float(stock.close) for stock in\n Stock.objects.filter(Q(symbol=symbol) & Q(source='thinkback'))\n }\n\n holding = df_signal['holding'].apply(\n lambda x: int(x / np.timedelta64(1, 'D'))\n ).astype(np.int).min()\n\n data = list()\n dates0, options0 = get_options_by_cycle_strike(\n symbol=symbol,\n name='CALL',\n dates0=df_signal['date0'],\n dte=holding,\n moneyness=moneyness,\n cycle=cycle,\n strike=strike\n )\n\n for date0, (index, signal) in zip(dates0, df_signal.iterrows()):\n date1 = signal['date1']\n\n if date0:\n option0 = options0.get(date=date0)\n\n option1 = None\n if option0 and option0.bid > 0:\n date1, option1 = get_option_by_contract_date(option0.contract, date1)\n\n if option0 and option1:\n stock0 = tb_closes[option0.date.strftime('%Y-%m-%d')]\n close0 = stock0 - np.float(option0.bid)\n\n ask1 = 0\n if int(expire):\n ask1 = np.float(\n tb_closes[option1.date.strftime('%Y-%m-%d')]\n - np.float(option0.contract.strike)\n )\n ask1 = ask1 if ask1 > 0 else 0.0\n\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(ask1)\n else:\n date1 = option1.date\n stock1 = tb_closes[option1.date.strftime('%Y-%m-%d')]\n close1 = stock1 - np.float(option1.ask)\n\n 
data.append({\n 'date0': option0.date,\n 'date1': date1,\n 'signal0': 'BUY',\n 'signal1': 'SELL',\n 'stock0': stock0,\n 'stock1': stock1,\n 'option0': option0.bid,\n 'option1': ask1 if expire else option1.ask,\n 'close0': np.round(close0, 2), # buy using ask\n 'close1': np.round(close1, 2), # sell using bid\n 'option_code': option0.contract.option_code,\n 'strike': np.float(option0.contract.strike),\n 'dte0': np.int(option0.dte),\n 'dte1': np.int(option1.dte),\n 'intrinsic0': np.float(option0.intrinsic),\n 'intrinsic1': np.float(option1.intrinsic)\n })\n\n df = DataFrame()\n if len(data):\n df = DataFrame(data, columns=[\n 'date0', 'date1', 'signal0', 'signal1',\n 'stock0', 'stock1', 'option0', 'option1', 'close0', 'close1',\n 'option_code', 'strike', 'dte0', 'dte1',\n 'intrinsic0', 'intrinsic1'\n ])\n\n df['holding'] = df['date1'] - df['date0']\n df['pct_chg'] = np.round((df['close1'] - df['close0']) / df['close0'], 2)\n\n f = lambda x: np.round(x['pct_chg'] * -1 if x['signal0'] == 'SELL' else x['pct_chg'], 2)\n df['pct_chg'] = df.apply(f, axis=1)\n\n df['sqm0'] = 100\n df['sqm1'] = -100\n df['oqm0'] = -1\n df['oqm1'] = 1\n\n return df", "def set_final_settle_prices(pr_trades, exer_date, mode, TestMode):\n\n if not pr_trades:\n msg = ('No Exercise/Assign trades made on date {0}'.format(exer_date))\n Logme()(msg, 'WARNING')\n return\n\n for t in pr_trades:\n ins = t.insaddr\n settle_price = getSettlePriceFromMarket(ins, exer_date, \"SETTLEMENT\")\n if not settle_price:\n msg = ('Will skip trade {0} since there is no price for this '\n 'instrument {1}.'.format(t.trdnbr, ins.insid))\n Logme()(msg)\n continue\n\n strike_price = convert_price_to_und_or_strike_quotation(ins,\n ins.strike_price, 1)\n\n if ins.settlement == 'Cash':\n if ins.call_option:\n p_der = FBDPCommon.create_quotetype_price(ins,\n settle_price - strike_price)\n elif ins.instype == 'Future/Forward':\n p_der = settle_price\n else:\n p_der = FBDPCommon.create_quotetype_price(ins,\n strike_price - settle_price)\n\n else: # Physical settlement\n p_phys = 0.0 # price to be set in the physical trade\n t_phys = get_physical_trade(t)\n if not t_phys:\n Logme()('Physical settlement trade does not exist for trade '\n '{0}.'.format(t.trdnbr))\n continue\n if mode == 'Market':\n p_phys = settle_price\n if ins.instype == 'Option':\n p_phys = settle_price\n if ins.call_option:\n p_der = FBDPCommon.create_quotetype_price(ins,\n settle_price - strike_price)\n else:\n p_der = FBDPCommon.create_quotetype_price(ins,\n strike_price - settle_price)\n else: # Future\n p_der = settle_price\n\n else: # Physical is done to the strike price (Strike mode)\n p_der = 0.0\n if ins.instype == 'Option':\n p_phys = ins.strike_price\n else: # Future\n p_phys = settle_price\n\n if (abs(ins.phys_contr_size) > 0.000001 and\n abs(ins.phys_contr_size - ins.contr_size) > 0.000001):\n update_exercise_payment(t, settle_price, mode, TestMode)\n\n phys_clone = t_phys.clone()\n phys_clone.price = p_phys\n if (ins.instype in ['Option', 'Warrant'] and\n ins.und_instype == 'Curr'):\n phys_clone.fx_update_non_dealt_amount(p_phys)\n else:\n phys_clone.premium = trade_premium_from_quote(\n phys_clone.trdnbr, p_phys, phys_clone.acquire_day)\n if not TestMode:\n phys_clone.commit()\n\n der_clone = t.clone()\n der_clone.price = p_der\n der_clone.premium = trade_premium_from_quote(der_clone.trdnbr, p_der,\n t.acquire_day)\n\n if not TestMode:\n der_clone.commit()\n ael.poll", "def convert_to_daily(data_list):\n for _in in range(1, len(data_list)):\n data_list[-_in] = 
data_list[-_in] - data_list[-_in - 1]", "def calculate_futures(current_balance, today_shares_owned, history, range_days, redistribution):\n today_fund_value = today_shares_owned * history.iloc[0]\n current_distribution = today_fund_value / current_balance\n\n range_max_price = []\n overall_max_price = []\n for account in history:\n range_max_price.append(max(history[account][:range_days]))\n overall_max_price.append(max(history[account][:]))\n new_fund_distribution = redistribution * current_balance # move dollar balance to new redistribution\n new_shares_after_distribution = new_fund_distribution/history.iloc[0]\n\n new_share_range_max_price = new_shares_after_distribution * range_max_price\n potential_range_gain_loss = new_share_range_max_price - new_fund_distribution\n potential_total = sum(new_share_range_max_price)\n total_gain_loss = potential_total - current_balance\n\n current_shares_at_range_max_price = today_shares_owned * range_max_price\n tot = sum(current_shares_at_range_max_price)\n est_gain_loss = tot - current_balance\n current_distrib_v_scenario = total_gain_loss - est_gain_loss\n\n return current_distrib_v_scenario", "def prices(tickers):\n try:\n start = dt.datetime.today()\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n except:\n start = dt.datetime.today()\n start = start - Day(3)\n start = start.strftime('%Y-%m-%d') \n data = pdr.get_data_yahoo(tickers, start=start)\n price = data['Adj Close']\n vol = data['Volume']\n data_dic = {}\n for stock in tickers:\n data_dic[str(stock)] = price[str(stock)][0], vol[str(stock)][0]\n \n df_data = pd.DataFrame(data_dic.values(), columns=['precio_usa', 'volumen_usa'])\n df_data['Ticker'] = tickers\n df_data = df_data.loc[:,['Ticker', 'precio_usa', 'volumen_usa']]\n\n return df_data", "def date_sort(self, type, dict, start_date, end_date):\n # print(\"start date = {}, end date = {}\".format(start_date, end_date))\n if type == \"buy\":\n for stock, data in dict.items():\n # if self.date_compare(start_date, data.get(\"購買時間\")) or self.date_compare(data.get(\"購買時間\"), end_date)):\n stock_index = dict.get(stock).get(\"股票代號\")\n price_list = dict.get(stock).get(\"購買股價\")\n num_list = dict.get(stock).get(\"購買數量\")\n cost_list = dict.get(stock).get(\"投資成本\")\n date_list = dict.get(stock).get(\"購買時間\")\n del_indexes = []\n for index, date in enumerate(date_list):\n # print(\"Date = {}\".format(date))\n if (start_date and self.date_compare(start_date, date)) or (end_date and self.date_compare(date, end_date)):\n del_indexes.append(index)\n # del date_list[index]\n print(del_indexes)\n price_list = self.del_element_from_array_by_index(price_list, del_indexes)\n num_list = self.del_element_from_array_by_index(num_list, del_indexes)\n cost_list = self.del_element_from_array_by_index(cost_list, del_indexes)\n date_list = self.del_element_from_array_by_index(date_list, del_indexes)\n\n dict.update(\n {stock: {\n \"股票代號\": stock_index,\n \"購買股價\": price_list,\n \"購買數量\": num_list,\n \"投資成本\": cost_list,\n \"購買時間\": date_list\n }\n })\n elif type == \"sell\":\n for stock, data in dict.items():\n # if self.date_compare(start_date, data.get(\"購買時間\")) or 
self.date_compare(data.get(\"購買時間\"), end_date)):\n stock_index = dict.get(stock).get(\"股票代號\")\n price_list = dict.get(stock).get(\"賣出股價\")\n num_list = dict.get(stock).get(\"賣出數量\")\n cost_list = dict.get(stock).get(\"賣出價格\")\n date_list = dict.get(stock).get(\"賣出時間\")\n income_list = dict.get(stock).get(\"實現損益\")\n buycost_list = dict.get(stock).get(\"購買成本\")\n del_indexes = []\n for index, date in enumerate(date_list):\n if (start_date and self.date_compare(start_date, date)) or (end_date and self.date_compare(date, end_date)):\n del_indexes.append(index)\n # del date_list[index]\n print(del_indexes)\n price_list = self.del_element_from_array_by_index(price_list, del_indexes)\n num_list = self.del_element_from_array_by_index(num_list, del_indexes)\n cost_list = self.del_element_from_array_by_index(cost_list, del_indexes)\n date_list = self.del_element_from_array_by_index(date_list, del_indexes)\n\n dict.update(\n {stock: {\n \"股票代號\": stock_index,\n \"賣出股價\": price_list,\n \"賣出數量\": num_list,\n \"賣出價格\": cost_list,\n \"賣出時間\": date_list,\n \"實現損益\": income_list,\n \"購買成本\": buycost_list\n }\n })", "def ideal_curve(self, original_estimate, start, due):\n\n # we count the day before as milestone date, but a non working one\n dates = self.dates_inbetween(start, due)\n working_dates, non_work_dates = self.get_date_values(dates)\n\n try:\n work_per_day = float(original_estimate) / (len(working_dates) - 1)\n except ZeroDivisionError:\n # the milestone is only 1 day long\n work_per_day = original_estimate\n\n working_dates_str = self.dates_as_strings(working_dates)\n ideal_data = []\n work_days = 0\n # we set ideal_for_date and last_day_amount to original estimate\n # to handle cases when the first day in the milestone is a weekend\n ideal_for_date = last_day_amount = original_estimate\n\n for date in self.dates_as_strings(dates):\n if date in set(working_dates_str):\n ideal_for_date = original_estimate - (work_per_day*work_days)\n ideal_data.append((date, ideal_for_date))\n work_days += 1\n else:\n ideal_data.append((date, last_day_amount))\n\n last_day_amount = ideal_for_date\n\n return ideal_data", "def _vcash(totmoney, totcftable, cashobj):\n cashl = []\n cashl.append(totmoney + totcftable.iloc[0].cash)\n for i in range(len(totcftable) - 1):\n date = totcftable.iloc[i + 1].date\n delta = totcftable.iloc[i + 1].cash\n if delta < 0:\n cashl.append(\n myround(\n delta\n / cashobj.price[cashobj.price[\"date\"] <= date].iloc[-1].netvalue\n )\n )\n else:\n cashl.append(delta)\n datadict = {\"date\": totcftable.loc[:, \"date\"], \"mf\": cashl}\n return pd.DataFrame(data=datadict)", "def test_weekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 600)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 14, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 28, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = 
get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))", "def get_date_values(self, all_dates):\n\n if self.day_value == 'all':\n working_dates = all_dates[:]\n non_working_dates = []\n elif self.day_value == 'weekdays':\n working_dates, non_working_dates = self.working_days(all_dates)\n elif self.day_value == 'custom':\n working_dates, non_working_dates = self.working_days(all_dates,\n blacklisted_dates)\n\n # we always want the day before the milestone starts to be a working day\n # regardless if it is a weekday or weekend\n # if it was a non working day the ideal effort curve would not decrease\n # by the end of the actual start date\n day_before = all_dates[0]\n if day_before not in working_dates:\n non_working_dates.remove(day_before)\n working_dates.insert(0, day_before)\n # else it must be in working dates already\n\n return working_dates, non_working_dates", "def project(self, end: datetime) -> pd.Series:\n if self.recur is not None:\n if self.date is None:\n self.date = datetime.combine(datetime.today(), datetime.min.time())\n\n if isinstance(end, int):\n end = self.date + timedelta(days=end)\n\n dates = pd.date_range(\n start=self.date,\n freq=self.recur,\n end=end,\n )\n if len(dates) < 2:\n dates = pd.date_range(\n start=self.date,\n freq=self.recur,\n periods=2\n )\n if dates[0] > self.date:\n try:\n dates = dates.union(pd.date_range(\n start=self.date,\n freq=f'-{dates.freqstr}',\n periods=2\n ))\n except ValueError as e:\n dates = dates.union(pd.date_range(\n start=self.date,\n freq=f'-1{dates.freqstr}',\n periods=2\n ))\n\n if self.compile is not None:\n total = self.amount * (dates.shape[0] - 1)\n amt = round(total / dates.to_series().diff().sum().days, 2)\n else:\n amt = self.amount\n\n res = pd.Series(data=np.full(dates.shape[0], amt), index=dates)\n LOGGER.debug('-' * 50)\n LOGGER.debug(f'Amount: {amt}')\n LOGGER.debug(res.index)\n\n if self.compile is not None:\n res = res.resample('D').pad()\n LOGGER.debug(f'{res.index[0]} to {res.index[-1]}')\n res = res[self.date:end]\n res = res.resample(self.compile).sum()\n LOGGER.debug(res.index)\n\n if self.offset > 0:\n freq = self.compile or self.recur\n if freq == 'MS':\n offset = self.offset - 1\n else:\n offset = self.offset\n res.index += timedelta(days=offset)\n LOGGER.debug(res.index)\n\n # prevents dates that are out of range\n res = res[self.date:end]\n\n # prevents recurring charges compiled based on a number of days from all showing up on the first day\n if 'D' in self.recur or (self.compile is not None and 'D' in self.compile):\n res = res[res.index != self.date]\n return res\n else:\n return pd.Series(data=[self.amount], index=[self.date])", "def date_arithmetic(): \n\n #Calculating the first Question and date \n date1 = \"Feb 27, 2000\" # %b M, %d D, %Y\n dt1 = datetime.datetime.strptime(date1,\"%b %d, %Y\") #changing the date format into python date\n num_days = 3\n dt2 = dt1 + datetime.timedelta(days=num_days)\n\n #Calculating the second Question and date \n date2 = \"Feb 27, 2017\"\n dm1 = datetime.datetime.strptime(date2,\"%b %d, %Y\")\n dm2 = dm1 + datetime.timedelta(days=num_days)\n \n #Calculating the third Question and date\n date3 = \"Jan 1, 2017\"\n date4 = \"Oct 31, 2017\"\n dm3 = datetime.datetime.strptime(date3, \"%b %d, %Y\")\n dm4 = datetime.datetime.strptime(date4, \"%b %d, %Y\")\n delta = dm4 - dm3\n\n 
#Returning the results in a tuple\n return dt2, dm2, delta.days", "def add_transaction(self, date: datetime, instruments: List[Instrument]):\n\n for inst in instruments: # type: Instrument\n self._update_profit(inst.cost)", "def daily_incidents(df2):\n\n if (df2[\"Holiday\"] == \"Thanksgiving Day\") | (df2[\"Holiday\"] == \"Christmas Day\"):\n d_inc = df2[\"Total\"] / 18\n elif df2[\"Holiday\"] == \"Non-holidays\":\n d_inc = df2[\"Total\"] / 6712\n else:\n d_inc = df2[\"Total\"] / 19\n\n return d_inc", "def __init__(self, start_date=\"2017-01-01\", end_date=datetime.datetime.now().strftime(\"%Y-%m-%d\"), asset_list=[]):\n\n self.start_date = start_date\n self.end_date = end_date\n self.asset_list = asset_list\n self.portfolio = pd.DataFrame()\n self.benchmark = san.get(\"ohlcv/bitcoin\", from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n\n for portfolio_asset in asset_list:\n self.portfolio[portfolio_asset] = san.get(\"ohlcv/\" + portfolio_asset,\n from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n self.portfolio = self.portfolio.replace([np.inf, -np.inf], 0)\n self.metrics = dict()", "def iex_equities(symbols):\n # strict this in memory so that we can reiterate over it\n symbols = tuple(symbols)\n\n def ingest(environ,\n asset_db_writer,\n minute_bar_writer, # ignored\n daily_bar_writer,\n adjustment_writer,\n calendar,\n start_session, # ignored\n end_session, # ignored\n cache,\n show_progress,\n output_dir):\n\n metadata = pd.DataFrame(np.empty(len(symbols), dtype=[\n ('start_date', 'datetime64[ns]'),\n ('end_date', 'datetime64[ns]'),\n ('auto_close_date', 'datetime64[ns]'),\n ('symbol', 'object'),\n ]))\n\n today = datetime.today()\n start = datetime(today.year-5,today.month,today.day)\n \n def _pricing_iter():\n sid = 0\n with maybe_show_progress(\n symbols,\n show_progress,\n label='Downloading IEX pricing data: ') as it, \\\n requests.Session() as session:\n for symbol in it:\n path = _cachpath(symbol, 'ohlcv')\n try:\n df = cache[path]\n except KeyError:\n df = cache[path] = get_historical_data(symbol, start=start, end=None, output_format='pandas').sort_index()\n df.index = pd.to_datetime(df.index)\n # the start date is the date of the first trade and\n # the end date is the date of the last trade\n start_date = df.index[0]\n end_date = df.index[-1]\n # The auto_close date is the day after the last trade.\n ac_date = end_date + pd.Timedelta(days=1)\n metadata.iloc[sid] = start_date, end_date, ac_date, symbol\n\n df.rename(\n columns={\n 'Open': 'open',\n 'High': 'high',\n 'Low': 'low',\n 'Close': 'close',\n 'Volume': 'volume',\n },\n inplace=True,\n )\n yield sid, df\n sid += 1\n\n daily_bar_writer.write(_pricing_iter(), show_progress=True)\n\n metadata['exchange'] = \"NYSE\"\n \n symbol_map = pd.Series(metadata.symbol.index, metadata.symbol)\n asset_db_writer.write(equities=metadata)\n\n adjustment_writer.write()\n\n return ingest", "def initialize_historical_data(f_out, StockTicker_list, days) :\n\t#Since this is for daily open price, just run this once a day on the website\n\td_prices = dict()\n\td_dates = dict()\n\tfor stock in StockTicker_list :\n\t\thist = get_historical_data(stock, days)\n\t\tprices = []\n\t\tdates = []\n\t\tfor day in hist :\n\t\t\t#print(day[\"Date\"].replace(\" \",\"_\").replace(\",\",\"\"), end=\":\")\n\t\t\t#print(day[\"Open\"], end=\" \")\n\t\t\tprices.append(day[\"Open\"])\n\t\t\tdates.append(day[\"Date\"].split(\", \")[0])\n\t\tfull_name = Share(stock).get_name().replace(\" \", \"_\")\n\t\td_prices[full_name] 
= list(reversed(prices))\n\t\td_dates[full_name] = list(reversed(dates))\n\treturn (d_prices, d_dates)", "def fill_no_transact_days(self, list_of_dates, list_of_changes):\r\n # Set current day to first day given\r\n current_day = list_of_dates[0]\r\n last_day = list_of_dates[-1]\r\n day_delta = datetime.timedelta(days=1)\r\n\r\n # Create dictionary to lookup day and total change on that day\r\n day_change_lookup = dict(zip(list_of_dates, list_of_changes))\r\n\r\n # For every day until the last day, check if exists in list_of_dates\r\n # and append the correct change if does. If not in, appends 0\r\n all_days = []\r\n all_changes = []\r\n while(current_day <= last_day):\r\n all_days.append(current_day)\r\n if current_day in list_of_dates:\r\n all_changes.append(day_change_lookup[current_day])\r\n else:\r\n all_changes.append(0.)\r\n current_day += day_delta\r\n\r\n return(all_days, all_changes)", "def updatePrices(self,dd):\n for key in dd:\n self.DoS[key].updatePrice(dd[key])", "def divideSeries(requestContext, dividendSeriesList, divisorSeries):\n if len(divisorSeries) != 1:\n raise ValueError(\"divideSeries second argument must reference exactly 1 series\")\n\n divisorSeries = divisorSeries[0]\n results = []\n\n for dividendSeries in dividendSeriesList:\n name = \"divideSeries(%s,%s)\" % (dividendSeries.name, divisorSeries.name)\n bothSeries = (dividendSeries, divisorSeries)\n step = reduce(lcm,[s.step for s in bothSeries])\n\n for s in bothSeries:\n s.consolidate( step / s.step )\n\n start = min([s.start for s in bothSeries])\n end = max([s.end for s in bothSeries])\n end -= (end - start) % step\n\n values = ( safeDiv(v1,v2) for v1,v2 in izip(*bothSeries) )\n\n quotientSeries = TimeSeries(name, start, end, step, values)\n quotientSeries.pathExpression = name\n results.append(quotientSeries)\n\n return results", "def generate_cashflows(path_account, isin_cash):\n # Read and parse\n df_account = pd.read_excel(path_account)\n df_account = df_account.rename(\n columns={\n \"Variación\": \"ccyDelta\",\n \"Unnamed: 8\": \"delta\",\n \"Saldo\": \"ccyAmount\",\n \"Unnamed: 10\": \"amount\",\n \"Fecha valor\": \"date\",\n }\n )\n\n df_account[\"date\"] = pd.to_datetime(df_account[\"date\"], dayfirst=True)\n df_account = df_account.drop(\n columns=[\"Fecha\", \"Hora\", \"Producto\", \"Tipo\", \"ID Orden\"]\n )\n\n # Generate changes in position\n deltas_df = df_account.groupby([\"date\", \"ISIN\", \"amount\"])[\"delta\"].sum()\n deltas_df = pd.DataFrame(deltas_df).reset_index()\n\n # Compute cashflows\n cashflows_df = deltas_df.pivot_table(\n index=\"date\", columns=\"ISIN\", values=\"delta\", aggfunc=\"sum\"\n )\n\n # Compute external cashflows\n cashflows_external_df = df_account.loc[\n df_account[\"Descripción\"].isin([\"Ingreso\", \"Retirada\"])\n ]\n\n # For some reason DEGIRO has the cashflows mark to market shifted by one.\n # and my guess is that unless there is a position transaction, they dont\n # write cash mark to markets on Fridays ...\n cashflows_df = cashflows_df.asfreq(\"D\")\n cashflows_df[isin_cash] = cashflows_df[isin_cash].shift()\n cashflows_df = cashflows_df.asfreq(\"B\")\n\n return cashflows_df, cashflows_external_df", "def get_stock_price(df_excld):\n\n ts = TimeSeries(os.environ['ALPHA_VANTAGE_KEY'])\n\n info = []\n symbols = []\n counter = 0\n\n for t in df_excld['Ticker']:\n\n if counter % 5 == 0:\n time.sleep(65)\n\n i, m = ts.get_daily(symbol=t, outputsize='full')\n info.append(i)\n symbols.append(m['2. 
Symbol'])\n counter += 1\n\n return info, symbols", "def aveVolumeCalc(ins, date):\n cal = ins.Currency().Calendar()\n enddate = cal.AdjustBankingDays(date, 0)\n startdate = cal.AdjustBankingDays(date, AVERAGING_PERIOD)\n\n prices=[]\n histprices = acm.FPrice.Select(\"instrument = %s and market = '%s' \\\n and day > '%s' and day <='%s'\" % \n (ins.Oid(), DAILY_MARKET, startdate, enddate))\n \n for price in histprices:\n settle = price.Settle()\n if settle >= 0:\n prices.append(settle)\n \n #upgrade 2013 fix for failure during run - acm.Math().AverageOf seems buggy\n try:\n avgprice = (sum(prices)/len(prices))\n except ZeroDivisionError:\n avgprice = 0\n \n #avgprice = acm.Math().AverageOf(prices, None)\n \n #Overwrite today's price if you find it \n newPrice = acm.FPrice.Select01(\"instrument = %s and market = '%s' and day = %s\" % \n (ins.Oid(), THREE_MONTH_MARKET, enddate),\n 'NaN')\n if not newPrice:\n newPrice = acm.FPrice()\n newPrice.Instrument(ins)\n newPrice.Day(enddate)\n newPrice.Market(THREE_MONTH_MARKET)\n newPrice.Currency(ins.Currency())\n\n newPrice.Settle(avgprice)\n try:\n newPrice.Commit()\n print 'INFO: %s price for %s was created on %s' %(THREE_MONTH_MARKET, ins.Name(), date)\n except Exception, err:\n print 'ERROR: %s price for %s did not commit: %s' %(THREE_MONTH_MARKET, ins.Name(), str(err))\n \n return newPrice", "def test_biweekly_bussiness_days_only(self):\n print()\n print(\"Test Bussiness Days Only\")\n start_date = timezone.now()\n start_date = start_date.replace(day=1, month = 9, year = 2020)\n end_date = start_date.replace(day=30)\n expense = BudgetExpense.objects.get(id = 700)\n\n expected_dates = []\n expected_date = expense.start_date\n expected_date = expected_date.replace(day = 4, month = 9, year = 2020)\n expected_dates.append(expected_date)\n expected_date = expected_date.replace(day = 21, month = 9, year = 2020)\n expected_dates.append(expected_date)\n\n\n print(\"EXPECTED\")\n print(\"==========\")\n for d in expected_dates:\n print(d)\n\n result = get_anticipated_transaction_occurences(expense, start_date, end_date)\n print()\n print(\"Actual Result\")\n print(\"============\")\n for r in result.get(expense):\n print(r)\n print()\n self.assertEquals(expected_dates, result.get(expense))", "def add_dividend(data, price='Close', adj='Adj_Close', out='Dividend'):\n logger = logging.getLogger(__name__)\n share = data[adj] / data[price]\n share = (share - share.shift(1)) / share.shift(1)\n data.loc[:,out] = np.round(share * data[price], 3).fillna(0)", "def get_stock_prices(ticker, start_date, end_date=None):\n if end_date is None:\n end_date = dt.date.today()\n\n shares = Share(ticker)\n df = pd.DataFrame(shares.get_historical(start_date.isoformat(),\n end_date.isoformat()))\n return df.set_index(\"Date\", drop=True) \\\n .drop(\"Symbol\", axis=1) \\\n .astype(float) \\\n .sort_index()", "def get_daily_list(context, data_dict):\n # noinspection PyUnresolvedReferences\n\n output = []\n start_date_str = _get_or_bust(data_dict, 'startDate')\n try:\n dt.strptime(start_date_str, '%Y-%m-%d')\n except ValueError:\n raise _ValidationError(\n 'startDate \\'{0}\\' not in YYYY-MM-DD format'.format(start_date_str)\n )\n start_date = parse(start_date_str,\n default=default_release_date).astimezone(gettz('UTC'))\n\n if 'endDate' in data_dict:\n end_date_str = data_dict['endDate']\n try:\n dt.strptime(end_date_str, '%Y-%m-%d')\n except ValueError:\n raise _ValidationError(\n 'endDate \\'{0}\\' not in YYYY-MM-DD format'.format(end_date_str)\n )\n end_date = 
parse(end_date_str,\n default=default_release_date).astimezone(gettz('UTC'))\n days = (end_date - start_date).days + 1\n if days < 1:\n raise _ValidationError(_(\n 'endDate \\'{0}\\' must be greater '\n 'than startDate \\'{1}\\''.format(\n end_date_str,\n start_date_str\n )\n ))\n else:\n days = 1\n\n for day in range(days):\n single_date = (start_date + datetime.timedelta(days=day))\n single_date_str = single_date.replace(tzinfo=None).isoformat()\n q = {\n 'q': (\n 'product_type_code:24 AND '\n 'last_release_date:\"{release_date}Z\"'.format(\n release_date=single_date_str\n )\n )\n }\n\n results = _get_action('package_search')(context, q)\n\n count = results['count']\n if count > 1:\n raise _ValidationError(\n 'More than one Daily for date \\'{0}\\''.format(single_date_str)\n )\n\n for result in results['results']:\n children = []\n\n for child in result.get('child_list', []):\n children.append(\n get_product(context, {\n 'productId': child\n })\n )\n\n result['children'] = children\n output.append(result)\n\n return output", "def get_date_pred():\r\n \r\n date_now = dt.datetime.now()\r\n date_pred = [date_now - dt.timedelta(days=1)+dt.timedelta(days=i) for i in range(8)]\r\n month_pred = [item.month for item in date_pred]\r\n day_pred = [item.day for item in date_pred]\r\n \r\n return date_pred,month_pred,day_pred", "def cumulative_returns(shares_allocation, capital, test_data):\n\n # list of DataFrames of cumulative returns for each stock\n daily_returns = []\n\n # iterates over every stock in the portfolio\n for stock in shares_allocation.index:\n\n # multiples shares by share prices in the validation dataset\n daily_returns.append(shares_allocation.loc[stock].values * test_data[stock])\n\n # concatenates every DataFrame in the above list to a single DataFrame\n daily_returns_df = pd.concat(daily_returns, axis=1).reset_index()\n\n # sets the index as the date\n daily_returns_df.set_index(\"Day\", inplace=True)\n\n # adds the cumulative returns for every stock\n cumulative_daily_returns = daily_returns_df.sum(axis=1)\n\n # returns the cumulative daily returns of the portfolio\n return cumulative_daily_returns", "def test_is_payday_positive_25(self):\n expected_count = 3\n expected_paydays = [\n date_class(2020,10,2), \n date_class(2020,10,16), \n date_class(2020,10,30)\n ]\n\n curr_date = date_class(2020,10,1)\n end_date = date_class(2020,10,31)\n paydays = []\n\n while curr_date <= end_date:\n is_payday = pay_cycle_object().is_payday(curr_date)\n if is_payday: \n paydays.append(curr_date)\n curr_date += timedelta(days=1)\n\n assert len(paydays) == expected_count, \\\n f'Got {len(paydays)}, expected {expected_count}'\n assert paydays == expected_paydays, \\\n f'Got {paydays}, expected {expected_paydays}'", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / 
data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def run(zipped):\n annc_date_str, (buy_advance, sell_delay) = zipped\n print(f'Doing -> Buy = {buy_advance} Sell = {sell_delay}')\n stocks = pd.read_csv('data/HS_Composite/remaining_stocks.csv')\n stocks = stocks[stocks['Status'] != 1] # Remove the already listed stocks\n\n profit = list()\n annc_date = dt.datetime.strptime(annc_date_str, '%Y-%m-%d')\n start_date = annc_date - dt.timedelta(days=math.ceil(buy_advance / 5 * 7) + 30) # +30 for requiring enough data\n end_date = annc_date + dt.timedelta(days=math.ceil(sell_delay / 5 * 7) + 30)\n for row in stocks.values:\n ticker = str(row[4]).zfill(4) + '.hk'\n returns = cal_pnl(ticker, annc_date_str, annc_date, buy_advance, sell_delay, start_date, end_date)\n if returns is not None:\n profit.append((ticker, returns))\n stock_ave_pnl = np.round(np.mean([earning for _, earning in profit]), 3)\n benchmark_pnl = cal_pnl('^HSI', annc_date_str, annc_date, buy_advance, sell_delay, start_date, end_date)\n profit.append(('^HSI', benchmark_pnl))\n pd.DataFrame(profit, columns=['Ticker', 'Returns']).\\\n to_csv(f'data/pnl/buy_{buy_advance}_sell_{sell_delay}.csv', index=False)\n return [buy_advance, sell_delay, stock_ave_pnl, stock_ave_pnl - benchmark_pnl]", "def _create_payments(self, invoice):\n self.ensure_one()\n if self.schedule_id and self.schedule_id.occurences > 0:\n # TODO: make more intelligent price cut\n amount = invoice.amount_total\n amount_per_occurence = amount / self.schedule_id.occurences\n for day in self.schedule_id.day_ids:\n payment = self.env['account.payment'].new({\n 'payment_type': 'inbound',\n 'partner_type': 'customer',\n 'partner_id': self.member_id.partner_id.id,\n 'amount': amount_per_occurence,\n 'payment_date': day.day,\n 'journal_id': self.journal_id.id,\n })\n payment._onchange_journal()\n payment_values = dict(payment._cache)\n payment = self.env['account.payment'].create(payment_values)\n payment.invoice_ids = [(4, invoice.id, False)]", "def get_dates(amount, start_date):\n\n dates = []\n\n for _ in range(amount):\n dates.append(start_date.strftime(\"%d.%m.%Y\"))\n start_date += datetime.timedelta(days=1)\n\n return dates", "def total_investment_costs(dh: DataHandler):\n # discountrate series with index: scenarios\n discount_rate = dh.get(\"scalars\").loc[\"discountrate\", :]\n sc = 8760 / dh.scenarios.hours\n\n scen_hor_map = dh.scenarios.horizon\n\n # investment costs 
dataframe with columns: scenarios and index: alltec\n inv = dh.get(\"i_cost\").xs(\"invest\", level=\"par_cost\")\n assert all(\n k in scen_hor_map for k in inv.columns\n ), \"You have not defined a horizon level for a scenario.\"\n tec_inv = list(\n dh.get(\"i_cost\")\n .xs(\"invest\", level=\"par_cost\")\n .index.get_level_values(\"alltec\")\n .unique()\n )\n inv = inv.groupby([\"alltec\"]).apply(extract_horizon_specific_cost, scen_hor_map)\n\n # lifetime dataframe with columns: scenarios and index: alltec\n\n lt = dh.get(\"i_cost\").xs(\"lifetime\", level=\"par_cost\")\n lt.index = lt.index.droplevel(\"i_cost\")\n lt = lt.loc[tec_inv, :]\n\n # flex_premium dataframe with columns: scenarios and index: alltec\n fp = dh.get(\"i_cost\").xs(\"flex_premium\", level=\"par_cost\")\n fp.index = fp.index.droplevel(\"i_cost\")\n fp = fp.loc[tec_inv, :]\n\n inv = (\n inv\n * ((1 + discount_rate) ** lt * discount_rate)\n / ((1 + discount_rate) ** lt - 1)\n )\n\n # investment costs DataFrame with columns: scenarios and index: [alltec, regions]\n cost = inv / sc * fp\n cost = add_dimension(cost, dh.merge_stored_sets(\"r\"), \"r\")\n cost = cost.reorder_levels([\"alltec\", \"r\"])\n\n inv_capa = dh.get(\"o_inve\")\n inv_capa.index.names = change_tec_lvl_name_to_alltec(inv_capa.index.names)\n inv_capa.index = inv_capa.index.droplevel([\"new\"])\n inv_capa = inv_capa.astype(\"Float64\")\n\n return inv_capa.mul(cost)", "def financial(ticker_symbol):\n ticker = yf.Ticker(ticker_symbol, session=session)\n information = ticker.info\n\n # To check if input is a valid ticker\n if \"symbol\" in information:\n with open(r\"database/financials.json\", \"r+\") as r:\n data = json.load(r)\n check_financial_data(ticker_symbol, ticker, data, r)\n\n # url_ratings = \"https://finance.yahoo.com/calendar/earnings?symbol={}\".format(ticker_symbol)\n # text_soup_ratings = BeautifulSoup(get_earnings_html(url_ratings), \"lxml\")\n #\n # earnings_list, financial_quarter_list = [], []\n # # [[1, 0.56, 0.64], [2, 0.51, 0.65], [3, 0.7, 0.73], [4, 1.41, 1.68], [5, 0.98]]\n # count = 5\n # for earning in text_soup_ratings.findAll(\"tr\"):\n # tds = earning.findAll(\"td\")\n # if len(tds) > 0:\n # earning_date = tds[2].text.rsplit(\",\", 1)[0]\n # eps_est = tds[3].text\n # eps_act = tds[4].text\n # print(earning_date, eps_est, eps_act, ticker_symbol)\n # if eps_est != \"-\" and eps_act != \"-\":\n # if eps_act != \"-\":\n # earnings_list.append([count, earning_date, eps_est, eps_act])\n # else:\n # earnings_list.append([count, earning_date, eps_est])\n # else:\n # break\n # print(earnings_list)\n\n # if len(earnings_list) != 100:\n # tds = earning.findAll(\"td\")\n # if len(tds) > 0:\n # earning_date = tds[2].text.rsplit(\",\", 1)[0]\n # eps_est = tds[3].text\n # eps_act = tds[4].text\n # print(earning_date, eps_est, eps_act, ticker_symbol)\n #\n # if eps_act != \"-\":\n # earnings_list.append([count, eps_est, eps_act])\n # else:\n # earnings_list.append([count, eps_est])\n #\n # # Deduce financial quarter based on date of report\n # year_num = earning_date.split()[-1]\n # month_num = earning_date.split()[0]\n # if month_num in [\"Jan\", \"Feb\", \"Mar\"]:\n # year_num = int(year_num) - 1\n # quarter = \"Q4\"\n # elif month_num in [\"Apr\", \"May\", \"Jun\"]:\n # quarter = \"Q1\"\n # elif month_num in [\"Jul\", \"Aug\", \"Sep\"]:\n # quarter = \"Q2\"\n # else:\n # quarter = \"Q3\"\n # financial_quarter_list.append(\"{} {}\".format(year_num, quarter))\n # count -= 1\n # else:\n # break", "def rebalance(self, date):\n eod_values = 
self.df.shift(1).loc[date, 'values'].mul(1 + self.tc.instrument_returns.loc[date, 'daily'])\n eod_portfolio_value = sum(eod_values.values)\n\n previous_values = self.df.loc[date, 'values'].copy()\n position_value = self.target_weights.mul(eod_portfolio_value)\n trading_cost = abs(eod_values.div(eod_portfolio_value) - self.target_weights) * eod_portfolio_value * \\\n self.tc.commission\n current_values = position_value - trading_cost\n self.df.loc[date, 'values'] = current_values.values\n future_values = self.tc.instrument_returns.loc[date:, 'cumulative'].div(\n self.tc.instrument_returns.loc[date, 'cumulative']).mul(current_values, axis=1)\n self.df.loc[date:, 'values'] = future_values.values\n trade = pd.Series(current_values - previous_values)\n # Once we have calculated the end-of-day value of the portfolio, we set the allocation by looking at the\n # dollars invested in each ETF\n self.df.loc[date:, 'allocations'] = future_values.div(future_values.sum(axis=1), axis=0).values\n\n return trade", "def getIncomeAndDeductions(self, paycheck, record):\n # the income category\n source = record[1].strip()\n # if it is not blank\n if source:\n # the pay rate for this category\n rate = record[2].strip()\n rate = float(rate) if rate else 0\n # the hours worked in this category\n hours = record[3].strip()\n hours = float(hours) if hours else 80\n # the amount earned\n amount = (record[4].strip())\n amount = float(amount) if amount else 0\n\n # adjust the hours earned by the salaried people\n if hours == 0 and amount > 0: hours = 80\n\n # make an income record\n income = Income(\n category = source.lower(),\n amount = amount,\n rate = rate, hours = hours,\n salary = 0 if rate else amount\n )\n # record\n paycheck.source[income.category] = income\n\n # the federal deductions\n source = record[6].strip()\n # if there\n if source:\n # get the amount\n amount = float(record[7].strip())\n # record\n paycheck.federal[source] = amount\n\n # the state deductions\n source = record[8].strip()\n # if there\n if source:\n # get the amount\n amount = float(record[9].strip())\n # record\n paycheck.state[source] = amount\n\n # the personal deductions\n source = record[10].strip()\n # if there\n if source:\n # get the amount\n amount = float(record[11].strip())\n # record\n paycheck.personal[source] = amount\n\n # all done\n return", "def get_duty_percentage(self):\n container_line_ids = self\n hbl_customs_obj = self.env['hbl.customs.duty']\n for line in container_line_ids:\n p_line = line.purchase_line\n #Get the supplier from product by using po supplier id.\n product_supplier_id = p_line.product_id.seller_ids.filtered(lambda rec:rec.name.id == p_line.partner_id.id and rec.hts_codes_ids)\n #Get HTS code of the supplier\n hts_codes_ids = product_supplier_id and product_supplier_id[0].hts_codes_ids or False\n if hts_codes_ids:\n percentage = sum(hts_codes_ids.mapped('percentage'))\n line_customs_id = hbl_customs_obj.create({'hbl_line_id' : line.id,\n 'hts_ids': [(6,_, hts_codes_ids.ids)],\n 'duty_percentage': percentage,\n 'quantity' : line.qty_to_load,\n 'unit_price' : p_line.price_unit\n })\n line.write({'line_customs_id' : line_customs_id.id})", "def BacktestStrategy1(start_cond_dict, df, stock_exchange, invt_dict):\n total_days=df.shape[0]\n today_invt_dict=invt_dict\n invt_daily_list=[] # invt after today's transaction\n net_wealth_list=[]\n\n for i in range(total_days):\n if i==0:\n today_invt_dict=stock_exchange.FullBuyStocks(today_invt_dict, i)\n elif i==total_days-1: # last day\n 
today_invt_dict=stock_exchange.FullSellStocks(today_invt_dict, i)\n invt_daily_list.append(today_invt_dict)\n net_wealth_list.append(stock_exchange.EstimateNetWealth(today_invt_dict, i))\n \n PrintResult(\"Baseline Strategy\", net_wealth_list)\n plt.plot(net_wealth_list)\n plt.title(\"Baseline (1st day buy->hold->last day sell) Strategy\")\n plt.ylabel('Net Worth in USD') # Cash + Stock worth\n plt.show()\n return", "def __download(self, since = workingday(1900,1,1)):\n\t\tuntil = workingday.today()\n\n\t\tinput_tuple = (self.symbol,\n\t\t\tstr(since.month - 1), str(since.day), str(since.year),\n\t\t\tstr(until.month - 1), str(until.day), str(until.year))\n\n\t\tself.price = dict()\n\t\tself.dividend = dict()\n\t\tself.split = dict()\n\n\t\ttry:\n\t\t\turl = 'http://ichart.yahoo.com/table.csv?s=%s&g=d&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s&ignore=.csv' % input_tuple\n\t\t\traw_data = urlopen(url)\n\t\t\traw_data.readline()\n\n\t\t\tfor line in raw_data:\n\t\t\t\tl = line.split(',')\n\t\t\t\td = workingday.strptime(l[0],'%Y-%m-%d')\n\t\t\t\trow = [\n\t\t\t\t\tfloat(l[1]), # Open\n\t\t\t\t\tfloat(l[2]), # High\n\t\t\t\t\tfloat(l[3]), # Low\n\t\t\t\t\tfloat(l[4]), # Close\n\t\t\t\t\tfloat(l[-1][:-1]), # Adj\n\t\t\t\t\tint(l[5])] # Volume\n\t\t\t\tself.price[d] = row\n\n\t\t\t# get dividend and split data\n\t\t\turl\t= 'http://ichart.finance.yahoo.com/x?s=%s&g=v&a=%s&b=%s&c=%s&d=%s&e=%s&f=%s&ignore=.csv' % input_tuple\n\t\t\traw_data = urlopen(url)\n\t\t\traw_data.readline()\n\n\t\t\tfor line in raw_data:\n\t\t\t\tl = line.split(',')\n\t\t\t\tif l[0] == 'DIVIDEND':\n\t\t\t\t\td = workingday(int(l[1][1:5]), int(l[1][5:7]), int(l[1][7:9]))\n\t\t\t\t\tself.dividend[d] = float(l[2][:-1])\n\t\t\t\telif l[0] == 'SPLIT':\n\t\t\t\t\td = workingday(int(l[1][1:5]), int(l[1][5:7]), int(l[1][7:9]))\n\t\t\t\t\tself.split[d] = tuple(map(int, l[2][:-1].split(':')))\n\n\t\texcept:\n\t\t\tprint 'Error downloading ' + self.symbol", "def calc_returns(prices):\n returns = []\n for i in range(len(prices) - 1):\n ret = (prices[i + 1] - prices[i]) / prices[i]\n returns.append(ret)\n return returns", "def refresh(self):\n lastDate = max(etf.data.index[-1] for etf in self.etfs.values())\n for etf in self.etfs.values():\n isLastDayMissing = etf.data.index[-1] < lastDate\n if isLastDayMissing and not etf.sold():\n lastDay = pd.DataFrame([etf.data.iloc[-1]], columns=etf.data.columns, index=[lastDate])\n etf.data = etf.data.append(lastDay)\n etf.calculateStats()\n # Get Profit/Loss series\n p_l = pd.DataFrame()\n for name, etf in self.etfs.items():\n p_l[name] = etf.data['Profit/Loss']\n p_l.fillna(method='ffill', inplace=True)\n self.data['Profit/Loss'] = p_l.sum(axis=1)\n\n # Get Invested amount seires\n inv = pd.DataFrame()\n for name, etf in self.etfs.items():\n inv[name] = etf.data['Invested']\n if etf.sold():\n inv.loc[etf.sell_date:,name] = -etf.profit_loss()\n inv.fillna(method='ffill', inplace=True)\n self.data['Invested'] = inv.sum(axis=1)\n\n self.data['Profit/Loss%'] = self.data['Profit/Loss'] / self.data['Invested'] * 100 # Calculates the Profit/Loss (%)\n self.data['Value'] = round(self.data['Invested'] + self.data['Profit/Loss'], 2)\n self.data['Gains'] = self.data['Profit/Loss'] - self.data['Profit/Loss'].shift(1)\n self.data['Gains%'] = self.data['Gains'] / self.data['Value'].shift(1) * 100", "def __init__(self, price, date=datetime.datetime.now(), dividend=0., annualDividend=0.):\n self.price = price\n self.date = date\n self.dividend = dividend\n self.annualDividend = annualDividend", "def _rate_dates(self, 
common_object):\n if common_object.IsKindOf(acm.FCashFlow):\n start_date = common_object.StartDate()\n elif common_object.IsKindOf(acm.FReset):\n start_date = common_object.Day()\n else:\n message = \"Rate dates for {0} object are not defined\".format(\n type(common_object))\n raise ProvisionHandlerError(message)\n\n end_date = acm.Time().DateAddDelta(start_date, 0, 3, 0)\n end_date = self._adjust_to_banking_day(end_date)\n\n return (start_date, end_date)", "def add_dates(date_list, dates):\n for date in dates:\n if date.strftime('%d-%b') not in date_list:\n date_list.append(date.strftime('%d-%b'))\n return date_list", "def add_dates(date_list, dates):\n for date in dates:\n if date.strftime('%d-%b') not in date_list:\n date_list.append(date.strftime('%d-%b'))\n return date_list", "def process_all_days(self,\n rise_limit: float,\n sink_limit: float,\n cool_off_span: int) -> None:\n self.ramp_up()\n for date_idx in range(1, len(self.stocks[0].price_history)):\n self.process_one_day(rise_limit, sink_limit, cool_off_span, date_idx)\n self.ramp_down()", "def calculate_forward_returns(data: pd.DataFrame, periods: list, price_key='close') -> pd.DataFrame:\n returns = pd.DataFrame(index=data.index)\n for period in periods:\n if type(data.index) == pd.MultiIndex:\n def multi_index_forward_returns(df: pd.DataFrame):\n return df[price_key].pct_change(periods=period).shift(-period)\n\n tmp = data.groupby(level=1).apply(multi_index_forward_returns).droplevel(0)\n returns[str(period) + '_period_return'] = tmp\n else:\n returns[str(period) + '_period_return'] = data[price_key].pct_change(periods=period).shift(-period)\n return returns", "def bond_price(fv, c,n,m,r):\n return sum([a*b for a,b in zip(discount_factors(r,n,m),bond_cashflows(fv, c, n, m))])", "def update_by_day(self, date):\n print 'UPDATE EXCHANGE RATE for day: %s' % date\n currencies = self.get_currencies()\n for code, name in currencies:\n if code in self.base_curr:\n _, created = Currency.objects.get_or_create(\n code=code, defaults={'name': name})\n if created:\n print('currency: %s created', code)\n\n for source in Currency.objects.filter(code__in=self.base_curr).all():\n exchange_rates = self.get_exchangerates_by_day(source.code, date)\n if exchange_rates:\n exchange_rates.pop(source.code)\n for code, rate in exchange_rates.iteritems():\n try:\n target = Currency.objects.get(code=code)\n exchange_rate = ExchangeRate.objects.get(date=date, source=source, target=target)\n exchange_rate.rate = rate\n exchange_rate.save()\n print('exchange rate updated %s, %s/%s=%s' % (date, source, target, rate))\n except ExchangeRate.DoesNotExist:\n exchange_rate = ExchangeRate.objects.create(date=date, source=source, target=target, rate=rate)\n print('exchange rate created %s, %s/%s=%s' % (date, source, target, rate))\n else:\n print('There is no rate for the current day')\n mail_admins('Exchange Rates Warning', 'There is no today exchange rate')\n break", "def test_get_occurences_weekly(self):\n print(\"Get weekly occurrences\")\n expense = BudgetExpense.objects.get(id = 400)\n start_date = expense.start_date\n end_date = start_date + timedelta(days = 40)\n print(start_date.strftime(\"%B %d, %y\")+\" and \"+end_date.strftime('%B %d, %y'))\n print(\"======================================\")\n result = get_anticipated_transaction_occurences(anticipated_transaction= expense, start_date = start_date, end_date = end_date)\n result_dates = []\n for current_expense in result.keys():\n print(current_expense)\n 
result_dates.extend(result.get(current_expense))\n for current_date in result_dates:\n print(current_date)\n print()\n expected_dates = []\n current_date = start_date\n while current_date < end_date: \n expected_dates.append(current_date)\n current_date += timedelta(days = 7)\n\n self.assertEquals(expected_dates, result_dates)", "def get_benchmark_returns(symbol, first_date, last_date):\n if symbol == '^GSPC':\n symbol = 'spy'\n\n data = pd_reader.DataReader(\n symbol,\n 'google',\n first_date,\n last_date\n )\n\n data = data['Close']\n\n data[pd.Timestamp('2008-12-15')] = np.nan\n data[pd.Timestamp('2009-08-11')] = np.nan\n data[pd.Timestamp('2012-02-02')] = np.nan\n\n data = data.fillna(method='ffill')\n\n return data.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]", "def sell_function(data_points, n_days):\n\n prediction_df = model_arima(data_points, n_days)\n\n # Calculates the profit for each day predicted taking into account running\n # costs and compares it with the highest profit so far.\n profit_array = [None] * (n_days + 1)\n\n for count in range(n_days):\n price = prediction_df.iloc[count][\"Prediction\"]\n profit_array[count + 1] = price\n\n return profit_array", "def get_data(end_date, n, local, foreign):\n URL = \"https://api.exchangeratesapi.io/history\"\n PARAMS = {'start_at': str(get_weekday_n_days_ago(end_date, n)),\n 'end_at': str(end_date),\n 'symbols': foreign,\n 'base': local}\n r = requests.get(url=URL, params=PARAMS)\n data = r.json()\n input_data = []\n for day in data['rates']:\n input_data.append([datetime.strptime(day, '%Y-%m-%d').date(),\n float(\"{:.8f}\".format(data['rates'][day][foreign]))])\n input_data.sort(key=lambda x: x[0])\n return input_data[-n:]", "def _calc_return(self, order_original, perf_df):\r\n\r\n order = order_original.copy()\r\n no_sec = len(self.perf_data)\r\n price_names = np.array(['price_' + str(i) for i in xrange(1, no_sec + 1)])\r\n ret = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n transaction_cost = 0\r\n\r\n # buy_list vs sell_list contains order bought vs sold that cannot be matched yet to determine the return\r\n # For example when something has been bought, but nothing or not enough has been sold yet, the residue will be\r\n # listed in these lists.\r\n buy_shares = np.zeros((np.shape(order)[0], no_sec))\r\n buy_price = np.zeros((np.shape(order)[0], no_sec))\r\n sell_shares = np.zeros((np.shape(order)[0], no_sec))\r\n sell_price = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n # bl_first vs sl_first indicates which row in buy_list vs sell_list can be used to \"match\" bought/sold shares.\r\n # It automatically points to the oldest row with still outstanding shares. 
Initial value is -1\r\n # bl_last vs sl_last indicates which row in buy_list vs sell_list can be used to write outstanding shares to.\r\n bl_first = np.ones(no_sec).astype(int) * -1\r\n bl_last = np.zeros(no_sec).astype(int)\r\n sl_first = np.ones(no_sec).astype(int) * -1\r\n sl_last = np.zeros(no_sec).astype(int)\r\n\r\n for ind in range(0, np.shape(order)[0]):\r\n bl_first[(bl_first == -1) & (bl_last > 0)] = 0\r\n sl_first[(sl_first == -1) & (sl_last > 0)] = 0\r\n\r\n # Three situations, per type: buy, sell, nothing\r\n # If nothing, skip to next day\r\n # Only returns made on one day are determined, later they will be accumulated.\r\n\r\n # Situation A.A: Sell order & outstanding buys larger than sell order\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(buy_shares, 0)\r\n share_compl = (share_cumsum < -order[ind, :]) & col_to_change\r\n numb_shares = sum(buy_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += numb_shares * perf_df.loc[ind, price_names[col_to_change]] \\\r\n - sum(buy_shares * buy_price * share_compl, 0)[col_to_change]\r\n buy_shares[share_compl] = 0\r\n bl_first += sum(share_compl)\r\n order[col_to_change] += numb_shares\r\n\r\n ret[ind, col_to_change] += perf_df.loc[ind, price_names[col_to_change]] * -order[ind, col_to_change] * (1 - transaction_cost) \\\r\n - buy_price[bl_first[col_to_change], col_to_change] \\\r\n * -order[ind, col_to_change] * (1 + transaction_cost)\r\n buy_shares[bl_first[col_to_change], col_to_change] += order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation A.B: Sell order & outstanding buys smaller than or equal to sell order\r\n # --> just fill out all outstanding buys, and change order. 
This order will be added to sell list in A.C\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > 0) \\\r\n & (np.sum(buy_shares, 0) <= -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = buy_shares[:, col_to_change]\r\n price_shares = buy_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares, 0) * \\\r\n perf_df.loc[ind, price_names[col_to_change]].values * (1 - transaction_cost) \\\r\n - np.sum(numb_shares * price_shares, 0) * (1 + transaction_cost)\r\n order[ind, col_to_change] += np.sum(numb_shares, 0)\r\n buy_shares[:, col_to_change] = 0\r\n bl_first[col_to_change] = bl_last[col_to_change] - 1\r\n\r\n # Situation A.C: Sell order & no outstanding buys\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n sell_shares[row_to_change, col_to_change] = -order[ind, col_to_change]\r\n sell_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n sl_last[col_to_change] += 1\r\n\r\n # Situation B.A: Buy order & outstanding sells larger than buy order\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) > order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(sell_shares, 0)\r\n share_compl = (share_cumsum < order[ind, :]) & col_to_change\r\n numb_shares = sum(sell_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += sum(sell_shares * sell_price * share_compl, 0)[col_to_change] * (1 - transaction_cost)\\\r\n - numb_shares * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n sell_shares[share_compl] = 0\r\n sl_first += sum(share_compl)\r\n order[col_to_change] += -numb_shares\r\n\r\n ret[ind, col_to_change] += sell_price[sl_first[col_to_change], col_to_change] * order[ind, col_to_change] * (1 - transaction_cost)\\\r\n - perf_df.loc[ind, price_names[col_to_change]] * order[ind, col_to_change] * (1 + transaction_cost)\r\n sell_shares[sl_first[col_to_change], col_to_change] += -order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation B.B: Buy order & outstanding sells smaller than buy order\r\n # --> just fill out all outstanding sells, and change order. 
This order will be added to buy list in B.C\r\n col_to_change = (order[ind, :] > 0) & \\\r\n (np.sum(sell_shares, 0) > 0) & (np.sum(sell_shares, 0) <= order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = sell_shares[:, col_to_change]\r\n price_shares = sell_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares * price_shares, 0) * (1 - transaction_cost) \\\r\n - np.sum(numb_shares, 0) * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n order[ind, col_to_change] += -np.sum(numb_shares, 0)\r\n sell_shares[:, col_to_change] = 0\r\n sl_first[col_to_change] = sl_last[col_to_change] - 1\r\n\r\n # Situation B.C: Buy order & no outstanding sells\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n buy_shares[row_to_change, col_to_change] = order[ind, col_to_change]\r\n buy_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n bl_last[col_to_change] += 1\r\n\r\n ret_abs = np.array([sum(ret[:r]) for r in range(1, len(ret) + 1)])\r\n returns_abs = np.sum(ret_abs, 1)\r\n returns_rel = [i / self.context['max_notional'] + 1 for i in returns_abs]\r\n\r\n return returns_rel, returns_abs, ret_abs", "def SumCostByDay(dateOfPayment):\n\n logs.logger.debug(\n \"Start to adds all amount of Cost objects based on the payment date.\")\n try:\n searchedCostByDayFromDB = GetAllCostByDateOfPaymentFromDB(dateOfPayment)\n sumTotal = 0\n for item in searchedCostByDayFromDB:\n sumTotal += item.amount\n logs.logger.info(\n \"Based on the payment date adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict", "def get_expiry_by_days(\n expiries: List[pendulum.DateTime], days: int, sort: bool = True\n) -> Optional[pendulum.DateTime]:\n if len(expiries) == 1:\n return expiries[0]\n if sort:\n expiries = sorted(expiries)\n today = 
pendulum.today(tz=\"local\").date()\n target_date = today.add(days=days)\n if expiries[-1] < target_date:\n # return None if the target date is greater than last expiry\n return None\n for i, expiry in enumerate(expiries):\n if expiry >= target_date:\n return expiry\n return expiry", "def __next__(self):\n found = False\n for i in self.dates:\n if self.date == i:\n found = True\n if found:\n for stock in self.stocks:\n # update positions to cary over to next day\n self.stock_data[stock].position['Position'][i] =\\\n self.stock_data[stock].position['Position'][self.date]\n\n self.cash['cash'][i] = self.cash['cash'][self.date]\n\n self.date = i\n return True\n return False", "def cash_flow_response_to_df(\n portfolio_cash_flows_response: lusid.ResourceListOfPortfolioCashFlow,\n sum_by_date: bool = True\n) -> pd.DataFrame:\n\n def select_cols(\n df: pd.DataFrame,\n filter_col: str,\n filter_value: str,\n cols_to_keep: list,\n ) -> pd.DataFrame:\n return df[df[filter_col] == filter_value][cols_to_keep]\n\n # Extract cash payment data from cash flow response\n cash_flows_dict = portfolio_cash_flows_response.to_dict()\n cash_flow_data = pd.json_normalize(cash_flows_dict[\"values\"])\n\n # Split pays and receives and handle -ve signage for pay outflows\n pay_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Pay\",[\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n pay_data[\"amount\"] = pay_data[\"amount\"].apply(lambda x: -1 * x)\n pay_data.rename(columns={\"amount\": \"payAmount\"}, inplace=True)\n rec_data = select_cols(\n cash_flow_data,\n \"diagnostics.PayReceive\",\n \"Receive\",\n [\"payment_date\", \"amount\", \"source_transaction_id\"]\n )\n rec_data.rename(columns={\"amount\": \"receiveAmount\"}, inplace=True)\n\n # Merge on payment date and ignore join dupes\n merged_df = pay_data.merge(rec_data, on=[\"payment_date\", \"source_transaction_id\"])\n merged_df.drop_duplicates(subset=[\"payment_date\", \"source_transaction_id\"], keep=\"first\", inplace=True,\n ignore_index=True)\n\n # Add net flows and reduce index to dates\n merged_df['netAmount'] = merged_df['payAmount'] + merged_df['receiveAmount']\n merged_df[\"payment_date\"] = merged_df[\"payment_date\"].apply(lambda x: x.date())\n merged_df.set_index(keys=\"payment_date\", inplace=True)\n\n # Aggregate sub-holdings\n if sum_by_date:\n merged_df = merged_df.groupby(merged_df.index).sum()\n\n return merged_df", "def historical(config, stid, start_date):\n\n # Get dates\n start_date = start_date.replace(hour=6)\n end_date = datetime.utcnow()\n start, end = meso_api_dates(start_date, end_date)\n\n # Download CF6 files\n get_cf6_files(config, stid, 12)\n\n # Get the daily verification\n dailys = get_verification(config, stid, start, end, use_climo=True)\n\n return dailys", "def __init__(self, capitalCommitment, contributionRates, bow, growthRate, fundYield, lastInvestmentYear, lifeOfFund, segments, startDate):\n self.segments = int(segments)\n self.calculate = ModelCalculations(segments)\n if isinstance(startDate, datetime.date):\n self.startDate = startDate\n else:\n dateConverter = ConvertDate.ConvertDate()\n self.startDate = dateConverter(startDate)\n self.endDate = self.calculate.endDate(int(lifeOfFund), self.startDate)\n self.lastInvestmentYear = int(lastInvestmentYear) * self.segments\n self.lifeOfFund = int(lifeOfFund) * self.segments\n self.capitalCommitment = int(capitalCommitment)\n # old code below (before abstraction of rate expansion)\n #self.contributionRates = 
self._expandContributionRates(self.segments, contributionRates)\n contributionRates = self._appendExtraRates(contributionRates, self.lastInvestmentYear / self.segments)\n self.contributionRates = self.calculate.expandRates(contributionRates, self.segments, False)\n #self._validateContributionRates(self.contributionRates)\n self.bow = float(bow)\n self.growthRate = self.calculate.segmentInterest(self.segments, float(growthRate))#growthRate / self.segments\n self.fundYield = float(fundYield) / self.segments\n\n\n self._contributionList = []\n self._distributionList = []\n self._navList = []\n self._commitmentRemainingList = []\n self._netCashFlowList = []\n self._cummulativeCashFlowList = []\n self._dateList = []\n self._typeList = []\n self._distributionRates = []\n\n self._distributionRates = self.calculate.expandRates(self._getBaseDistributionRates(), self.segments, True)", "def boot_strap_back_ccy( sdate,\r\n ccy,\r\n instruments,\r\n Day_Counter ):\r\n convention = BS_Con[ccy]\r\n years_days_cash = convention[\"Cash_Day\"]\r\n swap_freq = convention[\"Swap_Freq\"]\r\n Day_Counter.set_convention(convention[\"Swap\"])\r\n instruments = BS_TF.check_instruments( instruments )\r\n \r\n sdate = str_2_date( sdate )\r\n \r\n flag = False\r\n if BS_TF.check_if_last_day_of_month( sdate ):\r\n flag = True\r\n \"\"\" Sdate stands for the begin of bootstrapping \r\n Inputs structure instruments contains:\r\n 1\\cash rates list of tuple and number of contracts \r\n 2\\future rates list of tuple and number of contracts\r\n 3\\swap rates list of tuple and number of contracts\r\n Structure of rate list looks like (time,rates)\r\n \"\"\"\r\n Cash_Rate = instruments[\"cash\"][0]\r\n Cash_Num = len(Cash_Rate)\r\n \"\"\" NOTE: inputs of futures rate should have one start row \r\n with date and the discount factor is interpolated from \r\n cash rate\r\n \"\"\"\r\n Swap_Rate = instruments[\"swap\"][0]\r\n units = 100\r\n\r\n discount_curve = []\r\n ans_curve = []\r\n discount_curve.append( [sdate,1] )\r\n ans_curve.append([sdate,1])\r\n \"\"\" Remeber par swap key dates location in discount_curve\r\n \"\"\"\r\n \r\n for i in range( 0, int(Cash_Num) ):\r\n \"\"\" Begin bootstrapping from Cash rates\r\n \"\"\"\r\n yearfrac = (Cash_Rate[i][0]-sdate).days/years_days_cash\r\n DF = 1.0/(1.0+Cash_Rate[i][1]/units*yearfrac)\r\n discount_curve.append([Cash_Rate[i][0],DF])\r\n if (Cash_Rate[i][0]-sdate).days <= 200:\r\n ans_curve.append([Cash_Rate[i][0],DF])\r\n\r\n Swap_Rate = BS_TF.augument_by_frequency( Swap_Rate, int(12/swap_freq) )\r\n \r\n \"\"\" Only do interpolation for \r\n the first three swaps \r\n 0.5y, 1y and 1.5y\r\n \"\"\"\r\n \"\"\" Pre-calculate the sum of discount \r\n factors of 0.5y, 1y and 1.5y based \r\n on the current discount curve we have \r\n \"\"\"\r\n sum_df = 0\r\n swap_frequency = relativedelta( months = int(12/swap_freq) )\r\n \"\"\" Move cur_date back to do bootstrapping\r\n \"\"\"\r\n cur_date = sdate\r\n for i in range( 1, len(discount_curve) ):\r\n while cur_date+swap_frequency < Swap_Rate[0][0] \\\r\n and cur_date >= discount_curve[i-1][0] \\\r\n and cur_date < discount_curve[i][0]:\r\n nxt_date = cur_date+swap_frequency\r\n if flag:\r\n nxt_date = BS_TF.last_day_of_month(cur_date+swap_frequency)\r\n yearfrac = Day_Counter.yearfrac( cur_date, nxt_date )\r\n DF = BS_TF.interpolation_act( nxt_date,\r\n discount_curve[i-1][0],\r\n\t\t\t\t\t\t discount_curve[i-1][1],\r\n\t\t\t\t\t\t\t\t\t discount_curve[i][0],\r\n\t\t\t\t\t\t\t\t\t discount_curve[i][1] )\r\n sum_df += DF*yearfrac\r\n 
ans_curve.append([nxt_date,DF])\r\n cur_date += swap_frequency\r\n if flag:\r\n cur_date = BS_TF.last_day_of_month(cur_date)\r\n \r\n cur_date = Swap_Rate[0][0]\r\n \r\n for i in range( 0, len(Swap_Rate) ):\r\n# if sum_df == 0:\r\n# print(\"Warning Cannot get correct 0.5y, 1y and 1.5y discount factors...\")\r\n# print(\"Current Date:\"+str(cur_date))\r\n# print(ccy)\r\n# print(ans_curve)\r\n \"\"\" Sum of previous discount \r\n factors stored in \"sum_df\"\r\n \"\"\"\r\n nxt_date = cur_date+swap_frequency\r\n if flag:\r\n nxt_date = BS_TF.last_day_of_month(nxt_date)\r\n yearfrac = Day_Counter.yearfrac( cur_date, nxt_date )\r\n rates = Swap_Rate[i][1]\r\n cur_DF = (100-sum_df*rates)/(100+rates*yearfrac)\r\n discount_curve.append([cur_date,cur_DF])\r\n ans_curve.append([cur_date,cur_DF])\r\n sum_df += cur_DF*yearfrac\r\n cur_date += swap_frequency\r\n if flag:\r\n cur_date = BS_TF.last_day_of_month(cur_date)\r\n \r\n sorted_discount_curve = sorted( ans_curve, key = lambda tup: tup[0] )\r\n return sorted_discount_curve", "def __init__(self):\n import datetime as dt\n import dateutil as du\n from dateutil import relativedelta\n\n #Initial date calculations\n self.right_now = dt.datetime.utcnow()\n self.beginDelta = -2\n self.endDelta = 365\n self.timeDeltaCashBegin = du.relativedelta.relativedelta(months=self.beginDelta)\n self.timeDeltaCashEnd = dt.timedelta(days=self.endDelta)\n self.begin_date = self.right_now + self.timeDeltaCashBegin\n self.end_date = self.right_now + self.timeDeltaCashEnd\n\n #today's date to initialize the Cash event\n self.today_date = str(dt.datetime.date(self.right_now))\n\n #time variable for event creation // included date list to decipher cash update days\n self.create_begin = dt.datetime.fromisoformat(self.right_now.date().isoformat()).isoformat() + 'Z'\n self.create_end = self.end_date.isoformat() + 'Z'\n self.create_duration = (self.end_date - self.right_now).days\n self.iterate_days = self.iterateList(self.create_duration)\n\n #time variables used in deletion code\n self.clear_begin = self.begin_date.isoformat() + 'Z'\n self.clear_end = self.end_date.isoformat() + 'Z'\n\n #Smaller size for event creation/deleting testing\n self.test_duration = 40\n self.test_days = self.iterateList(self.test_duration)\n \n #Store old event list to check if changes need to be made\n self.check_for_updates = []\n self.cash_history = []\n\n self.creds = self.getUsrCreds()\n self.service = self.buildAPICal(self.creds)\n self.usrCals = self.getUsrCals(self.service)\n\n #Check if Calendar is Present and get the details -- if not, build one\n if self.checkCashCal(self.usrCals) == False:\n self.usr_csh_id, self.usr_csh_cal = self.buildCashCal(self.usrCals)\n else:\n self.usr_csh_id = self.getCshID(self.usrCals)\n self.usr_csh_cal = self.getCshCal(self.usrCals)", "def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = 
expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))", "def diveDates(self,start,finish):\n start = datetime.strptime(start,\"%Y-%m-%d\")\n finish = datetime.strptime(finish,\"%Y-%m-%d\")\n return start+(finish-start)/2", "def evaluate_cur_stocks(self):\n today = datetime.today()\n close_val = PRICE_DF.iloc[PRICE_DF.index.get_loc(today, method=\"ffill\")]\n close_val = close_val[self.cur_stocks.index]\n close_val = pd.DataFrame({\"PRICE_CURRENT\" : close_val.values}, index=self.cur_stocks.index)\n evaluated_stocks = pd.merge(self.cur_stocks, close_val, left_index=True, right_index=True)\n evaluated_stocks[\"VOLUME_CURRENT\"] = evaluated_stocks[\"AMOUNT\"] * evaluated_stocks[\"PRICE_CURRENT\"]\n evaluated_stocks[\"RETURN\"] = (evaluated_stocks[\"VOLUME_CURRENT\"] / evaluated_stocks[\"VOLUME_PURCHASE\"]) - 1\n return evaluated_stocks", "def split_data_by_days(data=None, dates=None, day_list=None,\n verbose=False, debug=False):\n if verbose:\n print('split_data_by_days called')\n\n # Create DataFrame of Data and dates\n df = DataFrame(data, index=dates, columns=['data'])\n # Add list of dates ( just year, month, day ) <= this is mappable, update?\n df['days'] = [datetime.datetime(*i.timetuple()[:3]) for i in dates]\n if debug:\n print(df)\n\n # Get list of unique days\n if isinstance(day_list, type(None)):\n day_list = sorted(set(df['days'].values))\n # Loop unique days and select data on these days\n data4days = []\n for day in day_list:\n print((day, df[df['days'] == day]))\n data4days += [df['data'][df['days'] == day]]\n # Just return the values ( i.e. not pandas array )\n data4days = [i.values.astype(float) for i in data4days]\n print([type(i) for i in data4days])\n# print data4days[0]\n# sys.exit()\n\n if debug:\n print(('returning data for {} days, with lengths: '.format(\n len(day_list)), [len(i) for i in data4days]))\n\n # Return as list of days (datetimes) + list of data for each day\n return data4days, day_list" ]
[ "0.6943807", "0.61015797", "0.59446824", "0.5888466", "0.575217", "0.57305545", "0.57155085", "0.55656976", "0.5565659", "0.55222803", "0.55169284", "0.54701203", "0.5462021", "0.53821945", "0.53751284", "0.53381366", "0.5314847", "0.53086144", "0.5304677", "0.529244", "0.5266538", "0.5231422", "0.5222594", "0.52073574", "0.51993316", "0.5194166", "0.5183944", "0.513712", "0.5112574", "0.51103634", "0.51102006", "0.5109174", "0.5096463", "0.5070201", "0.50556344", "0.5029434", "0.50284505", "0.5025596", "0.5014013", "0.5003263", "0.50031686", "0.49953645", "0.49932596", "0.4992011", "0.49907938", "0.4983485", "0.49543375", "0.49529657", "0.4946856", "0.49404362", "0.49378645", "0.49330303", "0.49295005", "0.49287927", "0.4928335", "0.49246413", "0.49157178", "0.4902162", "0.48935592", "0.48884034", "0.48713654", "0.48687127", "0.48653537", "0.48635644", "0.4852576", "0.48466346", "0.48419017", "0.48376012", "0.48368147", "0.48337546", "0.48205957", "0.48186287", "0.48026752", "0.4797573", "0.47946048", "0.47897753", "0.4789201", "0.4789201", "0.4786869", "0.4782111", "0.47806385", "0.47780192", "0.4777417", "0.47763622", "0.4774909", "0.47626835", "0.4759721", "0.4758092", "0.47470072", "0.47422203", "0.47373486", "0.47294798", "0.47287363", "0.47282907", "0.47226128", "0.47213215", "0.47091648", "0.47088143", "0.47071466", "0.47008595" ]
0.62604964
1
Returns a cash payment based on the dividends that should be paid out according to the accumulated bookkeeping of earned, unpaid, and stock dividends.
def pay_dividends(self, next_trading_day):
    net_cash_payment = 0.0

    try:
        payments = self._unpaid_dividends[next_trading_day]
        # Mark these dividends as paid by dropping them from our unpaid
        del self._unpaid_dividends[next_trading_day]
    except KeyError:
        payments = []

    # representing the fact that we're required to reimburse the owner of
    # the stock for any dividends paid while borrowing.
    for payment in payments:
        net_cash_payment += payment['amount']

    # Add stock for any stock dividends paid.  Again, the values here may
    # be negative in the case of short positions.
    try:
        stock_payments = self._unpaid_stock_dividends[next_trading_day]
    except KeyError:
        stock_payments = []

    for stock_payment in stock_payments:
        payment_instrument = stock_payment['payment_instrument']
        share_count = stock_payment['share_count']
        # note we create a Position for stock dividend if we don't
        # already own the instrument
        if payment_instrument in self.positions:
            position = self.positions[payment_instrument]
        else:
            position = self.positions[payment_instrument] = Position(
                payment_instrument,
            )

        position.amount += share_count

    return net_cash_payment
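A minimal, illustrative usage sketch for the document snippet above. It is not part of the dataset record: the MiniPositionTracker class, the Position stand-in, and the dictionary shapes (pay date mapped to a list of payment dicts) are assumptions inferred from the snippet, and the dividend-paying logic is reproduced in simplified form only so the example runs end to end.

# Illustrative sketch only -- names other than pay_dividends() are assumed.
class Position:
    def __init__(self, instrument):
        self.instrument = instrument
        self.amount = 0


class MiniPositionTracker:
    """Just enough bookkeeping state for pay_dividends() to operate on."""

    def __init__(self):
        self.positions = {}                # instrument -> Position
        self._unpaid_dividends = {}        # pay_date -> [{'amount': float}]
        self._unpaid_stock_dividends = {}  # pay_date -> [{'payment_instrument': ..., 'share_count': int}]

    def pay_dividends(self, next_trading_day):
        # Same idea as the record above, condensed so the sketch runs.
        net_cash_payment = 0.0
        try:
            payments = self._unpaid_dividends.pop(next_trading_day)
        except KeyError:
            payments = []
        for payment in payments:
            net_cash_payment += payment['amount']

        try:
            stock_payments = self._unpaid_stock_dividends[next_trading_day]
        except KeyError:
            stock_payments = []
        for stock_payment in stock_payments:
            instrument = stock_payment['payment_instrument']
            share_count = stock_payment['share_count']
            position = self.positions.setdefault(instrument, Position(instrument))
            position.amount += share_count
        return net_cash_payment


tracker = MiniPositionTracker()
tracker._unpaid_dividends['2024-03-15'] = [{'amount': 125.0}, {'amount': -40.0}]
tracker._unpaid_stock_dividends['2024-03-15'] = [
    {'payment_instrument': 'XYZ', 'share_count': 10},
]
print(tracker.pay_dividends('2024-03-15'))  # 85.0 net cash (the -40.0 models a short reimbursement)
print(tracker.positions['XYZ'].amount)      # 10 shares credited from the stock dividend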
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow", "def cash(self, qtt_100s, qtt_50s, qtt_20s):\n return (qtt_100s * 100) + (qtt_50s * 50) + (qtt_20s * 20)", "def test_discounted_payment_below_debit(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(600), # debited (600) + adjustment (0) = invoiced (600)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def earn_dividends(self, cash_dividends, stock_dividends):\n for cash_dividend in cash_dividends:\n self._dirty_stats = True # only mark dirty if we pay a dividend\n\n # Store the earned dividends so that they can be paid on the\n # dividends' pay_dates.\n div_owed = self.positions[cash_dividend.instrument].earn_dividend(\n cash_dividend,\n )\n try:\n self._unpaid_dividends[cash_dividend.pay_date].append(div_owed)\n except KeyError:\n self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]\n\n for stock_dividend in stock_dividends:\n self._dirty_stats = True # only mark dirty if we pay a dividend\n\n div_owed = self.positions[\n stock_dividend.instrument\n ].earn_stock_dividend(stock_dividend)\n try:\n self._unpaid_stock_dividends[stock_dividend.pay_date].append(\n div_owed,\n )\n except KeyError:\n self._unpaid_stock_dividends[stock_dividend.pay_date] = [\n div_owed,\n ]", "def test_split_payment_with_discount_and_adjustment(self):\n debit_jobs(\n [\n (self.job, A(480), Entry.FLAT_DEBIT),\n (self.job2, A(480), Entry.WORK_DEBIT),\n ]\n )\n self.assertEquals(A(480), self.job2.account.balance)\n self.assert_balances(promised=A(960), balance=A(480), invoiced=A(480))\n credit_jobs(\n [\n (self.job, A(440), A(0), A(40)), # adjusted\n (self.job2, A(460), A(20), A(0)), # discounted\n ],\n D(900),\n )\n self.assert_balances(\n bank=A(900, 0, 0),\n debited=A(480),\n invoiced=A(440), # debited (480) + adjustment (-40) = invoiced (440)\n paid=A(-440),\n credited=A(-480), # payment (-440) + adjustment (-40) = credited (-480)\n partial=A(900).net_amount,\n tax=A(900).tax_amount,\n )\n self.assert_balances(\n bank=A(900, 0, 0),\n debited=A(480),\n invoiced=A(480), # debited (480) + adjustment (0) = invoiced (480)\n paid=A(-480),\n credited=A(-480), # payment (-480) + adjustment (0) = credited (-480)\n partial=A(900).net_amount,\n tax=A(900).tax_amount,\n switch_to_job=self.job2,\n )", "def test_discounted_payment_matching_debit(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n debited=A(500),\n invoiced=A(500), # debited (500) + adjustment (0) = invoiced (500)\n paid=A(-500),\n credited=A(-500), # payment (-500) + adjustment (0) = credited (-500)\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def get_cash(self):\r\n return self.cash", "def cash_income(df):\n return (df.aftertax_income -\n (1 - tc.HOUSING_CASH_SHARE) * df.housing_ben -\n (1 - tc.MCAID_CASH_SHARE) * df.mcaid_ben -\n (1 - tc.MCARE_CASH_SHARE) * df.mcare_ben -\n (1 - tc.OTHER_CASH_SHARE) * df.other_ben -\n (1 - tc.SNAP_CASH_SHARE) * df.snap_ben -\n (1 - tc.SSI_CASH_SHARE) * df.ssi_ben -\n (1 - tc.TANF_CASH_SHARE) * df.tanf_ben -\n (1 - tc.VET_CASH_SHARE) * df.vet_ben -\n (1 - 
tc.WIC_CASH_SHARE) * df.wic_ben)", "def cash_ratio(self):\n return self.cash / self.current_liabilities", "def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()", "def cash(self) -> float:\n return self._cash", "def test_adjusted_payment_matching_invoice(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n debited=A(500),\n invoiced=A(480), # debited (500) + adjustment (-20) = invoiced (480)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def _vcash(totmoney, totcftable, 
cashobj):\n cashl = []\n cashl.append(totmoney + totcftable.iloc[0].cash)\n for i in range(len(totcftable) - 1):\n date = totcftable.iloc[i + 1].date\n delta = totcftable.iloc[i + 1].cash\n if delta < 0:\n cashl.append(\n myround(\n delta\n / cashobj.price[cashobj.price[\"date\"] <= date].iloc[-1].netvalue\n )\n )\n else:\n cashl.append(delta)\n datadict = {\"date\": totcftable.loc[:, \"date\"], \"mf\": cashl}\n return pd.DataFrame(data=datadict)", "def test_payment(self):\n debit_jobs([(self.job, A(480), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def calc_cash_flow(self):\n s = self # shortcut variable\n\n # determine the changes caused by the heat pump on an annual basis.\n # First calculate annual totals for base case and heat pump case and\n # then calculate the change.\n ann_base = s.df_mo_dol_base.sum()\n ann_hp = s.df_mo_dol_hp.sum()\n ann_chg = ann_hp - ann_base\n initial_cost = np.zeros(s.hp_life+1)\n \n # Am not automatically adding sales tax to the initial cost as the user was\n # supposed to includes sales tax in their input.\n initial_cost[0] = -s.capital_cost * (1 - s.pct_financed) + s.rebate_dol\n loan_pmt = npf.pmt(s.loan_interest, s.loan_term, s.capital_cost * s.pct_financed)\n if loan_pmt < -0.01: # loan payment is negative\n loan_cost = [0.0] + [loan_pmt] * s.loan_term + [0.0] * (s.hp_life - s.loan_term)\n loan_cost = np.array(loan_cost)\n else:\n loan_cost = 0.0\n op_cost = -s.op_cost_chg * make_pattern(s.inflation_rate, s.hp_life)\n fuel_cost = -ann_chg.secondary_fuel_dol * make_pattern(s.fuel_esc_rate, s.hp_life)\n elec_cost = -ann_chg.elec_dol * make_pattern(s.elec_esc_rate, s.hp_life)\n cash_flow = initial_cost + loan_cost + op_cost + fuel_cost + elec_cost\n\n # calculate cumulative, discounted cash flow.\n disc_factor = np.ones(s.hp_life) * (1 + s.discount_rate)\n disc_factor = np.insert(disc_factor.cumprod(), 0, 1.0)\n cum_disc_cash_flow = np.cumsum(cash_flow / disc_factor)\n \n s.df_cash_flow = pd.DataFrame(\n {'initial_cost': initial_cost,\n 'loan_cost': loan_cost,\n 'op_cost': op_cost,\n 'fuel_cost': fuel_cost,\n 'elec_cost': elec_cost,\n 'cash_flow': cash_flow,\n 'cum_disc_cash_flow': cum_disc_cash_flow,\n }\n )\n s.df_cash_flow.index.name = 'year'\n \n # Calculate IRR and NPV for w/ and w/o PCE.\n s.summary['irr'] = npf.irr(s.df_cash_flow.cash_flow)\n s.summary['npv'] = npf.npv(s.discount_rate, s.df_cash_flow.cash_flow)\n \n # Add some summary fuel and electric usage and unit cost info\n s.summary['fuel_use_base'] = ann_base.secondary_fuel_units\n s.summary['fuel_use_hp'] = ann_hp.secondary_fuel_units\n s.summary['fuel_use_chg'] = ann_chg.secondary_fuel_units\n if ann_chg.secondary_fuel_units != 0.0:\n s.summary['fuel_price_incremental'] = ann_chg.secondary_fuel_dol / ann_chg.secondary_fuel_units\n else:\n s.summary['fuel_price_incremental'] = np.nan\n s.summary['elec_use_base'] = ann_base.elec_kwh\n s.summary['elec_use_hp'] = ann_hp.elec_kwh\n s.summary['elec_use_chg'] = ann_chg.elec_kwh\n s.summary['elec_rate_avg_base'] = ann_base.elec_dol / ann_base.elec_kwh\n s.summary['elec_rate_avg_hp'] = ann_hp.elec_dol / ann_hp.elec_kwh\n s.summary['elec_rate_incremental'] = ann_chg.elec_dol / ann_chg.elec_kwh", "def available_cash(self):\n return self._cash", "def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n 
first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()", "def cash_flow(self, request, pk=None, **kwargs):\n # Get the goal even though we don't need it (we could ust use the pk)\n # so we can ensure we have permission to do so.\n goal = self.get_object()\n txs = Transaction.objects.filter(Q(to_goal=goal) | Q(from_goal=goal),\n status=Transaction.STATUS_EXECUTED,\n reason__in=Transaction.CASH_FLOW_REASONS)\n txs = txs.order_by('executed').values_list('to_goal', 'executed', 'amount')\n return Response([(dt2ed(tx[1]), tx[2] if tx[0] else -tx[2]) for tx in txs])", "def remaining_payroll(session, employee):\n\n scontract = \\\n session.query(Contract).filter(\n Contract.employee == employee)\n sherees_paychecks_due = session.query(Invoice).filter(\n Invoice.contract == scontract, Invoice.voided == 0,\n Invoice.prcleared == 0, Invoice.posted == 1)\n do_not_delete_items = []\n total_due = 0\n for pc in sherees_paychecks_due:\n iitems = session.query(Iitem).filter(Iitem.invoice == pc)\n pay = 0\n for i in iitems:\n do_not_delete_items.append(i)\n pay += i.quantity * i.cost\n total_due += pay\n return sherees_paychecks_due, do_not_delete_items, total_due", "def cash_coupon(certificate, percentage):\n return sum(stake for name, stake in certificate['underlyings'].items()) * percentage", "def getCash(self) -> int:\n return self.state[CASH]", "def withdraw_cash(self, qtt_100s, qtt_50s, qtt_20s):\n amount = PaperMoneyCounter().cash(qtt_100s, qtt_50s, qtt_20s)\n if (self.__is_logged_in) and (amount <= self.__balance) and (amount <= 1000):\n self.__balance = float(Decimal(str(self.__balance - amount)))\n self.register_operation(self.ACTIONS['WITHDRAWING'], amount)\n return True\n\n return False", "def period_payment(yearly_payments_percentage, 
client_cost_reduction,\n days_with_payments, days_for_discount_rate):\n\n yearly_payments_percentage = Fraction(str(yearly_payments_percentage))\n client_cost_reduction = Fraction(str(client_cost_reduction))\n\n if days_with_payments == 0:\n payments = Fraction(0)\n else:\n payments = Fraction(days_with_payments, days_for_discount_rate)\n return (yearly_payments_percentage * client_cost_reduction * payments)", "def duty_free(price: int, discount: int, holiday_cost: int) -> int:\n if holiday_cost == 500:\n return holiday_cost\n\n discount /= 100\n price = holiday_cost / (price * discount)\n price = int(price)\n return price", "def get_current_pending_charge(self, partial_payments, validated_data):\n if partial_payments:\n previous_payments_sum = 0\n for partial_payment in partial_payments:\n previous_payments_sum += partial_payment.valorPagado\n pending_amount = previous_payments_sum % 1_000_000\n new_charge = 1_000_000 - (pending_amount + int(validated_data['valorPagado']))\n return new_charge", "def total_clearance(self):\n total_clearances = 0\n debit = 0 #variable to track the remaining debit\n clearances = self.clearance_set.all() #grab all the previous clerances\n for clearance in clearances:\n total_clearances += clearance.paid_value\n return total_clearances", "def test_discounted_payment_below_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100),\n invoiced=A(600),\n paid=A(-500),\n income=A(600).net_amount,\n tax=A(580).tax_amount,\n discounts=A(-20).net_amount,\n )", "def calculator(self, income):\n annuity = float(config.basic(income)) # 社保总额\n out = []\n if float(income) > 3500.00:\n taxable_income = (float(income) - float(annuity) - 3500.00) # 课税对象金额\n taxrate = self.tax_rate(taxable_income) # 税率\n deduction = deductions[taxrate] # 速算扣除数\n tax = taxable_income * taxrate - deduction # 个税金额\n after = float(income) - float(tax) - float(annuity) # 税后工资\n # print(\"社保总额:{}, 个税金额:{}, 税后工资:{}\".format(annuity, tax, after))\n else:\n tax = 0.00 # 个税金额\n after = float(income) - annuity\n for i in [annuity, tax, after]:\n out.append('{:.2f}'.format(i))\n return out", "def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))", "def returnDcto(self):\n if self.newTotal != self.total and self.newTotal >= 0:\n dcto = [None, None, None, None]\n dcto[0] = round(1 - (self.newTotal / self.total), 5)\n dcto[1] = self.percentage\n dcto[2] = self.amount\n dcto[3] = self.code\n else:\n dcto = [0, None, None, None]\n self.parent.orderTotal.updateDcto(dcto)\n self.accept()", "def AmOppCr(_cmp, e87482, e87487, e87492, e87497):\n\n \"\"\"\n This function calculates American Opportunity Credit\n for up to four eligible students\n\n \"\"\"\n\n # Expense should not exceed the cap of $4000.\n if _cmp == 1:\n\n c87482 = max(0., min(e87482, 4000.))\n c87487 = max(0., min(e87487, 4000.))\n c87492 = max(0., min(e87492, 4000.))\n c87497 = max(0., min(e87497, 4000.))\n else:\n c87482, c87487, c87492, c87497 = 0., 0., 0., 0.\n\n # Credit calculated as 100% of the first $2000 expense plus\n # 25% of amount exceeding $2000.\n if max(0, c87482 - 2000) == 0:\n c87483 = c87482\n else:\n c87483 = 2000 + 0.25 * max(0, c87482 - 2000)\n\n if max(0, c87487 - 2000) == 0:\n c87488 = c87487\n else:\n c87488 = 2000 + 
0.25 * max(0, c87487 - 2000)\n\n if max(0, c87492 - 2000) == 0:\n c87493 = c87492\n else:\n c87493 = 2000 + 0.25 * max(0, c87492 - 2000)\n\n if max(0, c87497 - 2000) == 0:\n c87498 = c87497\n else:\n c87498 = 2000 + 0.25 * max(0, c87497 - 2000)\n\n # Sum of credits of all four students.\n c87521 = c87483 + c87488 + c87493 + c87498\n\n return (c87482, c87487, c87492, c87497, c87483, c87488, c87493, c87498,\n c87521)", "def cash_sum(self, room):\n self.cash = room.price\n return self.cash", "def calculate_compound_total(principal, interest, n):\n return principal * (1 + interest / 100) ** n", "async def balance(self, ctx):\r\n author = ctx.author\r\n with DB() as db:\r\n company = await self.get_active_company(ctx, db, author)\r\n history = db.query(CompanyHistory).filter(CompanyHistory.company == company.id).order_by(CompanyHistory.date.desc()).limit(2).all()\r\n net_worth = history[0].value\r\n delta = history[0].value - history[1].value if len(history) == 2 else 0\r\n percent = delta * 100 / history[1].value if len(history) == 2 else 0\r\n symbol = '⮝' if delta >= 0 else '⮟'\r\n embed = discord.Embed(title=f'{company.name}', description=f'{symbol}{round(percent, 2)}%', inline=True)\r\n embed.add_field(name='Cash Assets:', value=f'{round(company.balance, 2)} USD')\r\n embed.add_field(name='Net worth:', value=f'{round(net_worth, 2)} USD')\r\n await ctx.send(embed=embed)", "def partial_charge(self, params):\n return self.post(f\"{self.gateway_path}/partial_debit\", params)", "def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)", "def discount_card(subtotal):\n\n if \"gold\" in CARDS:\n return gold_card(subtotal) #This calculates the 5%\n\n elif \"silver\" in CARDS:\n return silver_card(subtotal) #This calculates the 2%\n\n elif \"gold\" in CARDS and \"silver\" in CARDS:\n return gold_card(subtotal)\n\n else:\n return 0 #Whitout discount", "def getCashFromBalance(lines):\n\n\tdef consolidate(group):\n\t\t\"\"\"\n\t\t[List] group => [Dictionary] consolidated position\n\n\t\tgroup is a list of cash entries of the same currency, here we add up\n\t\ttheir amount\n\t\t\"\"\"\n\t\tp = group[0].copy()\n\t\tp['balance'] = sum(map(lambda p: p['balance'], group))\n\t\treturn valmap(lambda v: removeBOM(v) if isinstance(v, str) else v, p)\n\n\n\treturn compose(\n\t\tpartial(valmap, consolidate)\n\t , partial(groupbyToolz, lambda d: d['currency'])\t\t\n\t , getRawPositions\n\t , lambda lines: lognContinue('getCashFromBalance(): start', lines)\n\t)(lines)", "def calculate_down_payment_invested(self, investment_return_rate):\n money_in_account = self.down_payment_amount\n for _ in range(0, self.length_stay):\n money_in_account += (money_in_account * investment_return_rate) / 100\n\n return money_in_account", "def budget_spent_with_commission(self):\n return self.budget_spent + self.budget_spent_commission", "def get_customer_balance_sheet(self):\n total = 0\n taxes = 0\n balances = 0\n un_paid_count = 0\n conflicts = 0\n unresolved_conflicts = 0\n projected_before_tax = 0\n\n invoice_list = Customer_Invoice.objects.all()\n count = len(invoice_list)\n for invoice in invoice_list:\n if invoice.invoice_quote.total_price_quoted:\n total += invoice.invoice_quote.total_price_quoted\n taxes += invoice.invoice_quote.tax_on_quote\n balances += invoice.get_balance_due()\n else:\n projected = invoice.get_cost()\n projected_before_tax += projected[1]\n if not invoice.paid_in_full:\n un_paid_count += 1\n for conflict in invoice.conflict.all():\n conflicts += 1\n if not 
conflict.conflict_resolution:\n unresolved_conflicts += 1\n profit = total - taxes\n\n return total, taxes, profit, balances, count, conflicts, unresolved_conflicts, projected_before_tax", "def daffodils(flNeeded,amtPaid, dzCost):\n\n\n import math\n\n dz = flNeeded / 12\n dozens = math.ceil (dz) #Rounds up to the nearest dozen\n\n totCost = dzCost * dozens\n toPay = totCost - amtPaid\n\n print (\"You will need to contribute\", toPay)", "def askCash(total):\n respuesta = float(input(\"CUANTO PAGO EL REPARTIDOR? \"))\n if respuesta <= total:\n result = float(total) - respuesta\n return result\n else:\n print(\"EL PAGO TIENE QUE SER MENOR O IGUAL AL TOTAL DE LA ORDEN\")\n askCash(total)", "def __check_for_dividends(self) -> None:\n excess = self._excess_to_distribute.get()\n daofund = self._daofund_to_distirbute.get()\n\n Logger.debug(f'Found treasury excess of {excess}.', TAG)\n if excess > 0:\n try:\n Logger.debug(f'Trying to send to ({self._dividends_score.get()}): {excess}.', TAG)\n self.icx.transfer(self._dividends_score.get(), excess)\n self.FundTransfer(self._dividends_score.get(), excess, \"Excess made by games\")\n Logger.debug(f'Sent div score ({self._dividends_score.get()}) {excess}.', TAG)\n self._total_distributed.set(self._total_distributed.get() + excess)\n self._excess_to_distribute.set(0)\n except BaseException as e:\n Logger.debug(f'Send failed. Exception: {e}', TAG)\n revert('Network problem. Excess not sent. '\n f'Exception: {e}')\n\n if daofund > 0:\n try:\n self._daofund_to_distirbute.set(0)\n self.icx.transfer(self._daofund_score.get(), daofund)\n self.FundTransfer(self._daofund_score.get(), daofund, \"Excess transerred to daofund\")\n except BaseException as e:\n revert('Network problem. DAOfund not sent. '\n f'Exception: {e}')", "def calculate_bonuses (the_sum_of_current_purchase):\n the_sum_of_previous_purchases = 0\n blue_card_percent = 0.05\n silver_card_percent = 0.07\n gold_card_percent = 0.1\n the_sum_of_previous_purchases = the_sum_of_previous_purchases + the_sum_of_current_purchase\n\n if the_sum_of_previous_purchases <1000:\n bonus_for_purchase = 0\n if 1000 <= the_sum_of_previous_purchases <= 15_000:\n bonus_for_purchase = the_sum_of_current_purchase * blue_card_percent\n\n if 15001 <= the_sum_of_previous_purchases < 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * silver_card_percent\n\n if the_sum_of_previous_purchases >= 150_000:\n bonus_for_purchase = the_sum_of_current_purchase * gold_card_percent\n\n return bonus_for_purchase", "def generate_cashflows(path_account, isin_cash):\n # Read and parse\n df_account = pd.read_excel(path_account)\n df_account = df_account.rename(\n columns={\n \"Variación\": \"ccyDelta\",\n \"Unnamed: 8\": \"delta\",\n \"Saldo\": \"ccyAmount\",\n \"Unnamed: 10\": \"amount\",\n \"Fecha valor\": \"date\",\n }\n )\n\n df_account[\"date\"] = pd.to_datetime(df_account[\"date\"], dayfirst=True)\n df_account = df_account.drop(\n columns=[\"Fecha\", \"Hora\", \"Producto\", \"Tipo\", \"ID Orden\"]\n )\n\n # Generate changes in position\n deltas_df = df_account.groupby([\"date\", \"ISIN\", \"amount\"])[\"delta\"].sum()\n deltas_df = pd.DataFrame(deltas_df).reset_index()\n\n # Compute cashflows\n cashflows_df = deltas_df.pivot_table(\n index=\"date\", columns=\"ISIN\", values=\"delta\", aggfunc=\"sum\"\n )\n\n # Compute external cashflows\n cashflows_external_df = df_account.loc[\n df_account[\"Descripción\"].isin([\"Ingreso\", \"Retirada\"])\n ]\n\n # For some reason DEGIRO has the cashflows mark to market shifted by one.\n 
# and my guess is that unless there is a position transaction, they dont\n # write cash mark to markets on Fridays ...\n cashflows_df = cashflows_df.asfreq(\"D\")\n cashflows_df[isin_cash] = cashflows_df[isin_cash].shift()\n cashflows_df = cashflows_df.asfreq(\"B\")\n\n return cashflows_df, cashflows_external_df", "def calc_monthly_cash(self):\n # shortcut to self\n s = self\n\n # Start the DataFrames, base and w/ heat pump\n # Each starts with just an index column with the month\n # Make shortcut variables as well.\n s.df_mo_dol_base = dfb = s.df_mo_en_base[[]].copy()\n s.df_mo_dol_hp = dfh = s.df_mo_en_base[[]].copy()\n\n # Determine the base electric use by month. Approach is different \n # if there is electric heat.\n is_electric_heat = (s.exist_heat_fuel_id == constants.ELECTRIC_ID)\n if not is_electric_heat:\n # Fuel-based space heat.\n # The User supplied a January and a May kWh usage value that should\n # be used for the base case (no heat pump) total electricity use.\n # But, need to come up with a kWh value for every month. Do that by\n # adjusting the kWh pattern available for this city.\n #\n # Determine the multiplier to adjust to the pattern to the actual.\n pat_use = np.array(s.city.avg_elec_usage)\n mult = (s.elec_use_jan - s.elec_use_may) / (pat_use[0] - pat_use[4])\n pat_use = mult * pat_use\n pat_use += s.elec_use_jan - pat_use[0]\n\n # The electricity use in the base case\n dfb['elec_kwh'] = pat_use\n\n # rough estimate of a base demand: not super critical, as the demand rate \n # structure does not have blocks. Assume a load factor of 0.4\n dfb['elec_kw'] = dfb.elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n else:\n # Electric Heat Case\n # No Jan and May values are provided. Instead we have possibly some\n # DHW, clothes drying, and cooking. Plus, we have base lights/other appliances.\n # And finally we have the Elecric heat making up the base electric usage.\n\n # First, DHW, Clothes Drying and Cooking. Assume flat use through year.\n # This is a numpy array because DAYS_IN_MONTH is an array.\n elec_kwh = s.fuel_other_uses / 8760.0 * DAYS_IN_MONTH * 24.0\n\n # Now lights and other misc. appliances. Some monthly variation, given\n # by LIGHTS_OTHER_PAT.\n elec_kwh += s.lights_other_elec / 8760.0 * LIGHTS_OTHER_PAT * DAYS_IN_MONTH * 24.0\n\n # For the peak demand of those two categories of use, just assume 40% load factor.\n elec_kw = elec_kwh / (DAYS_IN_MONTH * 24.0) / 0.4\n\n # Now add in space heating kWh and kW\n elec_kwh += s.df_mo_en_base.total_kwh.values\n elec_kw += s.df_mo_en_base.total_kw.values\n\n # store results\n dfb['elec_kwh'] = elec_kwh\n dfb['elec_kw'] = elec_kw\n\n # Make an object to calculate electric utility costs\n elec_cost_calc = ElecCostCalc(s.utility, sales_tax=s.sales_tax, pce_limit=s.pce_limit)\n # cost function that will be applied to each row of the cost DataFrame\n cost_func = lambda r: elec_cost_calc.monthly_cost(r.elec_kwh, r.elec_kw)\n\n dfb['elec_dol'] = dfb.apply(cost_func, axis=1)\n\n if not is_electric_heat:\n # Now fuel use by month. Remember that the home heat model only looked at\n # space heating, so we need to add in the fuel use from the other end uses\n # that use this fuel.\n dfb['secondary_fuel_units'] = s.df_mo_en_base.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfb['secondary_fuel_dol'] = dfb.secondary_fuel_units * s.exist_unit_fuel_cost * (1. 
+ s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfb['secondary_fuel_units'] = 0.0\n dfb['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfb['total_dol'] = dfb.elec_dol + dfb.secondary_fuel_dol\n\n # Now with the heat pump\n # determine extra kWh used in the heat pump scenario. Note, this will\n # be negative numbers if the base case used electric heat.\n extra_kwh = (s.df_mo_en_hp.total_kwh - s.df_mo_en_base.total_kwh).values\n dfh['elec_kwh'] = dfb['elec_kwh'] + extra_kwh\n extra_kw = (s.df_mo_en_hp.total_kw - s.df_mo_en_base.total_kw).values\n dfh['elec_kw'] = dfb['elec_kw'] + extra_kw\n dfh['elec_dol'] = dfh.apply(cost_func, axis=1)\n\n # Now fuel, including other end uses using the heating fuel\n if not is_electric_heat:\n dfh['secondary_fuel_units'] = s.df_mo_en_hp.secondary_fuel_units + \\\n s.fuel_other_uses / 12.0\n dfh['secondary_fuel_dol'] = dfh.secondary_fuel_units * s.exist_unit_fuel_cost * (1. + s.sales_tax)\n else:\n # Electric Heat, so no secondary fuel\n dfh['secondary_fuel_units'] = 0.0\n dfh['secondary_fuel_dol'] = 0.0\n\n # Total Electric + space heat\n dfh['total_dol'] = dfh.elec_dol + dfh.secondary_fuel_dol", "def cash_money(amount: float) -> dict:\n breakdown = {100: 0, 50: 0, 20: 0, 5: 0, 2: 0, 1: 0, 0.25: 0, 0.10: 0, 0.05: 0, 0.01: 0} # setup dict\n amount = round(amount, 2) # round to 2 decimal places\n\n for key in breakdown.keys(): # loop through dict\n try:\n denomination = int(amount // key)\n except (TypeError, ZeroDivisionError) as error:\n print('Cannot divide by that argument!')\n return\n else:\n breakdown[key] = denomination # set value to amount of that denomination\n amount -= denomination * key\n\n breakdown = {k: v for k, v in breakdown.items() if v != 0} # remove empty keys\n\n return breakdown", "def _calc_return(self, order_original, perf_df):\r\n\r\n order = order_original.copy()\r\n no_sec = len(self.perf_data)\r\n price_names = np.array(['price_' + str(i) for i in xrange(1, no_sec + 1)])\r\n ret = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n transaction_cost = 0\r\n\r\n # buy_list vs sell_list contains order bought vs sold that cannot be matched yet to determine the return\r\n # For example when something has been bought, but nothing or not enough has been sold yet, the residue will be\r\n # listed in these lists.\r\n buy_shares = np.zeros((np.shape(order)[0], no_sec))\r\n buy_price = np.zeros((np.shape(order)[0], no_sec))\r\n sell_shares = np.zeros((np.shape(order)[0], no_sec))\r\n sell_price = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n # bl_first vs sl_first indicates which row in buy_list vs sell_list can be used to \"match\" bought/sold shares.\r\n # It automatically points to the oldest row with still outstanding shares. 
Initial value is -1\r\n # bl_last vs sl_last indicates which row in buy_list vs sell_list can be used to write outstanding shares to.\r\n bl_first = np.ones(no_sec).astype(int) * -1\r\n bl_last = np.zeros(no_sec).astype(int)\r\n sl_first = np.ones(no_sec).astype(int) * -1\r\n sl_last = np.zeros(no_sec).astype(int)\r\n\r\n for ind in range(0, np.shape(order)[0]):\r\n bl_first[(bl_first == -1) & (bl_last > 0)] = 0\r\n sl_first[(sl_first == -1) & (sl_last > 0)] = 0\r\n\r\n # Three situations, per type: buy, sell, nothing\r\n # If nothing, skip to next day\r\n # Only returns made on one day are determined, later they will be accumulated.\r\n\r\n # Situation A.A: Sell order & outstanding buys larger than sell order\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(buy_shares, 0)\r\n share_compl = (share_cumsum < -order[ind, :]) & col_to_change\r\n numb_shares = sum(buy_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += numb_shares * perf_df.loc[ind, price_names[col_to_change]] \\\r\n - sum(buy_shares * buy_price * share_compl, 0)[col_to_change]\r\n buy_shares[share_compl] = 0\r\n bl_first += sum(share_compl)\r\n order[col_to_change] += numb_shares\r\n\r\n ret[ind, col_to_change] += perf_df.loc[ind, price_names[col_to_change]] * -order[ind, col_to_change] * (1 - transaction_cost) \\\r\n - buy_price[bl_first[col_to_change], col_to_change] \\\r\n * -order[ind, col_to_change] * (1 + transaction_cost)\r\n buy_shares[bl_first[col_to_change], col_to_change] += order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation A.B: Sell order & outstanding buys smaller than or equal to sell order\r\n # --> just fill out all outstanding buys, and change order. 
This order will be added to sell list in A.C\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > 0) \\\r\n & (np.sum(buy_shares, 0) <= -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = buy_shares[:, col_to_change]\r\n price_shares = buy_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares, 0) * \\\r\n perf_df.loc[ind, price_names[col_to_change]].values * (1 - transaction_cost) \\\r\n - np.sum(numb_shares * price_shares, 0) * (1 + transaction_cost)\r\n order[ind, col_to_change] += np.sum(numb_shares, 0)\r\n buy_shares[:, col_to_change] = 0\r\n bl_first[col_to_change] = bl_last[col_to_change] - 1\r\n\r\n # Situation A.C: Sell order & no outstanding buys\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n sell_shares[row_to_change, col_to_change] = -order[ind, col_to_change]\r\n sell_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n sl_last[col_to_change] += 1\r\n\r\n # Situation B.A: Buy order & outstanding sells larger than buy order\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) > order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(sell_shares, 0)\r\n share_compl = (share_cumsum < order[ind, :]) & col_to_change\r\n numb_shares = sum(sell_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += sum(sell_shares * sell_price * share_compl, 0)[col_to_change] * (1 - transaction_cost)\\\r\n - numb_shares * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n sell_shares[share_compl] = 0\r\n sl_first += sum(share_compl)\r\n order[col_to_change] += -numb_shares\r\n\r\n ret[ind, col_to_change] += sell_price[sl_first[col_to_change], col_to_change] * order[ind, col_to_change] * (1 - transaction_cost)\\\r\n - perf_df.loc[ind, price_names[col_to_change]] * order[ind, col_to_change] * (1 + transaction_cost)\r\n sell_shares[sl_first[col_to_change], col_to_change] += -order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation B.B: Buy order & outstanding sells smaller than buy order\r\n # --> just fill out all outstanding sells, and change order. 
This order will be added to buy list in B.C\r\n col_to_change = (order[ind, :] > 0) & \\\r\n (np.sum(sell_shares, 0) > 0) & (np.sum(sell_shares, 0) <= order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = sell_shares[:, col_to_change]\r\n price_shares = sell_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares * price_shares, 0) * (1 - transaction_cost) \\\r\n - np.sum(numb_shares, 0) * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n order[ind, col_to_change] += -np.sum(numb_shares, 0)\r\n sell_shares[:, col_to_change] = 0\r\n sl_first[col_to_change] = sl_last[col_to_change] - 1\r\n\r\n # Situation B.C: Buy order & no outstanding sells\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n buy_shares[row_to_change, col_to_change] = order[ind, col_to_change]\r\n buy_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n bl_last[col_to_change] += 1\r\n\r\n ret_abs = np.array([sum(ret[:r]) for r in range(1, len(ret) + 1)])\r\n returns_abs = np.sum(ret_abs, 1)\r\n returns_rel = [i / self.context['max_notional'] + 1 for i in returns_abs]\r\n\r\n return returns_rel, returns_abs, ret_abs", "def curProfit(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, unitCost, coff):\n\treturn curDemand(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, coff) * (curPrice - unitCost)", "def cumulative_returns(shares_allocation, capital, test_data):\n\n # list of DataFrames of cumulative returns for each stock\n daily_returns = []\n\n # iterates over every stock in the portfolio\n for stock in shares_allocation.index:\n\n # multiples shares by share prices in the validation dataset\n daily_returns.append(shares_allocation.loc[stock].values * test_data[stock])\n\n # concatenates every DataFrame in the above list to a single DataFrame\n daily_returns_df = pd.concat(daily_returns, axis=1).reset_index()\n\n # sets the index as the date\n daily_returns_df.set_index(\"Day\", inplace=True)\n\n # adds the cumulative returns for every stock\n cumulative_daily_returns = daily_returns_df.sum(axis=1)\n\n # returns the cumulative daily returns of the portfolio\n return cumulative_daily_returns", "def payments(self, loan):\n self.currency_interest = \"XBT\"\n \n \"\"\"The lender agrees to provide the borrower half of the loan amount\n on the initial loan on the initial date\"\"\"\n loan.fund(on=self.initial_loan_date,\n amount=self.total_loan_amount * \\\n Decimal(0.5))\n \"\"\"The lender agrees to pledge the remaining loan amount toward\n the kickstarter campaign of the borrower.\"\"\"\n loan.fund(on=self.kickstarter_payment_date,\n amount=self.total_loan_amount * \\\n Decimal(0.5))\n \"\"\" Standard payment schedule - The borrower intends to\n payback period will be separated into 8 installments and\n completed in 8 months. The payback will begin in the 5th\n month. However, unless the special conditions are triggered,\n the borrower is required to only pay the interest on the loan\n until the final payment date.\"\"\"\n\n \"\"\" Special payment schedule - If First campaign funded over\n USD 65,000, the borrower must pay back entire loan including\n one year interest within the two months after Crowd Funding\n Platform pay the fund.\"\"\"\n\n \"\"\" If First campaign funded over USD 58,000, will pay back 4\n Installment in advance, after Crowd Funding Platform pay the\n fund. 
The rest of the loan will keep paying followed the\n standard schedule until all loan including interest is paid\n back.\"\"\"\n\n if (self.kickstarter_revenue > Money(65000, \"USD\")):\n payment_date = self.kickstarter_payment_date + \\\n relativedelta(months=2)\n loan.add_to_balance(on=payment_date,\n amount = loan.interest(payment_date,\n self.final_payment_date,\n loan.remaining_balance()))\n loan.payment(on=payment_date,\n amount = loan.remaining_balance())\n else:\n if (self.kickstarter_revenue > Money(58000, \"USD\")):\n payment_date = self.kickstarter_payment_date + \\\n relativedelta(months=2)\n loan.payment(on=payment_date,\n amount = lambda : loan.remaining_principal()() * Decimal(0.5))\n start_payment_date = self.initial_loan_date + \\\n relativedelta(months=4)\n loan.amortize(on=start_payment_date,\n amount = loan.remaining_balance(),\n payments=8,\n interval=relativedelta(months=1))\n \"\"\"The borrower agrees to pay back the any remaining principal\n and accrued interest one year after the loan is issued.\"\"\"\n loan.payment(on=self.final_payment_date,\n amount= loan.remaining_balance())", "def get_balance(self, payments):\n # calc monthly interest\n monthly_interest = self.__calculate_monthly_interest()\n m = 1 + monthly_interest\n\n # calculate balance\n balance = self.principle * (\n ((m ** self.__months) - (m ** payments)) / (\n (m ** self.__months) - 1))\n return balance", "def payment_transaction():\n print(\"Please insert coins.\")\n payment = dict(quarters=int(input(\"How many quarters?:\")) * 0.25,\n dime=int(input(\"How many dimes?: \")) * 0.10,\n nickles=int(input(\"How many nickles?: \")) * 0.05,\n pennies=int(input(\"How many pennies?: \")) * 0.01\n )\n\n return round(sum(payment.values()), 2)", "def test_discounted_payment_matching_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(480), A(20), A(0))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-500),\n income=A(500).net_amount,\n tax=A(480).tax_amount,\n discounts=A(-20).net_amount,\n )", "def calc_fee(self, shares):\n return max(self.commission_min, abs(self.commission_pct * shares))", "def compute_total_customs_duty(self):\n for rec in self:\n total = 0.0\n extra_duty = 0.0\n price_total = rec.quantity * rec.unit_price\n# total = (price_total * duty_percentage)/100\n rec.price_total = price_total\n# for hts in rec.hts_ids:\n# if hts.extra_duty_applicable:\n# extra_duty += ((rec.quantity/hts.quantity) * hts.extra_duty)\n# rec.total = total + extra_duty\n\n return True", "def calculate_balance_contributions(self, request, parent_lookup_client, pk, format=None):\n # TODO: Make this work\n return Response(6666666)", "def total_paid(self) -> Decimal:\n return self.total_principal + self.total_interest", "def calculate_payments(yearly_payments_percentage, cost_reductions,\n days_with_payments, days_for_discount_rate):\n\n return [period_payment(yearly_payments_percentage, ccr,\n days_with_payments[i], days_for_discount_rate[i])\n for i, ccr in enumerate(cost_reductions)]", "def test_17_transaction_create_sell_cash(self):\n portfolio = Portfolio.get_portfolio_by_slug(\"test\")\n user = \"automated unit tester\"\n\n sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=100000,\n unit_price=1.17,\n user=user\n )\n\n self.assertTrue(isinstance(sell_cash_eur, Transaction),\n msg=\"Transaction is NOT returning a valid 
object while selling EUR in cash\")\n print(\"Transaction sell_cash method is returning a valid EUR transaction: {}\".format(\n sell_cash_eur))\n\n \"\"\"Is transaction avoiding short sell cash objects?\"\"\"\n short_sell_cash_eur = Transaction.sell_cash(\n portfolio=portfolio,\n asset=\"EUR\",\n t_currency=TRANSACTION_CURRENCY_USD,\n amount=500000,\n unit_price=1.10,\n user=user\n )\n\n self.assertFalse(isinstance(short_sell_cash_eur, Transaction),\n msg=\"Transaction is NOT avoiding short selling EUR in cash\")\n print(\"Transaction sell_cash method is avoiding a short sell EUR transaction: {}\".format(\n short_sell_cash_eur))", "def test_adjusted_payment_below_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # <- job still has some balance\n invoiced=A(580),\n paid=A(-480), # <- 20.00 adjusted\n debited=A(600),\n credited=A(-500),\n income=A(580).net_amount,\n tax=A(580).tax_amount,\n ) # <- income is higher than bank balance", "def calculate_commission(self):\n now = datetime.datetime.today()\n dates_range = [\n utils.get_datetime(datetime_info)\n for datetime_info in ['00:00:01', '12:00:00', '23:59:59']\n ]\n if dates_range[0] <= now <= dates_range[0]:\n self.commission = (self.price * randint(0, 5))/100\n else:\n self.commission = (self.price * randint(4, 10))/100\n self.save()", "def get_payout_rule(self):\n assert self.project\n\n # 1st of January 2014\n start_2014 = timezone.datetime(2014, 1, 1, tzinfo=timezone.utc)\n\n if self.project.created >= start_2014:\n # New rules per 2014\n\n if self.project.amount_donated >= self.project.amount_asked:\n # Fully funded\n # If it's a Cheetah campaign then set 0 percent rule.\n partners = PartnerOrganization.objects.filter(slug__in=['cheetah']).all()\n if self.project.partner_organization in partners:\n return self.PayoutRules.zero\n # Default payout rule is 7 percent.\n return self.PayoutRules.seven\n elif self.project.amount_donated < settings.MINIMAL_PAYOUT_AMOUNT:\n # Funding less then minimal payment amount.\n return self.PayoutRules.hundred\n else:\n # Not fully funded\n return self.PayoutRules.twelve\n\n # Campaign started before 2014\n # Always 5 percent\n return self.PayoutRules.five", "def PV_NetCashflows(t):\n if t > last_t:\n return 0\n else:\n return (prj_incm_Premium(t)\n - prj_exps_Total(t)\n - prj_bnft_Total(t) / (1 + DiscRate(t))\n + PV_NetCashflows(t + 1) / (1 + DiscRate(t)))", "def calculate_contributions_balance(self, request, parent_lookup_client, pk, format=None):\n # TODO: Make this work\n return Response({'btc_amount': 2222, 'atc_amount': 88})", "def be_contribution_margin(cont_margin):\n\n # Create a dictionary for user input\n fixed_costs = {}\n\n # Request information for the calculation of a break even point\n print(\"What are your fixed costs - i.e. salary, instruments, etc.\")\n while True:\n try:\n item = input(\"Please enter expense name or done to compute results: \").capitalize()\n if item == 'Done':\n break\n else:\n cost = float(input(\"Cost $: \"))\n fixed_costs[item] = cost\n except:\n print(\"Invalid value entered, please try again. 
\")\n\n total_fixed_costs = sum(fixed_costs.values())\n contribution_margin = cont_margin\n break_even_point = total_fixed_costs / contribution_margin\n\n print(\"Fixed costs: {}\\nTotal fixed costs: {}\\nContribution margin: {}\\nBreak even point in units: {}\".format\n (fixed_costs, total_fixed_costs, contribution_margin, break_even_point))", "def cash_balance(self):\n cash_transaction = CashTransaction(self.user)\n return cash_transaction.get_balance_amount()", "def customer_paid(request, user_correct, tickets, total, payment_id):\n comp = Competition.objects.get(is_active=True)\n user = User.objects.get(id=request.user.id)\n order = Order.objects.get(user=user, ordered=False)\n new_order = update_orders(comp, order, user_correct, payment_id)\n if user_correct:\n create_entries(order, user, comp, tickets, new_order)\n email_order(request, order, user_correct)\n check_for_new_competition(comp)\n if comp.tickets_left == 0:\n pick_competition_winner()\n request.session['order_id'] = order.id", "def total_equity(self):\n return self.total_market_value + self.cash", "def discount(self, cart):", "def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()", "def compute_periodpayoff(self):\n logger.debug(u\"{} Period Payoff\".format(self.joueur))\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff = 0\n\n # cumulative payoff since the first period\n if self.currentperiod.EXPERIENCE_NOM_COURT_period < 2:\n self.currentperiod.EXPERIENCE_NOM_COURT_cumulativepayoff = \\\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff\n else: \n previousperiod = self.periods[self.currentperiod.EXPERIENCE_NOM_COURT_period - 1]\n self.currentperiod.EXPERIENCE_NOM_COURT_cumulativepayoff = \\\n previousperiod.EXPERIENCE_NOM_COURT_cumulativepayoff + \\\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff\n\n # we store the period in the self.periodes dictionnary\n self.periods[self.currentperiod.EXPERIENCE_NOM_COURT_period] = self.currentperiod\n\n logger.debug(u\"{} Period Payoff {}\".format(\n 
self.joueur,\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff))", "def test_portfolio_balance(\n session, account_checking, account_savings, account_sp500, asset_krw, asset_sp500\n):\n portfolio = Portfolio()\n portfolio.base_asset = asset_krw\n portfolio.add_accounts(account_checking, account_savings, account_sp500)\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {}\n\n deposit(account_checking, asset_krw, 1500, parse_date(\"2016-05-01\"))\n deposit(account_savings, asset_krw, 3000, parse_date(\"2016-05-01\"))\n deposit(account_sp500, asset_sp500, 120, parse_date(\"2016-05-01\"))\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {\n asset_krw: 4500,\n asset_sp500: 120,\n }\n\n deposit(account_savings, asset_krw, 4000, parse_date(\"2016-05-02\"))\n deposit(account_savings, asset_krw, 5000, parse_date(\"2016-05-03\"))\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {\n asset_krw: 13500,\n asset_sp500: 120,\n }\n\n balance_adjustment(account_savings, asset_krw, 10000, parse_date(\"2016-05-04\"))\n\n assert portfolio.balance(parse_date(\"2016-05-20\")) == {\n asset_krw: 11500,\n asset_sp500: 120,\n }\n\n session.delete(portfolio)\n session.commit()", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "def get_duration_dispersion_convexity(self,periods=1, current_period = 0): \n discounted_cashflow = [self.payment/(1+self.interest)**i for i in self.index[current_period:]]\n discount_sum = sum(discounted_cashflow)\n weight = [cf/discount_sum for cf in discounted_cashflow]\n time_weight = [weight[i] * i for i in range(1,len(weight))]\n sum_time_weight = sum(time_weight)\n dispersion_array = [((i - sum_time_weight)**2)*weight[i] for i in range(1,len(weight))]\n dispersion_statistic = sum(dispersion_array)\n cashflow_yield = np.irr([-self.table[\"balance\"][current_period]] + [self.payment] * (self.maturity - current_period))\n convexity_array = [i * (i+1) * weight[i] for i in range(1,len(weight))]\n convexity_statistic = sum(convexity_array)/(1+cashflow_yield)**2\n convexity = (sum_time_weight ** 2 + sum_time_weight + dispersion_statistic)/(1+cashflow_yield)**2\n \n return {\"duration\":sum_time_weight/periods,\\\n \"dispersion\":dispersion_statistic/periods,\\\n \"convexity\":convexity_statistic/periods}", "def budget_balance(self):\n budget_balance = round(self.budget() - self.total_spent(), 2)\n budget_balance_degree = round( (9000 * self.total_spent()) / (self.budget()), 4) #convert to degrees and round to four decimal places\n return (budget_balance, budget_balance_degree)", "def test_happy_path_scenario(self):\n debit_jobs([(self.job, A(480), Entry.FLAT_DEBIT)]) # progress invoice\n credit_jobs([(self.job, A(100), A(0), A(0))], D(100)) # progress payment\n debit_jobs(\n [(self.job, A(480), Entry.FLAT_DEBIT)], recognize_revenue=True\n ) # final invoice\n credit_jobs([(self.job, A(800), A(60), A(0))], D(800)) # final payment\n\n self.assert_balances(\n bank=A(900, 0, 0),\n invoiced=A(960),\n paid=A(-960),\n debited=A(480 * 2 + 380),\n credited=A(-480 * 2 - 380),\n income=A(960).net_amount,\n tax=A(900).tax_amount,\n discounts=A(-60).net_amount,\n )\n\n total_income = income_account().balance + discount_account().balance\n 
self.assertEqual(total_income, A(900).net_amount)", "def options_to_withdraw(self, amount):\n counter = PaperMoneyCounter() # aux class\n options = [] # options to withdraw\n remaining_cash = 0 # aux var\n\n if (amount % 20 == 0 or amount % 50 == 0) and (amount <= 1000): # is it allowed to withdraw?\n # prioritizing 100-dollar bills\n qtt_100s = counter.how_many_100s(amount)\n remaining_cash = counter.remaining_cash_without_100s(amount)\n\n qtt_50s = counter.how_many_50s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_50s(remaining_cash)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 50-dollar bills\n qtt_100s = 0\n\n qtt_50s = counter.how_many_50s(amount)\n remaining_cash = counter.remaining_cash_without_50s(amount)\n\n qtt_20s = counter.how_many_20s(remaining_cash)\n remaining_cash = counter.remaining_cash_without_20s(remaining_cash)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n # prioritizing 20-dollar bills\n qtt_100s = 0\n\n qtt_50s = 0\n\n qtt_20s = counter.how_many_20s(amount)\n\n if counter.cash(qtt_100s, qtt_50s, qtt_20s) == amount:\n if not(options[0] == [qtt_100s, qtt_50s, qtt_20s]):\n if not(options[1] == [qtt_100s, qtt_50s, qtt_20s]):\n options.append([int(qtt_100s), int(qtt_50s), int(qtt_20s)])\n\n return options\n\n return None # if it wasn't allowed to withdraw", "def list_dividends(\n self,\n ticker: Optional[str] = None,\n ticker_lt: Optional[str] = None,\n ticker_lte: Optional[str] = None,\n ticker_gt: Optional[str] = None,\n ticker_gte: Optional[str] = None,\n ex_dividend_date: Optional[Union[str, date]] = None,\n ex_dividend_date_lt: Optional[Union[str, date]] = None,\n ex_dividend_date_lte: Optional[Union[str, date]] = None,\n ex_dividend_date_gt: Optional[Union[str, date]] = None,\n ex_dividend_date_gte: Optional[Union[str, date]] = None,\n record_date: Optional[Union[str, date]] = None,\n record_date_lt: Optional[Union[str, date]] = None,\n record_date_lte: Optional[Union[str, date]] = None,\n record_date_gt: Optional[Union[str, date]] = None,\n record_date_gte: Optional[Union[str, date]] = None,\n declaration_date: Optional[Union[str, date]] = None,\n declaration_date_lt: Optional[Union[str, date]] = None,\n declaration_date_lte: Optional[Union[str, date]] = None,\n declaration_date_gt: Optional[Union[str, date]] = None,\n declaration_date_gte: Optional[Union[str, date]] = None,\n pay_date: Optional[Union[str, date]] = None,\n pay_date_lt: Optional[Union[str, date]] = None,\n pay_date_lte: Optional[Union[str, date]] = None,\n pay_date_gt: Optional[Union[str, date]] = None,\n pay_date_gte: Optional[Union[str, date]] = None,\n frequency: Optional[Union[int, Frequency]] = None,\n cash_amount: Optional[float] = None,\n cash_amount_lt: Optional[float] = None,\n cash_amount_lte: Optional[float] = None,\n cash_amount_gt: Optional[float] = None,\n cash_amount_gte: Optional[float] = None,\n dividend_type: Optional[Union[str, DividendType]] = None,\n limit: Optional[int] = None,\n sort: Optional[Union[str, Sort]] = None,\n order: Optional[Union[str, Order]] = None,\n params: Optional[Dict[str, Any]] = None,\n raw: bool = False,\n options: Optional[RequestOptionBuilder] = None,\n ) -> Union[Iterator[Dividend], 
HTTPResponse]:\n url = \"/v3/reference/dividends\"\n\n return self._paginate(\n path=url,\n params=self._get_params(self.list_dividends, locals()),\n raw=raw,\n deserializer=Dividend.from_dict,\n options=options,\n )", "def buy_and_pay(self):\n return self.price", "def _pay(self, asked_value):\n\t\tpayment_value = min(self.stock, asked_value)\n\t\tself.stock -= payment_value\n\t\treturn payment_value", "def get_total_to_pay(self):\n self.__total_to_pay = Order.get_price_subtotals(self) + \\\n Order.get_qst_subtotals(self) + \\\n Order.get_gst_subtotals(self)\n return self.__total_to_pay", "def add_dividend(data, price='Close', adj='Adj_Close', out='Dividend'):\n logger = logging.getLogger(__name__)\n share = data[adj] / data[price]\n share = (share - share.shift(1)) / share.shift(1)\n data.loc[:,out] = np.round(share * data[price], 3).fillna(0)", "def landlord_button_deposite_pay(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'supplier',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'outbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }", "def amount_to_charge(opportunity):\n amount = float(opportunity.amount)\n if opportunity.agreed_to_pay_fees:\n total = (amount + 0.30) / (1 - 0.022)\n else:\n total = amount\n return quantize(total)", "def get_payment(self):\n return self._payment_per_hour * self._hours_worked", "def cashier():\n rows = db.execute(\"SELECT cash FROM users WHERE id==:user_id\", user_id=session[\"user_id\"])\n if request.method==\"GET\":\n return render_template(\"cashier.html\", cash=rows[0][\"cash\"])\n else:\n # multiple forms in same html that send post request to same handler in flask see https://stackoverflow.com/questions/19794695/flask-python-buttons\n if request.form['submit_button'] == 'Value-1':\n deposit = float(request.form.get(\"deposit\"))\n # Update cash in 'users' table\n new_cash = rows[0][\"cash\"]+deposit\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # Insert transaction into table cashier_history\n db.execute(\"INSERT INTO cashier_history(user_id, amount, deposit) VALUES(:user_id, 
:amount, :deposit)\", user_id=session[\"user_id\"], amount=deposit, deposit=1)\n # message to be retrieved in portfolio.html when user deposits cash\n flash('Deposit!')\n return redirect(\"/\")\n else:\n withdraw = float(request.form.get(\"withdraw\"))\n withdraw = withdraw*(-1)\n # Update cash in 'users' table\n new_cash = rows[0][\"cash\"]+withdraw\n db.execute(\"UPDATE users SET cash==:cash WHERE id==:user_id\", user_id=session[\"user_id\"], cash=new_cash)\n # Insert transaction into table cashier_history\n db.execute(\"INSERT INTO cashier_history(user_id, amount, deposit) VALUES(:user_id, :amount, :deposit)\", user_id=session[\"user_id\"], amount=withdraw, deposit=0)\n # message to be retrieved in portfolio.html when user withdraws cash\n flash('Withdraw!')\n return redirect(\"/\")", "def get_operation_balance_sheet(self):\n date_list = Operation_Invoice.objects.all().dates('origin', 'year')\n\n for years in date_list:\n Operation_Invoice.objects.filter(origin__year = years.year)\n\n expenses = 0\n balances = 0\n un_paid_count = 0\n conflicts = 0\n unresolved_conflicts = 0\n\n invoice_list = Operation_Invoice.objects.all()\n count = len(invoice_list)\n for invoice in invoice_list:\n expenses += invoice.invoice_amount\n balances += invoice.get_balance_due()\n if not invoice.paid_in_full:\n un_paid_count += 1\n for conflict in invoice.conflict.all():\n conflicts += 1\n if not conflict.conflict_resolution:\n unresolved_conflicts += 1\n\n return expenses, balances, count, conflicts, unresolved_conflicts", "def construct_current_holdings(self):\n d = dict((k,v) for k,v in [(s,0.0) for s in self.symbol_list])\n d['cash'] = self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n \n return d", "def construct_current_holdings(self):\n d = dict((k,v) for k,v in [(s,0.0) for s in self.symbol_list])\n d['cash'] = self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n \n return d", "def test_adjusted_payment_matching_debit_with_recognized_revenue(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)], recognize_revenue=True)\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(480),\n paid=A(-480),\n debited=A(500),\n credited=A(-500),\n income=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def test_underpayment(self):\n debit_jobs([(self.job, A(500), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(0))], D(480))\n diff = A(500) - A(480)\n self.assert_balances(\n bank=A(480, 0, 0),\n invoiced=A(500),\n paid=A(-480),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n balance=diff,\n promised=diff,\n ) # <- negative balances because of overpayment", "def create_charges_for_balance(until=None, dry_run=False):\n #pylint:disable=too-many-nested-blocks\n until = datetime_or_now(until)\n LOGGER.info(\"create charges for balance at %s ...\", until)\n for organization in get_organization_model().objects.filter(\n nb_renewal_attempts__lt=settings.MAX_RENEWAL_ATTEMPTS):\n charges = Charge.objects.in_progress_for_customer(organization)\n # We will create charges only when we have no charges\n # already in flight for this customer.\n if not charges.exists():\n invoiceables = Transaction.objects.get_invoiceables(\n organization, until=until)\n LOGGER.debug(\"invoicables for %s until %s:\", organization, until)\n for invoicable in invoiceables:\n LOGGER.debug(\"\\t#%d %s %s\", invoicable.pk,\n invoicable.dest_amount, invoicable.dest_unit)\n balances = 
sum_dest_amount(invoiceables)\n if len(balances) > 1:\n raise ValueError(\"balances with multiple currency units (%s)\" %\n str(balances))\n # `sum_dest_amount` guarentees at least one result.\n invoiceable_amount = balances[0]['amount']\n invoiceable_unit = balances[0]['unit']\n if invoiceable_amount > 50:\n # Stripe will not processed charges less than 50 cents.\n active_subscriptions = Subscription.objects.active_for(\n organization, ends_at=until).filter(auto_renew=True)\n if not active_subscriptions.exists():\n # If we have a past due but it is not coming from a renewal\n # generated earlier, we will make a note of it but do not\n # charge the Card. Each provider needs to decide what to do\n # about collections.\n LOGGER.info('REVIEW %d %s to %s (requires manual charge)',\n invoiceable_amount, invoiceable_unit, organization)\n else:\n try:\n if not organization.processor_card_key:\n raise CardError(_(\"No payment method attached\"),\n 'card_absent')\n if not dry_run:\n Charge.objects.charge_card(\n organization, invoiceables,\n created_at=until)\n LOGGER.info('CHARGE %d %s to %s',\n invoiceable_amount, invoiceable_unit,\n organization)\n except CardError as err:\n # There was a problem with the Card (i.e. expired,\n # underfunded, etc.)\n charge_processor_key = getattr(\n err, 'charge_processor_key', None)\n LOGGER.info('FAILED CHARGE %d %s to %s (%s: %s)',\n invoiceable_amount, invoiceable_unit,\n organization.slug, charge_processor_key,\n err, extra={\n 'event': 'card-error',\n 'charge': charge_processor_key,\n 'detail': err.processor_details(),\n 'organization': organization.slug,\n 'amount': invoiceable_amount,\n 'unit': invoiceable_unit})\n organization.nb_renewal_attempts = (\n organization.nb_renewal_attempts + 1)\n final_notice = False\n if (organization.nb_renewal_attempts >=\n settings.MAX_RENEWAL_ATTEMPTS):\n final_notice = True\n if not dry_run:\n active_subscriptions.unsubscribe(at_time=until)\n if not dry_run:\n organization.save()\n signals.renewal_charge_failed.send(\n sender=__name__,\n invoiced_items=invoiceables,\n total_price=Price(\n invoiceable_amount, invoiceable_unit),\n final_notice=final_notice)\n except ProcessorError:\n # An error from the processor which indicates\n # the logic might be incorrect, the network down,\n # etc. 
We have already notified the admin\n # in `charge_card_one_processor`.\n pass\n elif invoiceable_amount > 0:\n LOGGER.info('SKIP %d %s to %s (less than 50 %s)',\n invoiceable_amount, invoiceable_unit, organization,\n invoiceable_unit)\n else:\n LOGGER.info('SKIP %s (one charge already in flight)',\n organization)", "def budget_problem3(balance, annualInterestRate):\r\n remaining = balance\r\n\r\n # creating the following bounds assists with bisection search\r\n lo = balance/12\r\n hi = ((balance * (annualInterestRate/12))**12)/12\r\n payment = (lo + hi)/2\r\n\r\n while remaining != 0:\r\n for month in range(12):\r\n remaining = (remaining - payment) * (1 + (annualInterestRate/12))\r\n if remaining > 0:\r\n lo = payment\r\n elif round(remaining,2) < -0.11:\r\n hi = payment\r\n else:\r\n break\r\n payment = (lo + hi)/2\r\n remaining = balance\r\n print 'Lowest Payment: ' + str(round(payment,2))\r\n return round(payment,2)", "def main():\n price = int(input())\n buy = int(input())\n free = int(input())\n must = int(input())\n\n buy1 = must // (buy+free)\n donut = buy1 * (buy+free)\n must %= buy+free\n\n pay = buy1*price*buy\n\n if must >= buy:\n buy1 = must // buy\n donut += buy1 * (buy+free)\n pay += buy1 * buy * price\n\n else:\n donut += must\n pay += must * price\n\n print(pay, donut)", "def calculate_contributions(self, request, parent_lookup_client, pk, format=None):\n # TODO: Make this work\n return Response({'btc_amount': 1111, 'atc_amount': 0})", "def calc(p=12, e=0.1):\n global base, total\n\n for i in range(1, p + 1):\n r = base * e\n print(\"Period: {}\".format(i))\n print(\" Return: {}\".format(r))\n base = base + r + putIn\n print(\" Base: {}\".format(base))\n total = total + 10000 + base\n print(\" Total: {}\".format(total))\n\n comp = pow(1 + e, p)\n print(\"Compound interest:{}\\nPeriod:{}\\nExpect return:{}\".format(comp, p, e))" ]
[ "0.65616626", "0.6471666", "0.6241464", "0.6134051", "0.6083674", "0.6017929", "0.59529567", "0.59450686", "0.57963526", "0.5787191", "0.5777359", "0.57426834", "0.5719649", "0.56672525", "0.5616136", "0.5607058", "0.55491143", "0.5521608", "0.5502035", "0.5492246", "0.5451199", "0.5405619", "0.5401534", "0.53903913", "0.53402203", "0.5334028", "0.53300625", "0.5308315", "0.5307092", "0.53025806", "0.527247", "0.52279437", "0.52127945", "0.51987714", "0.51680243", "0.5164511", "0.51595515", "0.5158757", "0.51569545", "0.5155962", "0.5155377", "0.51534516", "0.51496714", "0.5142074", "0.5131762", "0.51278794", "0.51250345", "0.5121193", "0.511813", "0.510819", "0.5078392", "0.507693", "0.5075933", "0.5074564", "0.50722307", "0.5071589", "0.5066886", "0.50635815", "0.5056203", "0.5040174", "0.5040162", "0.503473", "0.5028034", "0.502682", "0.5022003", "0.50168425", "0.50148636", "0.5013331", "0.5011143", "0.5009318", "0.5008348", "0.50042516", "0.49900988", "0.49760485", "0.4965564", "0.49651998", "0.49614823", "0.49480644", "0.4937777", "0.49353832", "0.4930605", "0.49273396", "0.4925309", "0.49242178", "0.49226898", "0.4919432", "0.49160576", "0.49146196", "0.49127495", "0.491166", "0.49108648", "0.4908657", "0.4908657", "0.49060234", "0.49025443", "0.48989758", "0.48986095", "0.48887217", "0.48782846", "0.48762527" ]
0.7525171
0
The current status of the positions. Returns
def stats(self):
    if self._dirty_stats:
        calculate_position_tracker_stats(self.positions, self._stats)
        self._dirty_stats = False

    return self._stats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)", "def status(self):\n\t\treturn self._status", "def status(self):\n pass", "def status(self):\n pass", "def status(self):\n return self.state", "def status(self):", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "def status(self):\n return self.m.status", "def status(self):\r\n return self._status", "def status(self):\r\n return self._status", "def status(self):\n return self.__status", "def status(self):\n return self.__status", "def moving_status(self):\n return self._read(MX_MOVING_STATUS)", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n return self._status", "def status(self):\n str = \"%s\\n\\tpv %s\\n\" % (self.name,self.pvname)\n str += \"\\tcurrent position (user,dial): %f,%f\\n\" % (self.wm(),self.wm_dial())\n str += \"\\tuser limits (low,high) : %f,%f\\n\" % (self.get_lowlim(),self.get_hilim())\n try:\n str += \"\\tpreset position : %s\" % (self.presets.state())\n except AttributeError:\n pass\n return str", "def status(self):\n return self.status", "def GetStatus(self):\r\n return self.status", "def getStatus():", "def get_status(self):\n return [l1.get_visible() for (l1, l2) in self.lines]", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n\n return self._status", "def status(self):\n assert(self.__complete)\n return self.__status", "def getstatus(self):\n return self.__status", "def state(self):\n return self.status", "def getStatus(self):\n return self.__status", "def status(self):\n raise NotImplementedError()", "def getStatus(self):\r\n return self.controller.getStatus()", "def status(self):\n return self._query_status()['status']", "def getStatus(self):\n return self._status", "def state(self):\r\n\r\n #Mark in wich direction is the prey\r\n prescence_prey_right = 1 if (self.prey.position[0] > self.body[0].position[0]) else 0\r\n prescence_prey_left = 1 if (self.prey.position[0] < self.body[0].position[0]) else 0\r\n prescence_prey_up = 1 if (self.prey.position[1] < self.body[0].position[1]) else 0\r\n prescence_prey_down = 1 if (self.prey.position[1] > self.body[0].position[1]) else 0\r\n #Direction where is moving\r\n actual_direction_right = 1 if (self.velocities[0] == 1) else 0\r\n actual_direction_left = 1 if (self.velocities[0] == -1) else 0\r\n actual_direction_up = 1 if (self.velocities[1] == -1) else 0\r\n actual_direction_down = 1 if (self.velocities[1] == 1) else 0\r\n #Mark if is an obstacle\r\n obstacles = np.ravel(self.obstacles())\r\n \r\n 
return (np.concatenate((\r\n [prescence_prey_right,\r\n prescence_prey_left,\r\n prescence_prey_up,\r\n prescence_prey_down,\r\n actual_direction_right,\r\n actual_direction_left,\r\n actual_direction_up,\r\n actual_direction_down],\r\n obstacles\r\n )))", "def status(self):\n return self._data['status']", "def get_status(self):\n return self._status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status", "def Status(self):\r\n\t\treturn self._get_attribute('status')", "def getPos(self):\n return self.__current_pos", "def status(self):\n return self._get(path='status')", "def status(self):\r\n return not self.sendQuery(\"isMoving\",\"isMoving\")", "def status(self):\n return None", "def status(self):\n return self._dbattr('status')", "def getStartState(self):\n #return (self.position, self.food.copy())\n return self.position", "def status(self):\n return self.get(self._names[\"status\"])", "def state(self) -> Set[Position]:\n return self._state", "def status(self):\n self.scion_sh('status')", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> dict[str, str] | None:\n return self._status", "def line_status(self):\n return self._line_status", "def status(self):\n return self.readvar('\\x5F\\x95',0)", "def get_working_status(self):\n #TODO: fix some issue on restarting and so on about current status\n return self.working_map[self.get_status()]", "def get_current_position(self):\n if self.table_ready:\n max_attempts = 0\n while (\n max_attempts < 3\n ): # Loop as long as there is a valid position from the corvus\n try:\n string = \"Error\"\n command = self.build_command(self.device, \"get_position\")\n string = self.vcw.query(self.device, command).strip()\n pos = self.pos_pattern.findall(string)[0]\n self.device[\"x_pos\"] = float(pos[0])\n self.device[\"y_pos\"] = float(pos[1])\n self.device[\"z_pos\"] = float(pos[2])\n return [float(i) for i 
in pos]\n except:\n self.log.error(\n \"The corvus has replied with a non valid position string: \"\n + str(string),\n exc_info=True,\n )\n max_attempts += 1", "def state(self):\n return self._attributes['status']", "def get_state(self):\n return self.get_pose()", "def get_pos(self) -> tuple:\n return self.pos", "def pointstatus( pos, ants=0 ) :\n antlist = helpers.makeList( ants )\n s.pointStatus( pos, antlist )", "def status(self):\n if hasattr(self, \"_status\"):\n return self._status\n else:\n return None", "def getInfoOnStatus(self):\n raise NotImplementedError();", "def status(self):\n if self._child:\n return self._child.status()\n return self._status" ]
[ "0.7082658", "0.7037973", "0.7028998", "0.7028998", "0.70281035", "0.70246303", "0.6955653", "0.6926556", "0.68866", "0.68866", "0.68487513", "0.68487513", "0.68325317", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6804291", "0.6797018", "0.67931855", "0.6755919", "0.673547", "0.6734414", "0.67272824", "0.67272824", "0.67272824", "0.6718206", "0.6705025", "0.6693681", "0.6692047", "0.66732645", "0.6669723", "0.6650799", "0.66222984", "0.6596384", "0.6584277", "0.65790474", "0.65641016", "0.65641016", "0.65641016", "0.65563285", "0.65484744", "0.6545767", "0.6541734", "0.65312684", "0.65291715", "0.65284663", "0.6527993", "0.6511548", "0.6510974", "0.6508184", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.6502447", "0.648102", "0.6476336", "0.64494956", "0.64473647", "0.6447307", "0.6428898", "0.64217734", "0.6421198", "0.64203376", "0.64153236", "0.6414229", "0.63955593" ]
0.0
-1
Add a transaction to ledger, updating the current state as needed.
def process_transaction(self, transaction):
    instrument = transaction.instrument
    if isinstance(instrument, Future):
        try:
            old_price = self._payout_last_sale_prices[instrument]
        except KeyError:
            self._payout_last_sale_prices[instrument] = transaction.price
        else:
            position = self.position_tracker.positions[instrument]
            amount = position.amount
            price = transaction.price
            self._cash_flow(
                self._calculate_payout(
                    instrument.multiplier,
                    amount,
                    old_price,
                    price,
                ),
            )
            if amount + transaction.amount == 0:
                del self._payout_last_sale_prices[instrument]
            else:
                self._payout_last_sale_prices[instrument] = price
    else:
        self._cash_flow(-(transaction.price * transaction.amount))

    self.position_tracker.execute_transaction(transaction)

    # we only ever want the dict form from now on
    transaction_dict = transaction.to_dict()
    try:
        self._processed_transactions[transaction.dt].append(
            transaction_dict,
        )
    except KeyError:
        self._processed_transactions[transaction.dt] = [transaction_dict]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addTransaction(self, transaction):\n self.transactions.append(transaction)\n self.transactionIDs.add(transaction.id)", "def add(self, transaction):\n if isinstance(transaction, Transaction):\n # If the transaction already exists\n if(transaction.hash in self.transaction_index):\n print(\"Debug: The transaction already exists in the list\")\n return None\n\n self.transaction_list.append(transaction)\n size = len(self.transaction_list)-1\n self.transaction_index[transaction.hash] = size\n else:\n raise Exception(\"Error: not a transaction\")", "def enter_transaction():\n _state.transactions = get_transactions() + 1", "def add_transaction_to_user_coin_history(user, amount, transaction=0, purchase=False):\n if purchase and isinstance(purchase, Suggestion):\n transaction = UserCoinHistory(user=user, coins_change=amount, suggestion=purchase, transaction=transaction)\n else:\n transaction = UserCoinHistory(user=user, coins_change=amount, transaction=transaction)\n\n transaction.save()", "def add_transaction(self, tx_json):\n recv_tx = Transaction.from_json(tx_json)\n if not recv_tx.verify():\n raise Exception(\"New transaction failed signature verification.\")\n with self.all_tx_lock:\n if tx_json in self._all_transactions:\n print(f\"{self.name} - Transaction already exist in pool.\")\n return\n self._all_transactions.add(tx_json)", "def AddTransaction(self, tx):\n if BC.Default() is None:\n return False\n\n if tx.Hash.ToBytes() in self.MemPool.keys():\n return False\n\n if BC.Default().ContainsTransaction(tx.Hash):\n return False\n\n if not tx.Verify(self.MemPool.values()):\n logger.error(\"Verifying tx result... failed\")\n return False\n\n self.MemPool[tx.Hash.ToBytes()] = tx\n\n return True", "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n return self.last_block['index']+1", "def add_transaction():\n index = blockchain.add_transaction(request.form['sender'], request.form['receiver'], request.form['amount'])\n response = {'message': \"Transaction will be added to Block #{0}\".format(index)}\n return jsonify(response), 200", "def add_value(transaction_amount, last_transaction=[1]):\n blockchain.append([last_transaction, transaction_amount])", "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n })\n return self.last_block['index'] + 1", "def add_transaction(self, bitfinex_id, bitfinex_currency, bitfinex_timestamp, bitfinex_price, bitfinex_amount):\n\n new_transaction = Transaction(bitfinex_id=bitfinex_id,\n bitfinex_currency=bitfinex_currency,\n bitfinex_timestamp=bitfinex_timestamp,\n bitfinex_price=bitfinex_price,\n bitfinex_amount=bitfinex_amount,\n languages=languages,\n skills=skills)\n\n self.session.add(new_transaction)\n self.session.commit()\n\n return new_transaction.id", "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender':sender,\n 'recipient':recipient,\n 'amount':amount\n })\n\n return self.last_block['index']+1", "def add_transaction(self,transaction):\n if type(transaction) != PoWGenericTransaction:\n raise Exception('TYPEERROR','transaction should be type of \"PoWGenericTransaction\" but got {}'.format(type(transaction)))\n if not transaction.is_validation_passed():\n print 'The transaction is not valid. 
Skipped...'\n return\n self.transactions.append(transaction)", "def new_transaction(self, sender, recipient, amount):\n\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n\n return self.last_block['index'] + 1", "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n\n return self.last_block['index'] + 1", "def add_transaction(self, block, transaction):\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?,?);\"\"\" %(TABLE_TRANSACTIONS,\n COL_TRANSACTION_BLOCK,\n COL_TRANSACTION_SENDER,\n COL_TRANSACTION_RECEIVER,\n COL_TRANSACTION_AMOUNT,\n COL_TRANSACTION_SUB_TIME,\n COL_TRANSACTION_VER_TIME)\n self.__dbcursor.execute(cmd, (block, transaction.sender,\n transaction.receiver,\n transaction.amount,\n transaction.submitted_time,\n transaction.verified_time))", "def record_transaction(self, transaction: Transaction) -> bool:\n if self._locked:\n print('Failed to record transaction! Your account has been locked!'\n )\n return False\n\n if transaction.amount > self.bank_balance:\n print('Failed to record transaction! Not enough balance!')\n return False\n\n budget = self.budget_manager.get_budget(transaction.budget_category)\n if budget.locked:\n print('Failed to record transaction! This budget has been locked!')\n return False\n\n self.transactions.append(transaction)\n self.bank_balance -= transaction.amount\n budget.amount_spent += transaction.amount\n self._warn_and_lock_if_needed(transaction)\n return True", "def new_transaction(self, sender, recipient, amount):\n self.current_transactions.append(\n {\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount\n }\n )\n\n return self.last_block['index'] + 1", "def new_transaction(self, sender, recipient, amount):\n self.pending_transactions.append({\n 'sender' : sender,\n 'recipient' : recipient,\n 'amount' : amount\n })\n return self.last_block['index'] + 1", "def add_transaction(self, recipient, sender, amount=1.0):\n # we are using OrderedDict to get an ordered dictionary so that the hash doesn't change due to the order changing\n transaction = Transaction(sender, recipient, amount)\n\n if Verification.verify_transaction(transaction, self.get_balance):\n self.__open_transactions.append(transaction)\n self.save_data()\n return True\n return False", "def add_income(transaction):\n conn = create_connection(database)\n\n sql = ''' UPDATE card\n SET balance = balance + ?\n WHERE number = ?'''\n\n with conn:\n cur = conn.cursor()\n cur.execute(sql, transaction)\n conn.commit()", "def new_transaction(self, sender, recipient, amount):\n\t\tself.current_transaction.append({\n\t\t\t'sender': sender,\n\t\t\t'recipient' : recipient,\n\t\t\t'amount' : amount,\n\t\t\t})\n\n\t\treturn len(self.chain)", "def add_tx(self, txid, tx):\n outputs = tx.outputs()\n so = outputs and outputs[0][1]\n # Note: ScriptOutput here is the subclass defined in this file, not\n # address.ScriptOutput\n if not isinstance(so, ScriptOutput):\n return\n transaction_type = so.message.transaction_type\n try:\n if transaction_type == \"GENESIS\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"MINT\":\n self._add_genesis_or_mint_tx(so, outputs, txid, tx)\n elif transaction_type == \"SEND\":\n self._add_send_tx(so, outputs, txid, tx)\n elif transaction_type == \"COMMIT\":\n return # ignore COMMIT, they don't produce any tokens\n else:\n raise 
InvalidOutputMessage(\"Bad transaction type\")\n except (AssertionError, ValueError, KeyError, TypeError, IndexError) as e:\n self.print_error(f\"ERROR: tx {txid}; exc =\", repr(e))", "def add_transaction(self):\r\n transactionvariable = self.transactionvariable.get()\r\n transactionvariable = (ast.literal_eval(transactionvariable)[0]) # converts to tuple\r\n pattern = re.compile('\\d+(\\.\\d+)?')\r\n match = re.search(pattern, self.difference_box.get())\r\n if self.difference_box.get() == \"\":\r\n pass\r\n else:\r\n if match: \r\n self.cursor.execute(\"\"\"UPDATE transactions SET Difference = ? WHERE TransactionID = ?\"\"\",\r\n (self.difference_box.get(), transactionvariable,))\r\n else:\r\n messagebox.showinfo(\"Error\", \"Transaction incorrect format (+/-DD)\")\r\n\r\n if self.dateandtime_box.get() == \"\":\r\n pass\r\n else:\r\n try:\r\n datetime.strptime(self.dateandtime_box.get(), '%Y-%m-%d %H:%M:%S') \r\n self.cursor.execute(\"\"\"UPDATE transactions SET DateAndTime = ? WHERE TransactionID = ?\"\"\",\r\n (self.dateandtime_box.get(), transactionvariable,))\r\n except ValueError:\r\n messagebox.showinfo(\"Error\", \"Date and time incorrect format (YYYY-MM-DD HH:MM:SS)\")\r\n\r\n self.db.commit()\r\n self.edit_transaction_window.destroy()\r\n FinancesFrame.update_table(self)", "def add_transaction(recipient,sender=owner,amount=1.0):\n #transaction ={\n # 'sender':sender,\n # 'recipient':recipient,\n # 'amount':amount\n #}\n transaction=OrderedDict([('sender',sender),('recipient',recipient),('amount',amount)])\n if verify_transaction(transaction):\n open_transactions.append(transaction)\n participants.add(sender)\n participants.add(recipient)\n save_data()\n return True\n else:\n return False", "def apply(self, new_transaction: Transaction):\n\t\tif self.__number in [new_transaction.receiving_account, new_transaction.sending_account]:\n\t\t\tself.transactions.append(new_transaction)\n\t\telse:\n\t\t\traise ValueError(\n\t\t\t\t\"This account is not involved in the desired transaction. 
Transaction Cancelled\"\n\t\t\t)", "def add_transaction(recipient, sender=owner, amount=1.0):\n # transaction = {\n # 'sender': sender,\n # 'recipient': recipient,\n # 'amount': amount\n # }\n\n transaction = OrderedDict([('sender', sender), ('recipient', recipient), ('amount', amount)])\n\n if verify_transaction(transaction):\n open_transactions.append(transaction)\n # Since this is defined as a set, any duplicate vlaues will be ignored!\n participants.add(sender)\n participants.add(recipient)\n return True\n return False", "def new_transaction():\n\n data = request.get_json()\n\n if not data:\n return \"No transation data passed\", 400\n\n required = ['sender', 'recipient', 'amount']\n\n if not (list(data.keys()) == required):\n return 'Missing Value', 400\n \n block_index = blockchain.add_transaction(data['sender'], data['recipient'], data['amount'])\n response = {'message':f'Adding the transaction to block at index: {block_index}'}\n\n return jsonify(response), 201", "def new_transaction(self, sender, recipient, amount):\n\n\t\tself.current_transactions.append({\n\t\t\t'sender': sender,\n\t\t\t'recipient': recipient,\n\t\t\t'amount': amount,\t\t\n\t\t})\n\t\t\n\t\treturn self.last_block()['index'] + 1", "def add_validated_transaction(self,transaction):\n tx_hash = sha256(json.dumps(transaction).encode()).hexdigest()\n if tx_hash not in list(self.unconfirmed_transactions.keys()):\n self.unconfirmed_transactions[tx_hash] = transaction", "def add_coins(user, amount, transaction=0):\n # below line of code creates table row for user if none exists\n UserCoins.objects.get_or_create(user=user)\n user_row = UserCoins.objects.get(user=user)\n old_coins_value = user_row.coins\n user_row.coins = old_coins_value + amount\n user_row.save()\n add_transaction_to_user_coin_history(user, amount, transaction)", "def add_transaction(self, date, payee_id, description, amount):\n # [todo] - implement error handling and parameter checking pre-execution\n\n # open a cursor\n cur = self.get_cursor()\n\n self.reset_auto_increment('transactions')\n\n # add transaction with required values\n stmt = \"INSERT INTO transactions \" + \\\n \"VALUES ('0', \" + \\\n \"'{0}-{1}-{2}', \".format(date.year, date.month, date.day) + \\\n \"'{0}', '{1}', \".format(payee_id, description) + \\\n \"'{0}')\".format(amount)\n\n cur.execute(stmt)\n\n # close the cursor\n self.close_cursor()", "def start_transaction(self) -> None:\n pass", "def create_transaction(self, sender, recipient, amount):\n\n self.current_transactions.append({\n 'sender': sender,\n 'recipient': recipient,\n 'amount': amount,\n })\n return self.last_block['index'] + 1", "def recTrans(self,NoSh,BorS,Price,TS):\n self.TL.append(Transaction(NoSh,BorS,Price,TS))\n self.Price=Price", "def add_transaction(self):\r\n pattern = re.compile('\\d+(\\.\\d+)?')\r\n match = re.search(pattern, self.difference_box.get())\r\n if match:\r\n try:\r\n datetime.strptime(self.dateandtime_box.get(), '%Y-%m-%d %H:%M:%S')\r\n self.cursor.execute(\r\n \"\"\"INSERT INTO transactions(Difference, DateAndTime, TransactionStatus) VALUES (?,?,?)\"\"\",\r\n (self.difference_box.get(), self.dateandtime_box.get(), \"Successful\",))\r\n self.db.commit()\r\n self.create_transaction_window.destroy()\r\n FinancesFrame.update_table(self)\r\n except ValueError:\r\n messagebox.showinfo(\"Error\", \"Transaction format incorrect\")\r\n else:\r\n messagebox.showinfo(\"Error\", \"Difference is invalid format\")", "def push_tx(self, crypto, tx_hex):\n raise NotImplementedError(\n \"This service does not 
support pushing transactions to the network. \"\n \"Or rather it has no defined 'push_tx' method.\"\n )", "def add_transaction(table, id, store_id, hr_id, crm_id, quantity):\n record = [id, store_id, hr_id, crm_id, quantity]\n table.append(record)\n\n return table", "def record_transaction(self) -> None:\n Menu.prompt_record_transaction()\n tx_data = Transaction.prompt_record_tx()\n new_tx = Transaction.generate_new_tx(tx_data)\n\n # Convert the user budget category int input to the enum\n budget_category_int = new_tx.budget_category\n budget_category = BudgetManager.category_mapping[budget_category_int]\n\n # Retrieve the budget object using the enum as the key\n budget = self.user.budget_manager.budget_dict[budget_category]\n\n # Validate the transaction before proceeding\n validated_tx, error_msg = self.validate_transaction_record(new_tx,\n budget)\n if not validated_tx:\n print(\"\\n[red]Warning:[/red] Unable to record transaction!\")\n print(error_msg)\n print(f\"{self.user.account}\\n\")\n print(budget)\n return\n\n # User has successfully recorded a transaction\n budget.add_amount_spent(new_tx.tx_amount)\n self.user.account.add_amount_spent(new_tx.tx_amount)\n self.user.tx_manager.add_transaction(new_tx)\n self.user.update_lock_status()\n print(\"\\nSuccessfully recorded the following transaction:\")\n print(new_tx)\n print(\"\\nTransaction has been recorded under the following budget \"\n \"category:\")\n print(budget)\n\n self.user.check_and_issue_user_warnings(budget)", "def deposit(self, amount):\n self.balance += amount\n self.transactions.append((\"Deposit\", amount))\n print \"Your new balance is $%d.\" % self.balance", "def announce_new_transaction(tx_data):\n for peer in peers:\n url = \"{}/add_received_transaction\".format(peer)\n headers = {'Content-Type': \"application/json\"}\n requests.post(url,\n data=json.dumps(tx_data),\n headers=headers)", "def add_UI_transaction(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\t_type = read_type()\n\tadd_transaction(_day, _amount, _type, account)", "def _addObject(self, row):\n if row.has_key('id'):\n txn_id = row.get('id')\n\n # increment transaction id\n prefix = self.context.getLedger().getTransactionPrefix()\n txn_number = int(txn_id.replace(prefix, ''))\n if self.context.getLedger().getTransactionID() < txn_number:\n self.context.getLedger().setTransactionID(txn_number)\n\n else:\n txn_id = self.context.generateUniqueId(type_name='Transaction')\n\n # create transaction if it doesn't exist\n if not self.context.hasObject(txn_id):\n self.context.invokeFactory(type_name='Transaction', id=txn_id,\n title=row['title'], effectiveDate=row['effectiveDate'])\n\n txn = self.context[txn_id]\n\n # lookup account\n pc = getToolByName(self.context, 'portal_catalog')\n brains = pc(id=row['Account'])\n __traceback_info__ = str(row)\n assert len(brains) == 1\n row['Account'] = brains[0].getObject()\n\n # create transaction entry\n entry_id = txn.generateUniqueId('TransactionEntry')\n entry = TransactionEntry(entry_id)\n txn._setObject(entry_id, entry)\n entry = txn._getOb(entry_id)\n\n row['id'] = entry_id\n entry.edit(**row)", "def add_item(self, transaction_id, description, amount, category_id):\n # [todo] - implement error handling and parameter checking pre-execution\n\n # open a cursor\n cur = self.get_cursor()\n\n self.reset_auto_increment('transaction_items')\n\n # add transaction with required values\n stmt = \"INSERT INTO transaction_items \" + \\\n \"VALUES ('0', \" + \\\n \"'{0}', '{1}', \".format(transaction_id, 
description) + \\\n \"'{0}', '{1}')\".format(amount, category_id)\n\n cur.execute(stmt)\n\n # close the cursor\n self.close_cursor()", "def begin_transaction(self, event=None):\n assert not self._current_transaction\n self._current_transaction = ActionStack()", "def add_transaction(self, recipient, sender, amount, signature):\n\n if self.hosting_node == None:\n return None\n transaction = Transaction(sender, recipient, amount, signature)\n if Verification.verify_transaction(transaction, self.get_balance):\n self.__open_transactions.append(transaction)\n self.save_data()\n return True\n return False", "def add_transaction(self, transaction, signature, client_public_key):\r\n # Check If transaction is already in the transaciton_pool\r\n if transaction not in self.transaction_pool:\r\n # Verify With All Other Nodes\r\n if self.verify_transaction(transaction, signature, client_public_key):\r\n # Encrypt the transaction\r\n client_public_key = load_pem_public_key(client_public_key, default_backend())\r\n encrypted_transaction = client_public_key.encrypt(\r\n json.dumps(transaction).encode(),\r\n padding.OAEP(\r\n mgf = padding.MGF1(algorithm=hashes.SHA256()),\r\n algorithm = hashes.SHA256(),\r\n label = None\r\n )\r\n )\r\n\r\n self.transaction_pool.append(str(encrypted_transaction))\r\n\r\n else: return False, self.transaction_pool # Return False if Verification fails\r\n\r\n # Return True if transaction was already in transaction_pool or if verification was successful and new transaction was added\r\n return True, self.transaction_pool", "def add_deposit(self, tx_id: str, insert_time: int, amount: float, asset: str, auto_commit=True):\n row = (tx_id, insert_time, asset, amount)\n self.add_row(tables.SPOT_DEPOSIT_TABLE, row, auto_commit)", "def add(self, amount):\n self.amount += amount", "def add_transaction(self, recipient, sender, signature, amount=1.0):\n if not self.hosting_node:\n return False\n\n transaction = Transaction(sender, recipient, signature, amount)\n\n if not Verification.verify_transaction(transaction, self.get_balance):\n return False\n\n self.__open_transactions.append(transaction)\n self.save_data()\n return True", "def AddTx(self, request: loopchain_pb2.TxSend, context):\n\n utils.logger.spam(f\"peer_outer_service:AddTx try validate_dumped_tx_message\")\n channel_name = request.channel or conf.LOOPCHAIN_DEFAULT_CHANNEL\n StubCollection().channel_stubs[channel_name].sync_task().add_tx(request)\n return loopchain_pb2.CommonReply(response_code=message_code.Response.success, message=\"success\")", "def add_lending_purchase(self, purchase_id: int, purchase_time: int, lending_type: str, asset: str, amount: float,\n auto_commit: bool = True):\n row = (purchase_id, purchase_time, lending_type, asset, amount)\n self.add_row(tables.LENDING_PURCHASE_TABLE, row, auto_commit=auto_commit)", "def add_buy(self, trade):\n trade = self._format_sql(trade, self.buy_table)\n self.buys[trade['id']] = trade", "def InvocationAddTransactionHash(builder, transactionHash):\n return AddTransactionHash(builder, transactionHash)", "def add_tag(self, transaction, citation_handle, tag_handle):\n citation = self.dbstate.db.get_citation_from_handle(citation_handle)\n citation.add_tag(tag_handle)\n self.dbstate.db.commit_citation(citation, transaction)", "def start_transaction(self,):\n\n if self.tx is not None:\n raise OverlappedTransaction(str(self.tx.xid))\n\n modlogger.debug(\"start tx\")\n opid = self.new_opid()\n xaction = StartTxOperation(opid,opid)\n self.tx = Transaction(opid,self.home, 
track_state = self.track_state)\n self._add_operation(opid,xaction)\n return opid", "def add_transaction(self, recipient, sender, signature, amount=1.0, is_reciving=False):\n\n # if self.public_key == None:\n # return False\n transaction = Transaction(sender, recipient, signature, amount)\n if Verification.verify_transaction(transaction, self.get_balence):\n self.open_transactions.append(transaction)\n self.save_data()\n if not is_reciving:\n for node in self.peer_nodes:\n url = 'http://{}/broadcast-transaction'.format(node)\n try:\n response = requests.post(url, json={\n \"sender\": sender, \"recipient\": recipient, \"amount\": amount, \"signature\": signature})\n if response.status_code == 400 or response.status_code == 500:\n print(\"transaction declined, need resolving\")\n return False\n if response.status_code == 409:\n self.resolve_conflits = True\n except requests.exceptions.ConnectionError:\n continue\n return True\n return False", "def commit_transaction(self) -> None:\n pass", "def startTransaction(self) -> int:\n ...", "def transaction_add(request, form_class=TransactionForm, template_name='budget/transactions/add.html'):\n if request.POST:\n form = form_class(request.POST)\n \n if form.is_valid():\n transaction = form.save()\n return HttpResponseRedirect(reverse('budget_transaction_list'))\n else:\n form = form_class()\n return render_to_response(template_name, {\n 'form': form,\n }, context_instance=RequestContext(request))", "async def add_transaction(self, collection, user, message, flag):\n raw_message = message.clean_content\n raw_message = raw_message[raw_message.index(\" \"):]\n if flag:\n final_message = \"You paid < \" + raw_message + \" >\"\n else:\n final_message = message.author.name + \" paid < \" + raw_message + \" >\"\n\n doc = await collection.find_one({'id': user.id})\n if len(doc['transactions']) >= 10:\n await collection.update_one({'id': user.id}, {'$pop': {'transactions': -1}})\n\n await collection.update_one({'id': user.id}, {'$push': {'transactions': {\"message\": final_message}}})", "def add_cash(self, delta):\n self._cash += delta", "def createNewTransaction(self, amount, sender, recipient):\n newTransaction = Transaction(amount, sender, recipient)\n self.pendingTransactions.append(newTransaction)\n return self.getLastBlock().index + 1", "def addMoney(self, deposit_amount):\r\n self.balance_amt = self.balance_amt + deposit_amount", "def __request_tx_data(self) -> bool:\n tx_amount, tx_recipient = ask_for_tx()\n return self.blockchain.add_transaction(self.blockchain.owner, tx_recipient, tx_amount, participants=self.participants)", "def add_transactions(self, transactions):\n\n if not transactions:\n Exception(\"transactions cannot be empty!\")\n return\n\n if not type(transactions) == list:\n Exception(\"Transactions must be a sent in a list!\")\n return\n\n for i, tx in enumerate(transactions):\n if not self.validate_transaction(tx):\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)", "def start_transaction(self):\n raise Unsupported()", "def blockchain_set_tx_detail(transaction):\n info_endpoint = \"address/%s?format=json\" % transaction.to_address\n try:\n info = json.loads(util.call_api(info_endpoint))\n except:\n return\n\n transaction.txid = info['txs'][0]['hash']\n transaction.amount_paid = round(info['total_received'] * SATOSHI, 8)\n\n if transaction.amount_paid >= transaction.amount_btc:\n transaction.status = Transaction.STATUS_CONFIRMED\n 
send_webhook.apply_async(kwargs={'transaction_id': transaction.id})\n\n transaction.save()", "def new_transaction(self, sender, recipient, amount):\n\n new_trans = Transaction(sender=sender,\n recipient=recipient,\n amount=amount)\n\n new_trans.save()\n\n return self.last_block['id'] + 1", "def mine_transactions(self, address):\n transaction = Transaction(walletoffrom=None, walletofto=address, amount=self.reward)\n self.current_transactions.append(transaction)\n\n block = Block(target=self.target, transactions=self.current_transactions, previoushash=self.last_block().__hash__())\n\n\n self.chain.append(block)\n self.current_transactions = []", "def commit(self, transaction):\n raise NotImplementedError", "def select_user_and_add_transaction(self):\n def add_transaction(to_user):\n print(\"Amount of transaction:\")\n amount = input()\n new_transaction = transaction.Transaction(amount)\n to_user.add_transaction(new_transaction)\n\n try:\n selected_user = self.prompt_user_selection()\n add_transaction(selected_user)\n except ValueError:\n print(\"No changes made.\")", "def add_trade(self, trade):\n if not trade:\n return False\n\n self.lock()\n\n trade.id = self._next_trade_id\n self._next_trade_id += 1\n\n self.trades.append(trade)\n self.unlock()", "def add_block(self, block_name, transactions, timestamp, hash_value):\n\n transacted_amount = 0\n for transaction in transactions:\n transacted_amount += transaction.amount\n self.add_transaction(block_name, transaction)\n\n cmd = \"\"\"INSERT INTO %s(%s, %s, %s, %s, %s)\n VALUES(?,?,?,?,?);\"\"\" %(TABLE_BLOCKCHAIN,\n COL_BLOCKCHAIN_BLOCK,\n COL_BLOCKCHAIN_TRANS_COUNT,\n COL_BLOCKCHAIN_AMOUNT,\n COL_BLOCKCHAIN_TIME,\n COL_BLOCKCHAIN_BLOCK_HASH)\n self.__dbcursor.execute(cmd, (block_name, len(transactions),\n transacted_amount, timestamp,\n hash_value))", "def deposit(self, amount):\n self.transactions += [('deposit', amount)]\n self.balance = self.balance + amount\n return self.balance", "def set_transaction(self, transaction):\n self.transaction_map[transaction.id] = transaction", "def _store_transaction(account, transaction):\n tr_tx = transaction['tx']\n meta = transaction.get('meta', {})\n\n if meta.get('TransactionResult') != 'tesSUCCESS':\n return\n\n amount = meta.get('delivered_amount') or tr_tx.get('Amount', {})\n\n is_unprocessed = (\n tr_tx['TransactionType'] == 'Payment' and\n tr_tx['Destination'] == account and\n isinstance(amount, dict) and\n not Transaction.objects.filter(hash=tr_tx['hash'])\n )\n if is_unprocessed:\n logger.info(\n format_log_message(\n 'Saving transaction: %s', transaction\n )\n )\n\n transaction_object = Transaction.objects.create(\n account=tr_tx['Account'],\n hash=tr_tx['hash'],\n destination=account,\n ledger_index=tr_tx['ledger_index'],\n destination_tag=tr_tx.get('DestinationTag'),\n source_tag=tr_tx.get('SourceTag'),\n status=Transaction.RECEIVED,\n currency=amount['currency'],\n issuer=amount['issuer'],\n value=amount['value']\n )\n\n logger.info(\n format_log_message(\n \"Transaction saved: %s\", transaction_object\n )\n )", "def add_state(self, state):\n self._validate_state(state)\n self._state.add(state)", "def add_transactions(shard_state, collation, txqueue, shard_id, min_gasprice=0, mainchain_state=None):\n if not txqueue:\n return\n pre_txs = len(collation.transactions)\n log.info('Adding transactions, %d in txqueue, %d dunkles' % (len(txqueue.txs), pre_txs))\n while 1:\n tx = txqueue.pop_transaction(\n max_gas=shard_state.gas_limit - shard_state.gas_used,\n min_gasprice=min_gasprice\n )\n if tx 
is None:\n break\n try:\n apply_shard_transaction(mainchain_state, shard_state, shard_id, tx)\n collation.transactions.append(tx)\n except (InsufficientBalance, BlockGasLimitReached, InsufficientStartGas,\n InvalidNonce, UnsignedTransaction) as e:\n log.info(str(e))\n pass\n log.info('Added %d transactions' % (len(collation.transactions) - pre_txs))", "def transaction_amount(self, transaction_amount):\n\n self._transaction_amount = transaction_amount", "def begin_transaction(self, request):\n return self._call_method('beginTransaction', request,\n datastore_v1_pb2.BeginTransactionResponse)", "def makeNewTx(self):\n new_tx = self.makeTx() # ABSTRACT - Make a new tx.\n logging.info(\"New tx (%d) created by miner %d\" % (new_tx.id, self.id))\n self.changed_last_step = True\n self.handleNewTx(new_tx, self.id)\n self.checkAllTx()", "def buy(self, stock, amount):\n self.orders[stock] += amount", "def insert_to_db(self) -> None:\n query = '''INSERT INTO ESLReceipts(Transaction_Number, Date, Description, Memo,\n Amount_Debit, Amount_Credit, Balance, Check_Number, \n Fees, Card_Type, Is_Payment, Is_Transaction, User_id)\n VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?);'''\n self.db.commit(query, values=self.to_tuple())\n\n if self.is_transaction \\\n and self.transaction is not None \\\n and not self.transaction.exists_in_db():\n self.transaction.insert_to_db()", "def Add_File(self,tx,filename,newcontents):\n if tx != self.tx:\n raise InvalidTransaction(tx)\n fullname = os.path.join(self.home,filename)\n h = win32_txf.CreateFileTransacted(fullname,transaction = tx,\n desired_access = win32_txf.const.GENERIC_WRITE,\n creation_disposition = win32_txf.const.CREATE_ALWAYS)\n #TODO handle partial writes\n win32_txf.WriteFile(h,newcontents)\n win32_txf.CloseHandle(h)", "def start_transaction(self):\n self._connection.execute_nonquery(\"sql\", \"START TRANSACTION\", True)", "def record_transaction(self, amount=None, transaction_date=None, paid=False):\n if transaction_date is None:\n transaction_date = timezone.now()\n\n if amount is None:\n amount = self.plan_cost.cost\n SubscriptionTransaction = swapper.load_model(\n \"subscriptions_api\", \"SubscriptionTransaction\"\n )\n return SubscriptionTransaction.objects.create(\n user=self.user,\n subscription=self, # A transaction should link to is subscription\n date_transaction=transaction_date,\n amount=amount,\n paid=paid\n )", "def add(self, *args: Union[Transaction, TransactionInstruction]) -> Transaction:\n for arg in args:\n if isinstance(arg, Transaction):\n self.instructions = self.instructions + arg.instructions\n elif isinstance(arg, TransactionInstruction):\n self.instructions = (*self.instructions, arg)\n else:\n raise ValueError(\"invalid instruction:\", arg)\n\n return self", "def set_transaction(self, transaction: Transaction):\n self.transaction_map[transaction.id] = transaction", "def deposit(self, amount, description=''):\n self.ledger.append({'amount': amount, 'description': description})", "def makeTx(self):\n new_tx = transaction.Tx(self.simulation.tick, self.id, self.id_bag.getNextId(), [])\n self.simulation.all_tx.append(new_tx)\n return new_tx", "def create_tx(\n self,\n query: str,\n query_params: Optional[Mapping[str, Any]] = None,\n ):\n tx = self.get_session().begin_transaction()\n try:\n # logger.info(query)\n tx.run(query, parameters=query_params)\n tx.commit()\n except Exception as e:\n logger.error(e)\n finally:\n tx.close()", "def _update(self, tx_message: TransactionMessage) -> None:\n assert (\n self._amount_by_currency_id is not None\n 
and self._quantities_by_good_id is not None\n ), \"Cannot apply state update, current state is not initialized!\"\n\n self._amount_by_currency_id[tx_message.currency_id] += tx_message.sender_amount\n\n for good_id, quantity_delta in tx_message.tx_quantities_by_good_id.items():\n self._quantities_by_good_id[good_id] += quantity_delta", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def add(self, amount: float, reason: str = \"\") -> \"Bank\":\n\n if amount == 0: # Pointless, do nothing.\n return 0\n\n self.__record_ledger__(amount, reason)\n self.balance += amount\n return self", "def save_transaction(**kwargs):\n if not 'user_id' in kwargs:\n raise AttributeError(\"Cannot create a transaction without user_id\")\n\n\n return History.create(\n user_id=kwargs['user_id'],\n from_curr=kwargs['currencyFrom'],\n to_curr=kwargs['currencyTo'],\n amount=kwargs['amountTo'],\n address_in=kwargs['payinAddress'],\n address_out=kwargs['payoutAddress'],\n extraid=kwargs['payinExtraId'],\n transaction_id=kwargs['id'],\n exchange_status=kwargs['status'],\n )", "def add_transaction(self, date: datetime, instruments: List[Instrument]):\n\n for inst in instruments: # type: Instrument\n self._update_profit(inst.cost)", "def startTransaction(self):\n if self._transaction is None:\n d = self._config.startTxn()\n\n def processTxn(result):\n self._transaction = result\n return self._transaction\n\n d.addCallback(processTxn)\n return d\n else:\n raise TransactionAlreadyStartedError(\"Transaction already started. Call commit or rollback to close it\")", "def add(self, state, action, reward, new_state, done):\n experience = (state, action, reward, new_state, done)\n self.buffer.append(experience)", "def add_withdraw(self, withdraw_id: str, tx_id: str, apply_time: int, asset: str, amount: float, fee: float,\n auto_commit: bool = True):\n row = (withdraw_id, tx_id, apply_time, asset, amount, fee)\n self.add_row(tables.SPOT_WITHDRAW_TABLE, row, auto_commit=auto_commit)", "def add_transact_type(self, transact_type):\n self._transact_types.add(transact_type)" ]
[ "0.73368466", "0.7106181", "0.6942782", "0.66168725", "0.65834856", "0.6570095", "0.6479877", "0.64787537", "0.6422333", "0.64186", "0.6378191", "0.63757354", "0.63565505", "0.63033175", "0.630115", "0.6284071", "0.6255765", "0.62522966", "0.6241244", "0.6228167", "0.6210412", "0.6203582", "0.61911833", "0.6110395", "0.60661364", "0.6021049", "0.60111284", "0.5983493", "0.598273", "0.5956269", "0.5947005", "0.5927761", "0.59110606", "0.5884179", "0.58726215", "0.5840563", "0.57908314", "0.57698935", "0.57675093", "0.57635623", "0.57524747", "0.5740189", "0.56887925", "0.56851727", "0.5678449", "0.56770134", "0.56687367", "0.5660405", "0.56398886", "0.56380755", "0.5632973", "0.56245", "0.56135744", "0.5594151", "0.5590188", "0.55657333", "0.55568665", "0.5542844", "0.55365264", "0.5525172", "0.5510162", "0.55016667", "0.5500819", "0.54769224", "0.5464744", "0.54567957", "0.54462665", "0.5444271", "0.5437817", "0.54349446", "0.54332757", "0.54117477", "0.5410628", "0.54017144", "0.539156", "0.5391314", "0.53816164", "0.53791136", "0.5377322", "0.537685", "0.53619075", "0.5358976", "0.53548574", "0.5351353", "0.534679", "0.53427666", "0.5336123", "0.5320002", "0.53091294", "0.53088194", "0.52927303", "0.5287245", "0.52860475", "0.52814114", "0.5278468", "0.5275752", "0.5271357", "0.5262457", "0.52614343", "0.52499044", "0.52282065" ]
0.0
-1
Retrieve the dict-form of all of the transactions in a given bar or for the whole simulation.
def transactions(self, dt=None):
    if dt is None:
        # flatten the by-day transactions
        return [
            txn
            for by_day in itervalues(self._processed_transactions)
            for txn in by_day
        ]
    return self._processed_transactions.get(dt, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def transaction_base() -> Dict[str, Any]:\n return {\n \"first_name\": \"Donald\",\n \"last_name\": \"Duck\",\n \"company\": \"Duck Co\",\n \"email\": \"[email protected]\",\n \"telephone\": \"991234567\",\n \"mobile\": \"+358991234567\",\n \"street\": \"1313 Webfoot Walk\",\n \"postal_code\": \"00000\",\n \"city\": \"Duckburg\",\n \"country\": \"US\",\n \"information\": \"Quack, damn you!\",\n \"items\": [],\n }", "def get_transactions_as_json(self):\n transactions = self.transaction_map.values() \n transactions_json = list(map( lambda t: t.to_json(), transactions )) \n return transactions_json", "def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions", "def test_wallets_get_transaction_list(self):\n pass", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []", "def _buildTotalsDict(self, fromdt, todt):\r\n pairs = [(t, t.effectForPeriod(fromdt, todt)) for t in self.transactions]\r\n return dict(pairs)", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def load_data():\n dataFile = open('transactions.json', 'r')\n data = json.load(dataFile)\n transactions = data['transactions']#TID\n items = data['items']#item sets\n return items, transactions", "def report(self):\n result = {}\n result_buy = 0\n result_sell = 0\n result_outcome = 0\n for pair in self.pairs:\n orders = self.get_orders_for(pair)\n buy, 
sell = self.get_buy_and_sell_costs(orders)\n outcome = sell - buy\n result_pair = {\"buy\": buy, \"sell\": sell, \"outcome\": outcome}\n result.update({pair: result_pair})\n result_buy += buy\n result_sell += sell\n result_outcome += outcome\n\n # Add global results\n all = {\"buy\": result_buy, \"sell\": result_sell, \"outcome\": result_outcome}\n result.update({\"all\": all})\n return result", "def transactions(self):\r\n return tx.Transactions(self)", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def new_get_buys_transaction_history(self, cb_account):\n date: datetime = now()\n if cb_account == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": str(date + timedelta(days=-1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 1,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": str(date + timedelta(days=1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 0.5,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()", "def dict(self):\n return {\"data\": self.data.dict(), \"inventory\": self.inventory.dict()}", "def get_pending_transactions():\n\n return History.get_pending().get()", "def _amount_all(self):\n res = {}\n ut_obj = self.env['l10n.ut']\n for iwdl_brw in self.browse(self.ids):\n # Using a clousure to make this call shorter\n f_xc = ut_obj.sxc(\n iwdl_brw.invoice_id.company_id.currency_id.id,\n iwdl_brw.invoice_id.currency_id.id,\n iwdl_brw.islr_wh_doc_id.date_uid)\n\n res[iwdl_brw.id] = {\n 'amount': (iwdl_brw.base_amount * (iwdl_brw.retencion_islr / 100.0)) or 0.0,\n 'currency_amount': 0.0,\n 'currency_base_amount': 0.0,\n }\n for xml_brw in iwdl_brw.xml_ids:\n res[iwdl_brw.id]['amount'] = xml_brw.wh\n res[iwdl_brw.id]['currency_amount'] = f_xc(\n res[iwdl_brw.id]['amount'])\n res[iwdl_brw.id]['currency_base_amount'] = f_xc(\n iwdl_brw.base_amount)", "def buildMarketOrdersData(self):\n d = {}\n for orderID, myMarketOrderDict in self.frame.mode.game.marketOrders.iteritems():\n if myMarketOrderDict['system'] == self.mySystemDict['id']:\n amount = myMarketOrderDict['amount']\n min = myMarketOrderDict['min']\n max = myMarketOrderDict['max']\n value = myMarketOrderDict['value']\n if myMarketOrderDict['type'] == 'sell':\n s = 'SELL (%d %s) for MIN=%d' % (amount, value, min)\n elif myMarketOrderDict['type'] == 'buy-all':\n s = 'BUY ALL (%d %s) for MAX=%d' % (amount, value, max)\n elif myMarketOrderDict['type'] == 'buy-any':\n s = 'BUY ANY (%d %s) for MAX=%d' % (amount, value, max)\n d[orderID] = s\n return d", "def decoderawtransaction(self, hexstring):\n return dict(self.proxy.decoderawtransaction(hexstring))", "def __preprocess_transactions(self):\n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\", leave=False)\n\n try:\n # 0. 
If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n # Adjust quantity and price for splits\n for ticker in self.__transactions[\"Ticker\"].unique():\n try:\n splits_df = get_splits(ticker)\n if not splits_df.empty:\n splits_df = splits_df.tz_localize(tz=None)\n for split_date in splits_df.index:\n self.__transactions[\"Quantity\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Quantity\"]\n * splits_df.loc[split_date].values,\n self.__transactions[\"Quantity\"],\n )\n self.__transactions[\"Price\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Price\"]\n / splits_df.loc[split_date].values,\n self.__transactions[\"Price\"],\n )\n\n except Exception:\n console.print(\"\\nCould not get splits adjusted\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. 
Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with suppress_stdout():\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformatted tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n unsupported_type = self.__transactions[\n (~self.__transactions[\"Type\"].isin([\"STOCK\", \"ETF\", \"CRYPTO\"]))\n ].index\n if unsupported_type.any():\n self.__transactions.drop(unsupported_type, inplace=True)\n console.print(\n \"[red]Unsupported transaction type detected and removed. Supported types: stock, etf or crypto.[/red]\"\n )\n\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"].iloc[0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.__load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n p_bar.disable = True\n console.print(\n f\"\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. 
IWDA -> IWDA.AS[/red]\\n\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n raise", "def get_aggregated_values(self):\n if not self._initialized:\n raise Exception(\"To readout you must first initialize, then\"\n \"process batches!\")\n else:\n ret_vals = [q.readout() for q in self.quantities]\n return dict(zip(self.quantity_names, ret_vals))", "def tx_prices(self) -> Dict[str, List[float]]:\n agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n results = {\n agent_name: [] for agent_name in agent_pbk_to_name.values()\n } # type: Dict[str, List[float]]\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[agent_pbk_to_name[tx.seller_pbk]].append(tx.amount)\n\n return results", "def wem_market_value_all():\n __query = \"\"\"\n select\n date_trunc('month', wfs.trading_interval) AS trading_day,\n sum(wfs.eoi_quantity * wbs.price) as energy_interval,\n wf.fueltech_id\n from wem_facility_scada wfs\n left join wem_facility wf on wfs.facility_id = wf.code\n join wem_balancing_summary wbs on wfs.trading_interval = wbs.trading_interval\n where\n wf.fueltech_id is not null\n group by 1, wf.fueltech_id\n order by 1 desc, 2 asc\n \"\"\"\n\n query = __query.format()\n\n json_envelope = {}\n\n with engine.connect() as c:\n rows = c.execute(query)\n\n current_tech = None\n\n for row in rows:\n\n current_tech = row[2]\n\n if current_tech not in json_envelope.keys():\n json_envelope[current_tech] = {\n \"id\": f\"wem.fuel_tech.{current_tech}.market_value\",\n \"fuel_tech\": current_tech,\n \"region\": \"wa\",\n \"type\": \"market_value\",\n \"units\": \"AUD\",\n \"history\": {\n \"interval\": \"1M\",\n \"start\": None,\n \"last\": None,\n \"data\": [],\n },\n }\n\n if (\n json_envelope[current_tech][\"history\"][\"start\"] == None\n or row[0] < json_envelope[current_tech][\"history\"][\"start\"]\n ):\n json_envelope[current_tech][\"history\"][\"start\"] = row[0]\n\n if (\n json_envelope[current_tech][\"history\"][\"last\"] == None\n or row[0] > json_envelope[current_tech][\"history\"][\"last\"]\n ):\n json_envelope[current_tech][\"history\"][\"last\"] = row[0]\n\n json_envelope[current_tech][\"history\"][\"data\"].append(row[1])\n\n return [json_envelope[i] for i in json_envelope.keys()]", "def get_goal(self):\n request_name = \"list_inventory_orders\"\n\n orders = self.make_request(request_name)\n order_dict = dict()\n for order in orders:\n order_name = order[\"@id\"].encode('utf-8')\n item_dict = dict()\n for item in order[\"items\"]:\n item_id = item[\"inventory-item-id\"].encode('utf-8')\n item_quantity = item[\"quantity\"]\n item_dict[item_id] = item_quantity\n order_dict[order_name] = item_dict\n return order_dict", "def _get_dicts(self, unit_set):\n name_dict = {}\n unit_dict = {}\n for unit in unit_set:\n name_dict[unit.name] = unit.coef\n unit_dict[unit.unit] = unit\n\n return name_dict, unit_dict", "def get_transactions(filters, as_dict=1):\n\tfilter_by_voucher = 'AND gl.voucher_type = %(voucher_type)s' if filters.get('voucher_type') else ''\n\tgl_entries = frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\t/* either debit or credit amount; always positive */\n\t\t\tcase gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne 
Soll/Haben-Kz)',\n\n\t\t\t/* 'H' when credit, 'S' when debit */\n\t\t\tcase gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',\n\n\t\t\t/* account number or, if empty, party account number */\n\t\t\tacc.account_number as 'Konto',\n\n\t\t\t/* against number or, if empty, party against number */\n\t\t\t%(temporary_against_account_number)s as 'Gegenkonto (ohne BU-Schlüssel)',\n\n\t\t\tgl.posting_date as 'Belegdatum',\n\t\t\tgl.voucher_no as 'Belegfeld 1',\n\t\t\tLEFT(gl.remarks, 60) as 'Buchungstext',\n\t\t\tgl.voucher_type as 'Beleginfo - Art 1',\n\t\t\tgl.voucher_no as 'Beleginfo - Inhalt 1',\n\t\t\tgl.against_voucher_type as 'Beleginfo - Art 2',\n\t\t\tgl.against_voucher as 'Beleginfo - Inhalt 2',\n\t\t\tgl.party_type as 'Beleginfo - Art 3',\n\t\t\tgl.party as 'Beleginfo - Inhalt 3',\n\t\t\tcase gl.party_type when 'Customer' then 'Debitorennummer' when 'Supplier' then 'Kreditorennummer' else NULL end as 'Beleginfo - Art 4',\n\t\t\tpar.debtor_creditor_number as 'Beleginfo - Inhalt 4'\n\n\t\tFROM `tabGL Entry` gl\n\n\t\t\t/* Kontonummer */\n\t\t\tleft join `tabAccount` acc \n\t\t\ton gl.account = acc.name\n\n\t\t\tleft join `tabCustomer` cus\n\t\t\ton gl.party_type = 'Customer'\n\t\t\tand gl.party = cus.name\n\n\t\t\tleft join `tabSupplier` sup\n\t\t\ton gl.party_type = 'Supplier'\n\t\t\tand gl.party = sup.name\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = gl.party\n\t\t\tand par.parenttype = gl.party_type\n\t\t\tand par.company = %(company)s\n\n\t\tWHERE gl.company = %(company)s \n\t\tAND DATE(gl.posting_date) >= %(from_date)s\n\t\tAND DATE(gl.posting_date) <= %(to_date)s\n\t\t{}\n\t\tORDER BY 'Belegdatum', gl.voucher_no\"\"\".format(filter_by_voucher), filters, as_dict=as_dict)\n\n\treturn gl_entries", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def tx_counts(self) -> Dict[str, Dict[str, int]]:\n agent_pbk_to_name = self.game.configuration.agent_pbk_to_name\n result = {agent_name: 0 for agent_name in agent_pbk_to_name.values()}\n results = {\"seller\": result.copy(), \"buyer\": result.copy()}\n\n temp_game = Game(self.game.configuration, self.game.initialization)\n\n # compute the partial scores for every agent after every transaction\n # (remember that indexes of the transaction start from one, because index 0 is reserved for the initial scores)\n for idx, tx in enumerate(self.game.transactions):\n temp_game.settle_transaction(tx)\n results[\"seller\"][agent_pbk_to_name[tx.seller_pbk]] += 1\n results[\"buyer\"][agent_pbk_to_name[tx.buyer_pbk]] += 1\n\n return results", "def query_symbol_bars(self, symbol: str):\n return self._call_txtrader_api('query_symbol_bars', {'symbol': symbol})", "def compute_helper_mempool_dictionaries():\n txn_density_dict = {}\n txn_parents_dict = {}\n txn_size_dict = {}\n mempool_data = parse_mempool_csv()\n for elem in mempool_data:\n size = elem.weight/MAXIMUM_BLOCK_WEIGHT # weight mapped to (0,1)\n txn_size_dict[elem.txid] = size \n txn_density_dict[elem.txid] = elem.fee/size\n if elem.parents != '':\n txn_parents_dict[elem.txid] = elem.parents.strip().split(';')\n return txn_density_dict,txn_parents_dict,txn_size_dict", "def get(self, transaction_ids):\n try:\n transaction_ids = list(set(list(transaction_ids)))\n request = {\"transaction_ids\": transaction_ids}\n response = {}\n # Validate User Input\n validations_result = validate_transaction_ids(transaction_ids)\n if validations_result is not None and len(validations_result) > 0:\n response = {\"ResponseCode\": 
ResponseCodes.InvalidRequestParameter.value,\n \"ResponseDesc\": ResponseCodes.InvalidRequestParameter.name,\n \"ValidationErrors\": validations_result}\n else:\n transaction_outputs_dict = {}\n for transaction_id in sorted(transaction_ids):\n transaction_outputs = db_session.query(TransactionOutput).filter(\n TransactionOutput.transaction_id == transaction_id).order_by(\n TransactionOutput.id.asc()).all()\n\n trans_output_as_list = []\n total_num_of_transaction_outputs = 0\n for transaction_output in transaction_outputs:\n output_address_response = json.loads(\n requests.get('http://localhost:5000/bitcoin/transactions/outputs/addresses',\n {'transaction_id': transaction_id,\n 'transaction_output_id': transaction_output.id}).text)\n if output_address_response[\"ResponseCode\"] == ResponseCodes.Success.value:\n trans_output_as_list.append(serialize_transaction_output(transaction_output,\n output_address_response[\n \"NumberOfOutputAddresses\"],\n output_address_response[\n \"OutputAddresses\"]))\n total_num_of_transaction_outputs = total_num_of_transaction_outputs + 1\n else:\n response = {\"ResponseCode\": output_address_response[\"ResponseCode\"],\n \"ResponseDesc\": output_address_response[\"ResponseDesc\"],\n \"ErrorMessage\": \"Internal Error in Transaction Output Address Service : \"\n + output_address_response[\"ErrorMessage\"]\n }\n break\n transaction_outputs_dict[transaction_id] = {\"NumberOfOutputs\": total_num_of_transaction_outputs,\n \"TransactionOutputs\": trans_output_as_list}\n\n if total_num_of_transaction_outputs > 0:\n response = {\"ResponseCode\": ResponseCodes.Success.value,\n \"ResponseDesc\": ResponseCodes.Success.name,\n \"TransactionOutputData\": transaction_outputs_dict\n }\n else:\n response = {\"ResponseCode\": ResponseCodes.NoDataFound.value,\n \"ResponseDesc\": ResponseCodes.NoDataFound.name,\n \"ErrorMessage\": ResponseDescriptions.NoDataFound.value}\n except Exception as ex:\n response = {\"ResponseCode\": ResponseCodes.InternalError.value,\n \"ResponseDesc\": ResponseCodes.InternalError.name,\n \"ErrorMessage\": str(ex)}\n finally:\n return response", "def produce(self, request, meta, raven_variables, dispatch, t, level=None):\n #balance = defaultdict(float)\n interaction = self.get_interaction()\n balance, meta = interaction.produce(request, meta, raven_variables, dispatch, t, level)\n #for resource, quantity in int_balance.items():\n # balance[resource] += quantity\n return balance, meta", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def _balances(self) -> Dict[str, int]:\n\n return self.client.get(self._resources(\"balance\"))", "def tx_transaction_mirs(self, txs_hash: str, pandas: bool=False) -> dict:\n \n tx_transaction_mirs = self.network + bf_tx_url + txs_hash + bf_tx_transaction_mirs_url\n\n response = query_blockfrost(tx_transaction_mirs, self.api_key, self.proxies)\n \n return pd.DataFrame.from_dict(response) if pandas else response", "def get_transactions(self):\n\n df = self.__transactions[\n [\n \"Date\",\n \"Type\",\n \"Ticker\",\n \"Side\",\n \"Price\",\n \"Quantity\",\n \"Fees\",\n \"Investment\",\n \"Currency\",\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n ]\n ]\n df = df.replace(np.nan, \"-\")\n df[\"Date\"] = df[\"Date\"].dt.strftime(\"%Y-%m-%d\")\n df.sort_values(by=\"Date\", ascending=False, inplace=True)\n return df", "def jsonrpc_gettx(self, txid):\n txnw = 
self.node.storage.db.get(hex2b(txid))\n if txnw is None:\n return {}\n txnw = TxnWrapper.unserialize(SerializationBuffer(txnw))\n info = hex_bytes_in_dict(\n txnw.txn.to_dict())\n\n # Add blockheight\n info['blockheight'] = txnw.timestamp / TIME_MULTIPLIER\n return info", "def stats(self):\n return {attr: getattr(self, attr) for attr in ['cash', 'rawg_quantity', 'rawg_demand', 'rawg_price', 'rig_quantity', 'rig_supply', 'rig_price']}", "def get_df_transactions():\n\n _, res = DBX.files_download(c.io.FILE_TRANSACTIONS)\n return pd.read_excel(io.BytesIO(res.content), index_col=0)", "def __init__(self, miser, fromdt, todt, numBars = 100):\r\n def keysToString(indict):\r\n \"\"\"Return a new dict that has converted `indict`'s keys from\r\n Transaction to string.\"\"\"\r\n newD = {}\r\n for k, v in indict.iteritems():\r\n newD[k.name] = v\r\n return newD\r\n\r\n self.income = dictToSortedList(keysToString(miser.income(fromdt, todt)))\r\n self.expenses = dictToSortedList(keysToString(miser.expenses(fromdt, todt)))\r\n self.numBars = numBars\r\n \r\n sumStr = \"\\nProfile of expenses:\"\r\n sumStr += self.expensesBar\r\n\r\n print sumStr", "def get_balance(self):\n\n return {\n 'saturacion': (self.config['saturacion'] + 100) * 100.0 / 200.0,\n 'contraste': (self.config['contraste'] + 100) * 100.0 / 200.0,\n 'brillo': (self.config['brillo'] + 100) * 100.0 / 200.0,\n 'hue': (self.config['hue'] + 100) * 100.0 / 200.0,\n 'gamma': (self.config['gamma'] + 100) * 100.0 / 200.0\n }", "def _amount_all(self, cr, uid, ids,field_name, arg, context={}):\n res={}\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = { 'amount_untaxed': 0.0, 'amount_tax': 0.0, 'amount_total': 0.0}\n amount_untaxed = 0.0\n amount_tax = 0.0\n amount_total = 0.0\n\t if not record.allowances_lines_after and record.allowances_lines_before:\n \tfor line in record.allowances_lines_before:\n \tamount_untaxed += line.amount_untaxed\n \tamount_tax += line.amount_tax\n \tamount_total += line.amount_total\n \tres[record.id]['amount_untaxed'] = amount_untaxed \n \tres[record.id]['amount_tax'] = amount_tax \n \tres[record.id]['amount_total'] = amount_total \n\n\t elif record.allowances_lines_after and record.allowances_lines_before :\n \tfor line in record.allowances_lines_after:\n \tamount_untaxed += line.amount_untaxed\n \tamount_tax += line.amount_tax\n \tamount_total += line.amount_total\n \tres[record.id]['amount_untaxed'] = amount_untaxed \n \tres[record.id]['amount_tax'] = amount_tax \n \tres[record.id]['amount_total'] = amount_total \n return res", "def get_biases(self):\n return list(self.b.values())", "def get_biases(self):\n return list(self.b.values())", "def get_biases(self):\n return list(self.b.values())", "def get_biases(self):\n return list(self.b.values())", "def determineUnitHistory():\n\tunitTracker = Unitiser()\n\t\n\timport transactions\n\ttrades = transactions.allTransactions()\n\t\n\thistory = dict()\n\t\n\tfor date in timeline():\n\t\t#print(\"\\ntimelime:\", date.strftime('%Y-%m-%d %H:%M:%S'))\n\t\timport valuator\n\t\tvalue = valuator.getPortfolioValueAt(date)\n\t\tif date in trades:\n\t\t\tprior = getPortfolioBefore(date)\n\t\t\tprior_value = valuator.getPortfolioValueAt(date, portfolio = prior)\n\n\t\t\tinvested = Decimal('0.0')\n\t\t\tfor equity in trades[date]:\n\t\t\t\ttrade = trades[date][equity]\n\t\t\t\t#print(equity, trade)\n\t\t\t\tif trade['action'] == 'buy':\n\t\t\t\t\tinvested = invested + Decimal(trade['value'])\n\t\t\t\telif trade['action'] == 'sell':\n\t\t\t\t\tinvested = 
invested - Decimal(trade['value'])\n\n\t\t\tsince = getPortfolioAt(date)\n\t\t\tsince_value = valuator.getPortfolioValueAt(date, portfolio = since)\n\n\t\t\t#print(\"change amount is\", invested)\n\t\t\tif invested > 0:\n\t\t\t\tunitTracker.invest(invested, prior_value)\n\t\t\telif invested < 0:\n\t\t\t\tunitTracker.divest(abs(invested), prior_value)\n\n\t\thistory[date] = {\n\t\t\t 'date' : date,\n\t\t\t 'value' : value.quantize(TWOPLACES),\n\t\t\t 'units' : unitTracker.numberOfUnits().quantize(TWOPLACES),\n\t\t\t 'price' : unitTracker.pricePerUnit(value).quantize(TWOPLACES),\n\t\t\t 'invested' : unitTracker.invested\n\t\t\t }\n\t\n\treturn history", "def get_all_stat(self):\n all_stat=dict()\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n all_stat[stat_type] = stat\n return all_stat", "def T1s(self) -> Dict[int, Optional[float]]:\n return {qs.id: qs.T1 for qs in self.qubits_specs}", "def get_string_of_transactions(self):\n s = \"\"\n for transaction in self.transactions:\n s += transaction.to_string()\n return s", "def get_values(self, currency):\n curr_dict = {\n \"brazilian_real\": None,\n \"american_dollar\": None,\n \"european_euro\": None,\n \"british_pound\": None,\n \"japanese_yen\": None,\n \"swiss_frank\": None,\n \"canadian_dollar\": None,\n \"australian_dollar\": None\n }\n index = 0\n for key in curr_dict:\n if key != currency:\n # list comprehension to get values from data\n curr_dict[key] = [\n element for record in select_records(currency, 1) for element in record\n if element == record[index] and isinstance(element, float)\n ]\n index += 1\n else:\n continue\n return curr_dict", "def submit(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict]:\n\n search_type = tracker.get_slot(\"search_type\")\n transaction_history = tracker.get_slot(\"transaction_history\")\n transactions_subset = transaction_history.get(search_type, {})\n vendor = tracker.get_slot(\"vendor_name\")\n if vendor:\n transactions = transactions_subset.get(vendor.lower())\n vendor = f\" with {vendor}\"\n else:\n transactions = [\n v for k in list(transactions_subset.values()) for v in k\n ]\n vendor = \"\"\n\n start_time = parser.isoparse(tracker.get_slot(\"start_time\"))\n end_time = parser.isoparse(tracker.get_slot(\"end_time\"))\n\n for i in range(len(transactions) - 1, -1, -1):\n transaction = transactions[i]\n transaction_date = parser.isoparse(transaction.get(\"date\"))\n\n if transaction_date < start_time or transaction_date > end_time:\n transactions.pop(i)\n\n numtransacts = len(transactions)\n total = sum([t.get(\"amount\") for t in transactions])\n slotvars = {\n \"total\": f\"{total:.2f}\",\n \"numtransacts\": numtransacts,\n \"start_time_formatted\": tracker.get_slot(\"start_time_formatted\"),\n \"end_time_formatted\": tracker.get_slot(\"end_time_formatted\"),\n \"vendor_name\": vendor,\n }\n\n dispatcher.utter_message(\n template=f\"utter_searching_{search_type}_transactions\", **slotvars\n )\n dispatcher.utter_message(\n template=f\"utter_found_{search_type}_transactions\", **slotvars\n )\n\n return [\n SlotSet(\"time\", None),\n SlotSet(\"time_formatted\", None),\n SlotSet(\"start_time\", None),\n SlotSet(\"end_time\", None),\n SlotSet(\"start_time_formatted\", None),\n SlotSet(\"end_time_formatted\", None),\n SlotSet(\"grain\", None),\n SlotSet(\"search_type\", None),\n SlotSet(\"vendor_name\", None),\n ]", "def get_dict(self):\n return", "def _table_tree(self, real_account):\n return [{\n 'account': 
ra.account,\n 'balances_children':\n serialize_inventory(realization.compute_balance(ra),\n at_cost=True),\n 'balances': serialize_inventory(ra.balance, at_cost=True),\n 'is_leaf': len(ra) == 0 or bool(ra.txn_postings),\n 'postings_count': len(ra.txn_postings)\n } for ra in realization.iter_children(real_account)]", "def histogramintegrals(self):\n return {}", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def get_all_orders():", "def summary(self,attr='raw'):\n g = {}\n g['gid'] = map(lambda x : x.gid, self.taxonomies)\n g['sp'] = map(lambda x : x.presences.species , self.taxonomies)\n \n g['gns'] = map(lambda x : x.presences.genera , self.taxonomies) \n g['fam'] = map(lambda x : x.presences.families , self.taxonomies)\n g['ord'] = map(lambda x : x.presences.orders , self.taxonomies)\n g['cls'] = map(lambda x : x.presences.classes , self.taxonomies)\n g['phy'] = map(lambda x : x.presences.phyla , self.taxonomies)\n g['kng'] = map(lambda x : x.presences.kingdoms , self.taxonomies)\n #g['all'] = map(lambda x : (x.gid,int(x.presences.species),int(x.genera),int(x.families),int(x.orders),int(x.classes),int(x.phyla),int(x.kingdoms)),self.taxonomies)\n keys = settings.TAXONOMIC_TREE_KEYS\n if attr == 'int':\n for key in keys:\n g[key] = map(lambda p : int(p) ,g[key])\n elif attr == 'str':\n for key in keys:\n g[key] = map(lambda p : str(p) ,g[key]) \n elif attr == 'list':\n for key in keys:\n g[key] = map(lambda p : p.list ,g[key]) \n elif attr == 'mapping':\n for key in keys:\n g[key] = map(lambda p : p.map ,g[key]) \n elif attr == 'raw':\n return g\n else:\n logger.error(\"Wrong attribute selection\")\n return None\n \n return g", "def test_get_transaction_types(self):\n pass", "def dump(self):\n result = super(BattleshipTransaction, self).dump()\n\n result['Name'] = self._name\n result['Action'] = self._action\n result['Ships'] = self._ships\n if self._action == 'JOIN':\n result['Board'] = self._board\n if self._action == 'FIRE':\n result['Row'] = self._row\n result['Column'] = self._column\n if self._reveal_space is not None:\n result['RevealSpace'] = self._reveal_space\n if self._reveal_nonce is not None:\n result['RevealNonce'] = self._reveal_nonce\n\n return result", "def _get_amount_total_base(self):\n res = {}\n for txt in self:\n res[txt.id] = 0.0\n for txt_line in txt.txt_ids:\n if txt_line.invoice_id.type in ['out_refund', 'in_refund']:\n res[txt.id] -= txt_line.untaxed\n else:\n res[txt.id] += txt_line.untaxed\n return res", "async def b_chain() -> dict:\n authority_chain = await chain.consensus()\n return {\"chain\": authority_chain[\"chain\"]}", "def chart_of_accounts(qbo_session, attrs = \"strict\"):\n\n #query all the accounts\n accounts = qbo_session.get_objects(\"Account\")\n\n #by strict, I mean the order the docs say to use when udpating:\n #https://developer.intuit.com/docs/0025_quickbooksapi/\n #0050_data_services/030_entity_services_reference/account\n\n if attrs == \"strict\":\n attrs = [\n \"Id\", \"SyncToken\", \"MetaData\", \"Name\", \"SubAccount\",\n \"ParentRef\", \"Description\", \"FullyQualifiedName\", \"Active\",\n \"Classification\", 
\"AccountType\", \"AccountSubType\", \"AcctNum\",\n \"OpeningBalance\", \"OpeningBalanceDate\", \"CurrentBalance\",\n \"CurentBalanceWithSubAccounts\", \"CurrencyRef\"\n ]\n\n else:\n #TODO: validate the attrs against the 'strict' list above\n pass\n\n #As a first cut, we'll sort them by AccountType in trial balance order\n\n tb_type_order = [\n \"Bank\", \"Accounts Receivable\", \"Other Current Asset\",\n \"Fixed Asset\", \"Other Asset\",\n \"Accounts Payable\", \"Credit Card\",\n \"Other Current Liability\", \"Other Liability\",\n \"Equity\",\n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n ]\n\n accounts_by_type = {} #{Accounts_Payable:[row_list]\n\n for a_id in accounts:\n a = accounts[a_id]\n at = a[\"AccountType\"]\n if at not in tb_type_order:\n raise Exception(\"Unexpected AccountType: %s\" % at)\n\n if at not in accounts_by_type:\n accounts_by_type[at]=[]\n\n this_row = []\n for field in attrs:\n if field not in a:\n this_row.append(\"\")\n else:\n value = a[field]\n if isinstance(value,(list,tuple,dict)):\n this_row.append(\"<complex>\")\n else:\n this_row.append(a[field])\n\n accounts_by_type[at].append(this_row)\n\n rows = [attrs] #headers are the first row\n for at in tb_type_order:\n if at in accounts_by_type:\n for row in accounts_by_type[at]:\n rows.append(row)\n\n return rows", "def get_tarefa_mais_barata(tarefas):\n\tdict_custo_total = {}\n\ttarefa_barata = {}\n\tfor tarefa in tarefas:\n \t\ttarefa_id = tarefa['identificador']\n\t\tif not (dict_custo_total.has_key(tarefa_id)):\n\t\t\tdict_custo_total[tarefa_id] = {'tarefa': tarefa, 'custo': 0}\n\t\t\tfor outra_tarefa in tarefas:\n\t\t\t\tif (outra_tarefa['identificador'] != tarefa_id):\n\t\t\t\t\tdict_custo_total[tarefa_id]['custo'] += calcula_custo(tarefa['tempo_de_execucao'], outra_tarefa['custo_por_hora'])\n\n\t\tif (tarefa_barata == {} or (tarefa_barata['custo'] > dict_custo_total[tarefa_id]['custo'])):\n\t\t\ttarefa_barata = dict_custo_total[tarefa_id]\n\n\treturn tarefa_barata['tarefa']", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "async def test_all_transactions(self):\n response = await self.collect(get_request_json_return_value=self.GATLING_JSON)\n self.assert_measurement(response, value=\"2\", entities=self.expected_entities)", "def to_json(self) -> Dict[str, Any]:\n\n return {\n **self.index.to_json(),\n \"timelock\": self.timelock,\n \"amount\": self.amount,\n \"spend_key\": self.spend_key.hex(),\n \"state\": self.state.value,\n }", "def gettransaction(self, txid):\n return TransactionInfo(**self.proxy.gettransaction(txid))", "def get_transaction_prices(self):\n cleaned_data = self.cleaned_data()\n supplier_cleaned_data = cleaned_data.get('cleaned_supplier_data')\n transaction_cleaned_data = cleaned_data.get('cleaned_transaction_data')\n merged_data = self.merge_supplier_transaction(supplier_cleaned_data, transaction_cleaned_data)\n calculated_data = self.calculate_prices(merged_data)\n self.export_calculated_prices(calculated_data)\n return calculated_data", "def get_tx_info(tx):\n\n input_addresses = []\n output_addresses = []\n payments = []\n\n try:\n response = json.loads(make_request('http://tbtc.blockr.io/api/v1/tx/info/' + tx))\n except Exception as e:\n status = json.loads(e.message).get('status')\n if status in ['error', 'fail']:\n return {'from': None, 'to': None, 'amount': None, 'confirmations': 0}\n\n vins = response.get('data').get('vins')\n vouts = 
response.get('data').get('vouts')\n confirmations = response.get('data').get('confirmations')\n\n for i in range(len(vins)):\n if vins[i].get('address') not in input_addresses:\n input_addresses.append(vins[i].get('address'))\n for i in range(len(vouts)):\n output_addresses.append(vouts[i].get('address'))\n payments.append(vouts[i].get('amount'))\n\n return {'from': input_addresses, 'to': output_addresses, 'amount': payments, 'confirmations': confirmations}", "def test_execute_dump_all_transaction(self):\n\n instruction = Instruction(\"dump()\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n\n self.assertEqual(output, \"{1: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 2: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x11': { x11: 110 }, 'x12': { x12: 120 }, 'x1': { x1: 10 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 3: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 4: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x3': { x3: 30 }, 'x12': { x12: 120 }, 'x13': { x13: 130 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 5: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 6: {'x14': { x14: 140 }, 'x20': { x20: 200 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x15': { x15: 150 }, 'x4': { x4: 40 }, 'x5': { x5: 50 }}, 7: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 8: {'x14': { x14: 140 }, 'x20': { x20: 200 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x7': { x7: 70 }, 'x4': { x4: 40 }, 'x17': { x17: 170 }}, 9: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 10: {'x19': { x19: 190 }, 'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x9': { x9: 90 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}}\")", "def get_dict(self):\n return {\n \"type\": self.item_type,\n \"size\": self.size,\n \"toppings\": self.toppings,\n \"price\": self.get_price()\n }", "def to_dict(self):\n d = {'Name': self._name,\n 'Account Type': self.account_type}\n if self._assets:\n d['Assets'] = [to_dict(asset) for asset in self._assets.values()]\n if self._cash != 0:\n d['Available Cash'] = self._cash\n return d", "def _get_new_bar(self, symbol):\n for b in self.symbol_data[symbol]:\n yield b", "def _construct_all_holdings(self):\n d = dict((s, 0.0) for s in self.symbol_list)\n d['datetime'] = self.backtest_date\n d['cash'] = 
self.initial_capital\n d['commission'] = 0.0\n d['total'] = self.initial_capital\n d['buy_times'] = 0\n d['sell_times'] = 0\n d['total_times'] = 0\n d['hold'] = 0\n return [d]", "async def get_transactions(self, guild_id, user):\n doc = await self.db[str(guild_id)].find_one({'id': user.id})\n if doc is None or len(doc['transactions']) == 0:\n return -1\n else:\n return doc['transactions']", "def getData(self):\n return dict(self._dump_data)", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def _retrieve_transaction_table_input(self, execution_arn: str) -> Dict:\n response = self.client.get_execution_history(executionArn=execution_arn,maxResults=1000)\n events = response[\"events\"]\n record_purchase_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertPurchase\"\n ]\n\n record_refund_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertRefund\"\n ]\n\n record_error_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertError\"\n ]\n \n self.assertTrue(\n record_purchase_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_refund_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_error_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n purchase_table_input=[] #PurchaseTable inputs\n refund_table_input=[] # RefundTable inputs\n error_table_input=[] # ErrorTable inputs\n for transaction in record_purchase_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n purchase_table_input.append(transaction_input)\n self.inserted_purchase_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up PurchaseTable\n\n for transaction in record_refund_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n refund_table_input.append(transaction_input)\n self.inserted_refund_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up RefundTable\n\n for transaction in record_error_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n error_table_input.append(transaction_input)\n self.inserted_error_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up ErrorTable\n\n return purchase_table_input, refund_table_input, error_table_input", "def get_transaction_totals(self, params=None):\n return self.get(f\"{self.gateway_path}/totals\", params)", "def transactions_df():\n return pd.DataFrame(\n {\n \"user_id\": [1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"item_id\": [11, 22, 22, 11, 22, 33, 33, 33, 44],\n \"amount\": [10, 20, 30, 40, 50, 60, 70, 80, 90],\n }\n )", "def amenity_all():\n state_holder = []\n for state in models.storage.all(\"Amenity\").values():\n state_holder.append(state.to_dict())\n return_holder = jsonify(state_holder)\n return return_holder", "def dictOfBetas(self, free=True, fixed=False):\n s = {}\n for e in self.children:\n d = e.dictOfBetas(free, fixed)\n s = dict(s, **d)\n return s", "def _buildTransDict(self, fromdt, todt, ttype):\r\n totalsDict = self._buildTotalsDict(fromdt, todt)\r\n 
sortedTotsList = dictToSortedList(totalsDict)\r\n\r\n return dict([(k,v) for k,v in totalsDict.iteritems() if type(k) == ttype])", "def return_as_dictionary(self):\n output_dict = Inventory.return_as_dictionary(self)\n output_dict['material'] = self.material\n output_dict['size'] = self.size\n\n return output_dict", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def get_all_books() -> List[Dict]:\n pass", "def dict_values(self):\n return self.__dict__", "def getCurrentSystemFunds(self):\n e = self.myParent.myEmpireDict\n d = self.myParent.mySystemDict\n return [e['CR'], d['AL'],d['EC'],d['IA']]", "def get_transactions_sum_data(month_objects, amount_type):\n if (amount_type == 'expenses'):\n chart_data = [\n result['amount__sum'].quantize(D('0.01')).copy_abs()\n if result['amount__sum'] != None\n else 0\n for result in \n [month.transactions.filter(amount__lte=0).aggregate(Sum('amount')) for month in month_objects]\n ]\n elif (amount_type == 'incomes'):\n chart_data = [\n result['amount__sum'].quantize(D('0.01')).copy_abs() \n if result['amount__sum'] != None\n else 0\n for result in \n [month.transactions.filter(amount__gt=0).aggregate(Sum('amount')) for month in month_objects]\n ]\n \n return chart_data", "def get_chain_data(self, symbol: str): \n return self.trader.fetch_chain_data(symbol)", "def _load_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\ttry:\r\n\t\t\twith open(self._state_file, 'rb') as tmp:\r\n\t\t\t\tlogger.debug(\"There is a file.\")\r\n\t\t\t\ttmp_dict = pickle.load(tmp)\r\n\t\t\t\tlogger.debug(\"Dictionary loaded from file: %s\" % tmp_dict)\r\n\t\texcept IOError as e: # File doesn't exists\r\n\t\t\tlogger.debug(\"Exit - No file. Error message: %s\" % e)\r\n\t\t\ttmp_dict = {}\r\n\t\t\t\r\n\t\treturn tmp_dict", "def return_as_dictionary(self):\n output_dict = Inventory.return_as_dictionary(self)\n output_dict['brand'] = self.brand\n output_dict['voltage'] = self.voltage\n\n return output_dict", "def dictOfBetas(self, free=True, fixed=False):\n if fixed and self.status != 0:\n return {self.name: self}\n\n if free and self.status == 0:\n return {self.name: self}\n\n return dict()", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []" ]
[ "0.63048834", "0.602332", "0.5960751", "0.56639814", "0.56584823", "0.5563182", "0.5531879", "0.55191296", "0.55072176", "0.5477383", "0.54718333", "0.5458796", "0.5426807", "0.54197216", "0.541453", "0.5338547", "0.5327534", "0.53225285", "0.53161556", "0.5279584", "0.52619934", "0.5237087", "0.52224517", "0.52208483", "0.52202314", "0.52195555", "0.5216679", "0.5214561", "0.5192317", "0.5190232", "0.51868635", "0.51672924", "0.5163985", "0.51561975", "0.51450855", "0.5128815", "0.5127976", "0.51187205", "0.50943434", "0.50937515", "0.5092108", "0.50841814", "0.506848", "0.5064129", "0.5063724", "0.50629747", "0.50383264", "0.503129", "0.503129", "0.503129", "0.503129", "0.5001976", "0.50000143", "0.49497277", "0.49371666", "0.4936105", "0.4934925", "0.4934214", "0.49326798", "0.4931223", "0.49275663", "0.49220523", "0.49218124", "0.49053317", "0.4904125", "0.48903102", "0.4890083", "0.48821458", "0.48813823", "0.4880626", "0.48732132", "0.4867718", "0.48675153", "0.48642778", "0.48608243", "0.48523393", "0.4852034", "0.4846197", "0.48452893", "0.48409748", "0.48356566", "0.4831061", "0.4829988", "0.48288727", "0.48272905", "0.482643", "0.48256853", "0.48212123", "0.48201984", "0.48171985", "0.48094684", "0.48060596", "0.48027426", "0.48025575", "0.4800572", "0.47963676", "0.47922558", "0.47921076", "0.47916767", "0.47877666", "0.47861665" ]
0.0
-1
Force a computation of the current portfolio state.
def update_portfolio(self):
    if not self._dirty_portfolio:
        return

    portfolio = self._portfolio
    pt = self.position_tracker

    portfolio.positions = pt.get_positions()
    position_stats = pt.stats

    portfolio.positions_value = position_value = (
        position_stats.net_value
    )
    portfolio.positions_exposure = position_stats.net_exposure
    self._cash_flow(self._get_payout_total(pt.positions))

    start_value = portfolio.portfolio_value

    # update the new starting value
    portfolio.portfolio_value = end_value = portfolio.cash + position_value

    pnl = end_value - start_value
    if start_value != 0:
        returns = pnl / start_value
    else:
        returns = 0.0

    portfolio.pnl += pnl
    portfolio.returns = (
        (1 + portfolio.returns) *
        (1 + returns) -
        1
    )

    # the portfolio has been fully synced
    self._dirty_portfolio = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)", "def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )", "def backtest_portfolio(self):\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")", "def updateState(self):\n self.state = self.microgridPolicy.computeState();", "def portfolio(self):\n self.update_portfolio()\n return self._immutable_portfolio", "def after_run(self):\n # Calculate the performance of the strategy and portfolio\n self.portfolio.calc_stats()\n self.calc_performance()\n\n return self", "def reset(self): \n ptf_asset_vals, bmk_asset_vals = self._generate_initial_portfolios()\n process_params = self._generate_initial_process()\n self.state_vars = StateVariables( timestamp=0,\n ptf_asset_vals=ptf_asset_vals, \n bmk_asset_vals=bmk_asset_vals, \n process_params=process_params )\n return self._get_observation_from_state_vars()", "def track_portfolio(self, p):\n\n global st_refresh_thread\n\n if self.terminate:\n return\n\n p.refresh()\n\n self.lock.acquire()\n self.active_portfolio = p\n self.display_portfolio(p)\n self.lock.release()\n\n if not self.refresh_thread:\n thr_args = list()\n thr_args.append(self)\n self.refresh_thread = threading.Thread(target=st_refresh_thread,\n args=thr_args)\n self.refresh_thread.start()", "def _ensure_calculated(self):\n # return immediately if already done\n if self._state > State.UNCALCULATED:\n return\n\n # do the actual calculation, which must be implemented by the subclass\n # but first, set default state that we expect unless the implementing class overrides\n self._state = State.QUALIFIED \n self._calculate()", "def on_iteration_start(self):\n\n self.Xfprv = self.Xf.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yfprv = self.Yf.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def set_cash(self, cash):\n portfolio = self.get_portfolio_object()\n if portfolio is not None:\n portfolio.cash += cash\n portfolio.initial_cash += cash", "def compute(self):\n if self._computed:\n return\n\n self._compute()\n self._computed = True", "def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n 
pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()", "def update_portfolio_on_market(self, market: MarketEvent):\n self._portfolio.update_market_value(market)", "def __update_portfolio_handler(self, msg):\n pass", "def on_iteration_start(self):\n\n self.Xprv = self.X.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yprv = self.Y.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity 
is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def recalculate() -> None:\n NotImplemented", "def _on_state_update(self) -> None:\n super()._on_state_update()\n self._set_futures(True)", "def precalculate():\n pass", "def precalculate():\n pass", "def final(self, state):\n \"*** YOUR CODE HERE ***\"\n return\n util.raiseNotDefined()", "def execute(self) -> None:\n self.state()", "def _run_computation(self):\n with self.swap(stats_jobs_continuous.StatisticsAggregator,\n 'get_statistics', self._mock_get_statistics):\n ModifiedUserImpactAggregator.start_computation()\n self.process_and_flush_pending_tasks()", "def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2", "def reset(self):\n for key in self.portfolio.keys():\n self.portfolio[key] = {'holdings': 0}\n self.buys[key] = 0\n self.portfolio['balance'] = 2500000.0", "def apply(self, gameState):\n pass", "def liquidate(self) -> None:\n if 
self.position.is_close:\n return\n\n if self.position.pnl > 0:\n self.take_profit = self.position.qty, self.price\n else:\n self.stop_loss = self.position.qty, self.price", "def make_uncurrent(self):\n pass", "def calculate_portfolio(self, request, pk=None, **kwargs):\n goal = self.get_object()\n\n check_state(Goal.State(goal.state), Goal.State.ACTIVE)\n\n setting_str = request.query_params.get('setting', None)\n if not setting_str:\n raise ValidationError(\"Query parameter 'setting' must be specified and a valid JSON string\")\n try:\n setting = ujson.loads(setting_str)\n except ValueError:\n raise ValidationError(\"Query parameter 'setting' must be a valid json string\")\n\n # Create the settings object from the dict\n serializer = serializers.GoalSettingStatelessSerializer(data=setting)\n serializer.is_valid(raise_exception=True)\n settings = serializer.create_stateless(serializer.validated_data, goal)\n\n try:\n data = self.build_portfolio_data(calculate_portfolio(settings=settings,\n data_provider=DataProviderDjango(),\n execution_provider=ExecutionProviderDjango()))\n return Response(data)\n except Unsatisfiable as e:\n rdata = {'reason': \"No portfolio could be found: {}\".format(e)}\n if e.req_funds is not None:\n rdata['req_funds'] = e.req_funds\n\n return Response({'error': rdata}, status=status.HTTP_400_BAD_REQUEST)", "def swap_active_portfolio(self):\n\n self.lock.acquire()\n\n self.clear_main()\n\n w = self.windows['MAIN']\n l = 1\n\n for p in self.portfolios:\n # Only support 9 portfolios since that makes this easier to deal\n # with.\n if l >= 10:\n break\n\n w.addstr(l, 0, '%2d' % l, curses.A_BOLD | curses.color_pair(1))\n w.addstr(l, 3, p.name)\n l += 1\n\n self.refresh()\n\n # Wait for the user to give is a key.\n while True:\n c = self.stdscr.getch()\n\n if c < ord('1') and c > ord('9'):\n continue\n\n index = c - ord('1')\n\n if index < len(self.portfolios):\n break\n\n self.portfolios[index].refresh()\n\n self.active_portfolio = self.portfolios[index]\n self.display_portfolio(self.active_portfolio)\n self.lock.release()", "def set_curr_value(self, val):\n # only goal that is in progress can have it's current value changed\n if self._status != EGoalStatus.IN_PROGRESS:\n raise NotImplementedError('Cannot set value to finished or not started goal')\n # try cast to int - mainly for QuantifiedGoal representation\n val = self.fw.types.try_float_cast(val)\n # update both in the stages object and in raw data\n self._values[EStage.CURRENT] = self._data_process(val)\n self._skeleton.curr_value = val\n # use progressor to update the database\n self._progressor.dump_to_database(self)", "def atomistic_step(self):\n # first minimize in vacuum, in either case, \n # fixes problems with langevin bond deformation.\n self.system.minimize()\n \n if self.system.should_solvate:\n with self.system.solvate() as sol:\n with self.system.minimize(**sol) as mn:\n with self.system.equilibriate(**mn) as eq:\n self.system.md(**eq)\n else:\n self.system.equilibriate()\n self.system.md()", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def apply(self):\r\n return", "def _reset_solution(self, firm_size):\n x_lower, x_upper = self.model.workers.lower, self.model.workers.upper\n y_upper = self.model.firms.upper\n initial_V = np.array([y_upper, firm_size])\n\n if self.model.assortativity == 'positive':\n self.ivp.set_initial_value(initial_V, x_upper)\n wage = self.evaluate_wage(x_upper, initial_V)\n profit = 
self.evaluate_profit(x_upper, initial_V)\n self._solution = np.hstack((x_upper, initial_V, wage, profit))\n else:\n self.ivp.set_initial_value(initial_V, x_lower)\n wage = self.evaluate_wage(x_lower, initial_V)\n profit = self.evaluate_profit(x_lower, initial_V)\n self._solution = np.hstack((x_lower, initial_V, wage, profit))", "def simulate_trading(self):\n self._generate_trading_instances()\n self._run_backtest()\n self.portfolio.output_equity()\n res=self.portfolio.get_statistics()\n self.plot.plot_equity()\n return res", "def reactivate(self):\n self.write({'active': True, 'state': 'running'})\n STAGE = self.env['anytracker.stage']\n for ticket in self:\n starts = STAGE.search([('method_id', '=', ticket.method_id.id),\n ('progress', '=', 0)])\n if len(starts) != 1:\n raise except_orm(\n _('Configuration error !'),\n _('One and only one stage should have a 0% progress'))\n # write stage in a separate line to recompute progress & risk\n ticket.write({'stage_id': starts[0].id})\n self.recompute_parents()", "def initialize_portfolio(self):\n\n raise NotImplementedError('''\n Must implement initialize_portfolio. Call help() for details.\n ''')", "def _calculate(self):\n raise NotYetImplemented()", "def optimizeForReturn(required_return, stock_db, use_genetic):\n print('Optimizing portfolio for %f' % required_return)\n pf = PortfolioFactory(stock_db, required_return, use_genetic=use_genetic)\n desired_portfolio = pf.desired_portfolio\n print('Required Return: %f' % required_return)\n print('Expected Return: %f' % math.pow(\n desired_portfolio.average_return, Config.DAYS_IN_YEAR))\n print('Downside Risk: %f' % desired_portfolio.downside_risk)\n print('Downside Correl: %f' % desired_portfolio.downside_correl)\n print('Score: %f' % desired_portfolio.score)\n\n # Write desired portfolio.\n DataIO.writeDesiredPortfolio(\n desired_portfolio, stock_db,\n 'output/DesiredPortfolio_%.0f_%.4f_%s.csv' % (\n Config.MINIMUM_AMOUNT_DATA, required_return, Config.TODAY.date()))\n\n print('Finished for %f' % required_return)\n\n return desired_portfolio", "def calc(self):\n return None", "def update_state(self, progress, policy_state=None):\n raise NotImplementedError", "def solve():\n game_state.is_solving = ~game_state.is_solving\n\n if game_state.is_solving:\n solve_button.set_label(\"Pause\")\n else:\n solve_button.set_label(\"Solve\")\n\n game_state.is_dirty = True\n\n return solve", "def restart(self):\n self.state ='active'\n if self.budget <= 0:\n return self.leave()\n self.cards =BJCards()\n self.bet_amount =0", "def trade(self, portfolio: Portfolio, stock_market_data: StockMarketData) -> List[Order]:\n assert portfolio is not None\n assert stock_market_data is not None\n assert stock_market_data.get_companies() == [Company.A, Company.B]\n\n current_state = self.states_compution(stock_market_data, portfolio)\n current_portfolio_value = portfolio.get_value(stock_market_data)\n\n if self.train_while_trading is False:\n # for test set use trained ann\n action = np.argmax(self.model.predict(current_state)[0])\n\n else:\n action = self.select_action(current_state, current_portfolio_value)\n if self.last_state is not None:\n reward = self.calc_reward(current_portfolio_value)\n self.append_memory(self.last_state, self.last_action_a, reward, current_state)\n # train model as soon as sufficient memory is reached\n if len(self.memory) > self.min_size_of_memory_before_training:\n self.train_model()\n\n # Split action into individual actions for Company A and B\n current_action_a = 0\n current_action_b = 0\n 
assert action < 9 and action >= 0\n if action == 0:\n current_action_a = OrderType.SELL\n current_action_b = OrderType.SELL\n amount_to_sell_a = portfolio.get_stock(Company.A)\n amount_to_sell_b = portfolio.get_stock(Company.B)\n elif action == 1:\n current_action_a = OrderType.SELL\n amount_to_sell_a = portfolio.get_stock(Company.A)\n current_action_b = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_b = int(portfolio.cash/stock_price)\n elif action == 2:\n current_action_a = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_a = int(portfolio.cash/stock_price)\n current_action_b = OrderType.SELL\n amount_to_sell_b = portfolio.get_stock(Company.B)\n elif action == 3:\n current_action_a = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_a = int((portfolio.cash/stock_price)/2)\n current_action_b = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.B)\n amount_to_buy_b = int((portfolio.cash/stock_price)/2)\n elif action == 4:\n current_action_a = OrderType.SELL\n amount_to_sell_a = portfolio.get_stock(Company.A)\n # current_action_b = \"hold\"\n elif action == 5:\n current_action_a = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_a = int(portfolio.cash/stock_price)\n # current_action_b = \"hold\"\n elif action == 6:\n # current_action_a = \"hold\"\n current_action_b = OrderType.SELL\n amount_to_sell_b = portfolio.get_stock(Company.B)\n elif action == 7:\n # current_action_a = \"hold\"\n current_action_b = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.B)\n amount_to_buy_b = int(portfolio.cash/stock_price)\n\n order_list = []\n\n if current_action_a != 0:\n if current_action_a == OrderType.SELL and amount_to_sell_a > 0:\n order_list.append(Order(current_action_a, Company.A, amount_to_sell_a))\n elif current_action_a == OrderType.BUY and portfolio.cash > 0:\n order_list.append(Order(current_action_a, Company.A, amount_to_buy_a))\n\n if current_action_b != 0:\n if current_action_b == OrderType.SELL and amount_to_sell_b > 0:\n order_list.append(Order(current_action_b, Company.B, amount_to_sell_b))\n elif current_action_b == OrderType.BUY and portfolio.cash > 0:\n order_list.append(Order(current_action_b, Company.B, amount_to_buy_b))\n\n self.last_action_a = action\n self.last_state = current_state\n self.last_portfolio_value = current_portfolio_value\n return order_list", "def hedge_portfolio(context, data):\r\n factors = get_alphas_and_betas(context, data)\r\n beta_exposure = 0.0\r\n count = 0\r\n for asset in context.portfolio.positions:\r\n if asset in factors and asset != context.index:\r\n if not np.isnan(factors[asset].beta):\r\n beta_exposure += factors[asset].beta\r\n count += 1\r\n beta_hedge = -1.0 * beta_exposure / count\r\n dollar_amount = context.portfolio.portfolio_value * beta_hedge\r\n record(beta_hedge=beta_hedge)\r\n if not np.isnan(dollar_amount):\r\n order_target_value(context.index, dollar_amount)", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total 
= float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))", "def ComputerFinalStateOfCharge(self):\r\n pass", "def ramp_up(self) -> None:\n self.cash_balance: float = self.initial_cash_balance()\n for stock in self.stocks:\n initial_date_idx = 0\n self.cash_balance = stock.buy(initial_date_idx, self.cash_balance, self.buy_budget)", "def display_portfolio(self, p):\n\n if self.terminate:\n return\n\n w = self.windows['MAIN']\n\n self.clear_main()\n self.__display_portfolio(p, w)\n self.clear_header()\n self.set_header(p)\n\n self.refresh()", "def call_back(e):\n gh_env.Component.ExpireSolution(False)", "def solution_update(self, theta, force=False):\n \n self.x = self.eval(theta, force)\n \n return", "def _localSetState(self,pdict):\n super()._localSetState(pdict)\n self.transition = pdict.pop('transition')\n self.steadyStatePb = pdict.pop('steadyStatePb')", "def reset_for_new_run(\n self,\n state: State\n ):\n\n super().reset_for_new_run(state)\n\n self.epsilon = self.original_epsilon\n self.greedy_action = list(self.Q.keys())[0]", "def act(self, current_state, setpoint):\n s = self.last_s # Refers only to theta\n a = self.last_a # Refers only the last action taken\n s_p = self.map_state(\n current_state['theta'], current_state['theta_dot'])\n\n # Update the q-table based on the Bellman Equation\n r = self.reward_func(current_state)\n q_predict = self.q_table[s + (a,)]\n\n if not self.reset_env:\n q_target = r + self.params['gamma'] * np.max(self.q_table[s_p])\n else:\n q_target = r\n print(str(r) + ' Reset')\n self.reset_env = False\n # self.q_table[40,1]=90\n\n self.q_table[s+(a,)] += self.params['learn_rate'] * \\\n (q_target - q_predict)\n # print(self.params['epsilon'])\n # Take the next action\n if (np.random.uniform() < self.params['epsilon']) or (np.max(np.abs(self.q_table[s_p])) < 1e-3):\n ac = np.random.randint(self.num_actions)\n self.dist[ac] += 1\n else:\n ac = np.argmax(self.q_table[s_p])\n\n #ac = 0\n\n self.last_a = ac\n self.last_s = s_p\n if self.cont_decay == 1000:\n self.params['epsilon'] *= self.params['decay_rate']\n self.cont_decay = 0\n else:\n self.cont_decay += 1\n u = self.map_force(ac)\n f_u = self.F_cum + u\n print(self.params)\n print(self.q_table[30:51])\n return f_u", "def apply(self):\n pass", "def apply(self):\n pass", "def forces(self):\n pass", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for 
t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def reset_new_conditions (self):\n self.solver.pop()\n self.solver.push()", "def updater(self, func_name, ml_results=None):\n\n closed_pos = []\n\n # Iterates all active positions\n for pos in self.active_short_positions + self.active_long_positions:\n\n # Check if position reached target /expired / active strategy, and updates the position accordingly\n if getattr(pos, func_name)(ml_results=ml_results):\n\n closed_pos.append(pos)\n\n if pos.order_type == Consts.LONG and self.longs:\n self.active_long_positions.remove(pos)\n self.closed_long_positions.append(pos)\n if pos.order_type == Consts.SHORT and self.shorts:\n self.active_short_positions.remove(pos)\n self.closed_short_positions.append(pos)\n\n # Updates Portfolio stats according to new closed positions\n self.closed_positions(closed_pos)\n\n # Updates capital stats after last operations\n self.update_capital_stats()", "def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n 
BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def step(self):\n self.solver.integrate(self.t)\n self.state = self.solver.y", "def solve(self, current_state: dict) -> dict:", "def _update_objective(self):\n # rewrap the cost if the solver has been run\n self.Finalize()\n return", "def rebalance(self, date):\n eod_values = self.df.shift(1).loc[date, 'values'].mul(1 + self.tc.instrument_returns.loc[date, 'daily'])\n eod_portfolio_value = sum(eod_values.values)\n\n previous_values = self.df.loc[date, 'values'].copy()\n position_value = self.target_weights.mul(eod_portfolio_value)\n trading_cost = abs(eod_values.div(eod_portfolio_value) - self.target_weights) * eod_portfolio_value * \\\n self.tc.commission\n current_values = position_value - trading_cost\n self.df.loc[date, 'values'] = current_values.values\n future_values = self.tc.instrument_returns.loc[date:, 'cumulative'].div(\n self.tc.instrument_returns.loc[date, 'cumulative']).mul(current_values, axis=1)\n self.df.loc[date:, 'values'] = future_values.values\n trade = pd.Series(current_values - previous_values)\n # Once we have calculated the end-of-day value of the portfolio, we set the allocation by looking at the\n # dollars invested in each ETF\n self.df.loc[date:, 'allocations'] = future_values.div(future_values.sum(axis=1), axis=0).values\n\n return trade", "def __setstate__(self, state):\n self.__dict__ = state\n self.get_esoh_solver = lru_cache()(self._get_esoh_solver)", "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = 
start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def force(self, **kwargs):\n log.info(\"Forcing a build\")\n self._force = True", "def __call__(self):\n\n self.initialise()\n\n # //\n # // ToDo: Add exception wrappers for plugin calls\n #//\n subJobs = TrackerDB.getJobsByState(\"submitted\", self.cooloff)\n self.updateSubmitted(*subJobs.keys())\n runningJobs = TrackerDB.getJobsByState(\"running\")\n self.updateRunning(*runningJobs.keys())\n completeJobs = TrackerDB.getJobsByState(\"complete\")\n self.updateComplete(*completeJobs.keys())\n failedJobs = TrackerDB.getJobsByState(\"failed\")\n self.updateFailed(*failedJobs.keys())\n self.cleanup()\n\n return", "def trigger(self):\n # Update current state.\n self.current_inst = self.future_inst\n self.current_timer = self.future_timer\n # Initialize future state.\n self.future_inst = self.current_inst\n self.future_timer = max(0, self.current_timer - 1)", "def reset_state(self, noised_results, global_state):\n new_tree_state = self._tree_aggregator.reset_state(global_state.tree_state)\n return attr.evolve(\n global_state,\n samples_cumulative_sum=noised_results,\n tree_state=new_tree_state)", "def _update_state(self) -> None:\n raise NotImplementedError(\"\")", "def my_rebalance(context,data):\n log.info(\"rebalancing...\")\n context.output = pipeline_output('my_pipeline')\n log.info(\"retrieved pipeline output...\")\n \n # These are the securities that we are interested in trading each day.\n context.security_list = context.output.index\n \n if context.prime == False:\n order_target_percent(symbol('SPY'),1) #hold SPY as a default \n context.prime = True\n \n weight= 1.0/len(context.security_list)\n \n for stock in context.security_list:\n log.info(\"Buying %s\" % (stock.symbol))\n order_target_percent(stock, weight)\n \n #: Exit any positions we might have\n for stock in 
context.portfolio.positions:\n if data.can_trade(stock) and stock not in context.security_list:\n log.info(\"Exiting our positions on %s\" % (stock.symbol))\n order_target_percent(stock, 0)", "def update_price_model(self, good, order_type, is_successful, clearing_price=0):\n\n SIGNIFICANT = 0.25 # 25% more or less is \"significant\"\n SIG_IMBALANCE = 0.33\n LOW_INVENTORY = 0.1 # 10% of ideal inventory = \"LOW\"\n HIGH_INVENTORY = 2.0 # 200% of ideal inventory = \"HIGH\"\n MIN_PRICE = 0.01 # lowest allowed price of a Good\n\n if is_successful:\n # add this trade to the observed trading range\n self.observed_trading_range[good].append(clearing_price)\n\n public_mean_price = self.market.mean_price(good)\n belief = self.price_belief[good]\n mean = belief.mean()\n wobble = 0.05 # the degree which the Pop should bid outside the belief\n\n # how different the public mean price is from the price belief\n delta_to_mean = mean - public_mean_price\n\n if is_successful:\n if order_type is OrderType.buy_order and delta_to_mean > SIGNIFICANT:\n # this Pop overpaid, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n elif order_type is OrderType.sell_order and delta_to_mean < -SIGNIFICANT:\n # this Pop underpaid!, shift belief towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # increase the belief's certainty\n belief.low += wobble * mean\n belief.high -= wobble * mean\n\n else:\n # shift towards mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n # check for inventory special cases\n stocks = self.inventory.get_amount(good)\n ideal = self.inventory.get_ideal(good)\n\n # if we're buying and inventory is too low\n # meaning we're desperate to buy\n if order_type is OrderType.buy_order and stocks < LOW_INVENTORY * ideal:\n wobble *= 2\n\n # if we're selling and inventory is too high\n # meaning we're desperate to sell\n elif order_type is OrderType.sell_order and stocks > HIGH_INVENTORY * ideal:\n wobble *= 2\n # all other cases\n else:\n sells = self.market.history.sell_orders.average(good, 1)\n buys = self.market.history.buy_orders.average(good, 1)\n\n # TODO: figure out why this is sometimes 0\n if sells + buys > 0:\n\n supply_vs_demand = (sells - buys) / (sells + buys)\n\n if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:\n # too much supply? lower bid lower to sell faster\n # too much demand? 
raise price to buy faster\n\n new_mean = public_mean_price * (1 - supply_vs_demand)\n delta_to_mean = mean - new_mean\n\n # shift the price belief to the new price mean\n belief.low -= delta_to_mean / 2\n belief.high -= delta_to_mean / 2\n\n\n # decrease belief's certainty since we've just changed it (we could be wrong)\n belief.low -= wobble * mean\n belief.high += wobble * mean\n\n # make sure the price belief doesn't decrease below the minimum\n if belief.low < MIN_PRICE:\n belief.low = MIN_PRICE\n elif belief.high < MIN_PRICE:\n belief.high = MIN_PRICE", "def step(self):\n y = np.random.rand(self.p.lambda_, self.p.d).T\n x = self.p.m.reshape(-1, 1) * y\n f = np.array(list(map(sum, x)))\n self.p.used_budget += self.p.lambda_\n self.p.population = Population(x, y, f)\n self.p.m_old = self.p.m.copy()\n self.p.m *= np.linalg.norm(y, axis=1).reshape(-1, 1)\n self.p.adapt()\n self.p.old_population = self.p.population.copy()", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)", "def _stage1(self):\n self.start_progress()\n tasks = list(self._chain_dict(self._model.adjust_tasks))\n if len(tasks) == 0:\n self._stage2(self._no_adjustments_case())\n else:\n task = lambda : self._run_adjust_tasks(tasks)\n locator.get(\"pool\").submit(task, self._stage2)", "def test_reset(self):\n self.p.C[0][0] = np.inf\n self.step()", "def apply(self):\n prequire(False, \"Called abstract version of apply\")", "def apply(self, simulation):\n t = simulation.time\n dt = simulation.timeStep\n if main_rank == 0:\n simulation.printState()\n # OpenCL update\n self.numMethod(self.gpu_field.gpu_data[self.component],\n self.color)\n self.window.widget.updateGL()\n if simulation.currentIteration > 1:\n self.window.label.setText(\n self.labelText + \"t={0:6.2f}, fps={1:6.2f}\".format(\n t + dt,\n 1. 
/ (self.timer.f_timers.values()[0].t - self.ctime)))\n self.ctime = self.timer.f_timers.values()[0].t", "def apply_immediately(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"apply_immediately\")", "def momentum(portfolio_item, transaction_volume, cash_allocation):\n from yahooquery import Ticker\n from math import floor\n import talib\n from .TradeHistoryItem import log_trade\n from API.Help import is_increasing, initialize_alpaca\n\n alpaca = initialize_alpaca()\n\n yahoo_ticker = Ticker(str(portfolio_item))\n info = yahoo_ticker.history()\n ma_5 = talib.SMA(info['close'], timeperiod=5)\n ma_20 = talib.SMA(info['close'], timeperiod=20)\n volume = info['volume']\n\n if portfolio_item.shares == 0:\n # if the price goes from below the sma to above, buy\n if ma_5[-1] > (ma_20[-1] * 1.1) and is_increasing(volume, 3):\n print('buying {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'buy', 'market', 'day')\n portfolio_item.buy(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=0)\n # if the price goes from above the sma to below, short\n elif ma_5[-1] < (ma_20[-1] * .9) and not is_increasing(volume, 3) and portfolio_item.shares == 0:\n transaction_volume = floor(cash_allocation / (portfolio_item.ticker.price_now * 1.1))\n print('shorting {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'sell', 'market', 'day')\n portfolio_item.short(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=3)", "def update_job_state(self, job):", "def reset_continued(self): \n self._recent_goal_continued = False\n self._update_action = False\n self._update_action_without_pause = False", "def compute(self, *args):\n\n pass", "def set_force(state):\n global _FORCE\n _FORCE = bool(state)", "def solvate(self):\n\n pass", "def reset(self) -> None:\n self.cash_balance = self.initial_cash_balance()", "def reset_state(self, noised_results, global_state):\n del noised_results\n new_tree_state = self._tree_aggregator.reset_state(global_state.tree_state)\n return attr.evolve(\n global_state,\n previous_tree_noise=self._zero_initial_noise(),\n tree_state=new_tree_state)", "def calculate_profit(self):", "def assumed_state(self) -> bool:\n return self._optimistic", "def start(self):\n if self.is_cancelled:\n with self.changing('is_finished'):\n self.finished = True\n return\n \n \n with self.changing('is_executing'):\n self.executing = True \n \n with self.changing('is_finished', 'is_executing'):\n try:\n # if we're starting, we no longer have any dependencies\n self.dependencies.clear()\n \n if not self.is_cancelled:\n self.main()\n finally:\n self.executing = False\n self.finished = True", "def update_state(self, context):\n pass", "def ExecuteBeforeSolutionLoop(self):\n super().ExecuteBeforeSolutionLoop()\n num_of_vaviables = len(self.variables) + len(self.nonhistorical_variables)\n self.values = [[-1e6] * num_of_vaviables for _ in self.found_positions]" ]
[ "0.60446954", "0.5951977", "0.5695806", "0.5687665", "0.56560236", "0.5653465", "0.56520754", "0.5633276", "0.559409", "0.5562694", "0.54696906", "0.54538536", "0.5440695", "0.5416885", "0.5380987", "0.5366457", "0.5364525", "0.5331849", "0.5323494", "0.53195906", "0.53195906", "0.53085035", "0.53000546", "0.5282828", "0.52782464", "0.52712125", "0.52664864", "0.52351665", "0.52097136", "0.52049536", "0.51982033", "0.51883304", "0.5182944", "0.5154984", "0.5154984", "0.5154984", "0.5154984", "0.5154984", "0.5149544", "0.5130684", "0.51288253", "0.51150465", "0.51070714", "0.5084055", "0.5065029", "0.5063094", "0.5061666", "0.505786", "0.50572574", "0.505325", "0.5047704", "0.5046374", "0.5031111", "0.50302446", "0.50280464", "0.5014093", "0.5010491", "0.5001591", "0.49955475", "0.49925703", "0.49922067", "0.49922067", "0.4970376", "0.49590984", "0.49583974", "0.4955242", "0.49544924", "0.49499753", "0.4943746", "0.49430194", "0.4942275", "0.4941596", "0.49398565", "0.493672", "0.49363688", "0.493313", "0.49307764", "0.49250042", "0.4922194", "0.4918863", "0.4908058", "0.49073812", "0.49048898", "0.48806018", "0.48805353", "0.48775002", "0.48772317", "0.48731717", "0.48729417", "0.48727772", "0.48674056", "0.4860246", "0.48544538", "0.48526222", "0.48511964", "0.48468214", "0.48451972", "0.48441783", "0.48390174", "0.48363125" ]
0.66357535
0
Compute the current portfolio. Notes: This is cached, repeated access will not recompute the portfolio until the portfolio may have changed.
def portfolio(self):
    self.update_portfolio()
    return self._immutable_portfolio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_portfolio(self):\n if not self._dirty_portfolio:\n return\n\n portfolio = self._portfolio\n pt = self.position_tracker\n\n portfolio.positions = pt.get_positions()\n position_stats = pt.stats\n\n portfolio.positions_value = position_value = (\n position_stats.net_value\n )\n portfolio.positions_exposure = position_stats.net_exposure\n self._cash_flow(self._get_payout_total(pt.positions))\n\n start_value = portfolio.portfolio_value\n\n # update the new starting value\n portfolio.portfolio_value = end_value = portfolio.cash + position_value\n\n pnl = end_value - start_value\n if start_value != 0:\n returns = pnl / start_value\n else:\n returns = 0.0\n\n portfolio.pnl += pnl\n portfolio.returns = (\n (1 + portfolio.returns) *\n (1 + returns) -\n 1\n )\n\n # the portfolio has been fully synced\n self._dirty_portfolio = False", "def get_portfolio_object(self):\n return self.__get_portfolio_object(self.portfolio_name, self.portfolio_user)", "def update_portfolio(self, portfolio: PortfolioController):\n now = portfolio.get_history(seconds_back=0)\n future = portfolio.get_history(seconds_back=-self.update_interval)\n\n for fund in portfolio.funds:\n best_currency = max(portfolio.currencies, key=lambda currency: future_value(fund, currency, now, future))\n if best_currency != fund.currency:\n portfolio.request_transfer(fund, best_currency)", "def get_portfolio_pnl(self):\n\n return self._portfolio", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def current_portfolio_weights(self) -> 'pd.Series[float]':\n position_values = pd.Series({\n asset: (\n position.last_sale_price *\n position.amount *\n asset.price_multiplier\n )\n for asset, position in self.positions.items()\n }, dtype=\"float64\")\n return position_values / self.portfolio_value", "def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()", "def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n 
pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()", "def __get_portfolio_object(self, name, user):\n portfolio = self.__get_object_portfolio_bulk(name, user)\n if portfolio is None:\n portfolio = self.__get_object_portfolio_bulk(name, user, \"portfolio_update\")\n if portfolio is None:\n portfolio = self.db_tool.session.query(Portfolio) \\\n .outerjoin(Orders)\\\n .join(Stock)\\\n .filter(name == Portfolio.name) \\\n .filter(user == Portfolio.user).first()\n self.bulk_data[\"portfolio_update\"].append(portfolio)\n return portfolio", "def backtest_portfolio(self):\n self.rank=dict()\n self.accuracy=dict()\n portfolio = dict()\n \n for algo in self.algos:\n portfolio[algo]=pd.DataFrame(index=self.positions.index)\n self.pos_diff=dict()\n self.pos_diff[algo] = self.positions[algo].diff()\n \n portfolio[algo]['price_diff'] = self.bars['Close']-self.bars['Open']\n #portfolio['price_diff'][0:5] = 0.0\n portfolio[algo]['profit'] = self.positions[algo] * portfolio[algo]['price_diff']\n portfolio[algo]['total'] = self.initial_capital + portfolio[algo]['profit'].cumsum()\n portfolio[algo]['returns'] = portfolio[algo]['total'].pct_change()\n d=np.array(portfolio[algo]['profit']).copy()\n d[d>0]=1\n d[d<0]=0\n d[np.array(self.positions[algo])==0]=1\n for i in np.arange(1,len(d)+1):\n c=float(sum(d[0:i]))/(i)\n d[i-1]=c\n portfolio[algo]['accuracy']=d\n self.rank[algo]=float(portfolio[algo]['total'][-1] - portfolio[algo]['total'][0])\n self.returns=portfolio\n c=np.array(self.returns[algo]['profit'])\n c[c>0]=1\n c[c<0]=0\n c[np.array(self.positions[algo])==0]=1\n accuracy=round(float(c.sum())/len(c),2)*self.rank[algo]\n self.accuracy[algo]=accuracy\n #self.ranking= sorted(self.rank.items(), key=operator.itemgetter(1), reverse=True)\n self.ranking= sorted(self.accuracy.items(), key=operator.itemgetter(1))\n self.ready=True\n return (portfolio, self.rank, self.ranking)", "def backtest_portfolio(self):\n raise NotImplementedError(\"Should implement backtest_portfolio()!\")", "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping 
track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def getPortfolioValue(self, start_t, t):\n sum_tmp=0\n for item in self.portfolio.keys():\n if \"DJI_\" in item:\n t_tmp=datetime.strftime(pd.date_range(end=t,periods=1,freq='B')[0],'%Y-%m-%d')\n price=universe.get_price_in_currency(item,t_tmp,'CAD')\n elif 'rf_rate' in item:\n price=universe.get_security(item).get_cc_return(start_t,t) \n else:\n price=universe.get_price_in_currency(item,t,'CAD')\n #price=universe.get_security(item).price[t]\n amount=self.portfolio[item]\n sum_tmp=sum_tmp+price*amount\n \n return sum_tmp", "def get_portfolio_pnl_tsd(self):\n\n return self._tsd_portfolio", "def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = 
pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def calculate_portfolio(self, request, pk=None, **kwargs):\n goal = self.get_object()\n\n check_state(Goal.State(goal.state), Goal.State.ACTIVE)\n\n setting_str = request.query_params.get('setting', None)\n if not setting_str:\n raise ValidationError(\"Query parameter 'setting' must be specified and a valid JSON string\")\n try:\n setting = ujson.loads(setting_str)\n except ValueError:\n raise ValidationError(\"Query parameter 'setting' must be a valid json string\")\n\n # Create the settings object from the dict\n serializer = serializers.GoalSettingStatelessSerializer(data=setting)\n serializer.is_valid(raise_exception=True)\n settings = serializer.create_stateless(serializer.validated_data, goal)\n\n try:\n data = self.build_portfolio_data(calculate_portfolio(settings=settings,\n data_provider=DataProviderDjango(),\n execution_provider=ExecutionProviderDjango()))\n return Response(data)\n except Unsatisfiable as e:\n rdata = {'reason': \"No portfolio could be found: {}\".format(e)}\n if e.req_funds is not None:\n rdata['req_funds'] = 
e.req_funds\n\n return Response({'error': rdata}, status=status.HTTP_400_BAD_REQUEST)", "def initialize_portfolio(self):\n\n raise NotImplementedError('''\n Must implement initialize_portfolio. Call help() for details.\n ''')", "def rebalance(context, data):\n logger.debug('rebalancing on: %s', algo.get_datetime())\n\n context.trend_filter = False\n\n # new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n new_portfolio = algo.pipeline_output('pipeline').dropna(subset=['overall_rank']).sort_values('momentum', ascending=False)\n\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio (before filtering) - equity: %s', equity)\n\n # print(new_portfolio)\n\n # new_portfolio = new_portfolio[new_portfolio['overall_rank'].notna() & new_portfolio['momentum'] > 40][:20]\n \n # new_portfolio = new_portfolio[(new_portfolio['momentum_decile'] > 8)][:20]\n\n new_portfolio = new_portfolio.nlargest(20, ['overall_rank', 'momentum']) #<- $600K PL in 10 years\n\n # new_portfolio = new_portfolio.nlargest(20, ['momentum', 'overall_rank']) #<- 1M PL in 10 years\n\n if logger.level is logging.DEBUG:\n for equity, row in new_portfolio.iterrows():\n logger.debug('new portfolio - (after filtering) equity: %s', equity)\n \n\n # print(len(new_portfolio.index))\n\n # volatility driven weights\n # new_portfolio['inverse_volatility'] = new_portfolio['volatility'].apply(lambda x: 1 / x)\n # inv_vola_sum = new_portfolio['inverse_volatility'].sum()\n # new_portfolio['target_weight'] = new_portfolio['inverse_volatility'].apply(lambda x: x / inv_vola_sum)\n\n # portfolio size driven weights\n # num_equities = len(new_portfolio.index)\n # new_portfolio['target_weight'] = 1 / num_equities\\\n\n # logger.info('len existing portfolio: %s', len(context.portfolio.positions))\n\n if logger.level is logging.DEBUG:\n for equity, values in context.portfolio.positions.items():\n logger.debug('context.portfolio.positions - equity: %s, amount: %s, cost_basis: %s, sold_on: %s, sold_at_price: %s', equity, values.amount, values.cost_basis, values.last_sale_date, values.last_sale_price)\n\n \n order_target(algo.sid('FIBBG000NTFYM5'), 0)\n logger.debug('selling all bonds')\n\n for equity in context.portfolio.positions:\n if equity is algo.sid('FIBBG000NTFYM5'): \n continue\n if equity not in set(new_portfolio.index.tolist()):\n # logger.info('selling %s', equity)\n order_target_percent(equity, 0)\n\n stock_weights = 1.0 / max(len(context.portfolio.positions), len(new_portfolio.index))\n\n logger.debug('len existing portfolio (afer ejection): %s', len(context.portfolio.positions))\n logger.debug('len new portfolio: %s', len(new_portfolio.index))\n logger.debug('stock_weights: %s', stock_weights)\n\n # print(context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5')))\n\n # spy = context.portfolio.positions.get(algo.sid('FIBBG000NTFYM5'))\n\n # if (spy is not None) and (spy.amount > 0):\n # order_target_percent(algo.sid('FIBBG000NTFYM5'), 0)\n\n for equity, row in new_portfolio.iterrows():\n if row.trend_filter is True:\n # logger.info('buying %s', equity)\n context.trend_filter = True\n order_target_percent(equity, stock_weights)\n else:\n context.trend_filter = False\n \n logger.debug('cash: %s', context.portfolio.cash)\n logger.debug('portfolio_value: %s', context.portfolio.portfolio_value)\n logger.debug('num_positions: %s', len(context.portfolio.positions))\n logger.debug('positions: %s', context.portfolio.positions)", "def index():\n def 
getListOfCompanies(username, symbolOrPriceOrNumber):\n if symbolOrPriceOrNumber == \"symbol\" or symbolOrPriceOrNumber == \"price\" or symbolOrPriceOrNumber == \"number\":\n rows = db.execute(\"SELECT {0} FROM portfolio WHERE username=:username\".format(symbolOrPriceOrNumber), username=username)\n if symbolOrPriceOrNumber == \"symbol\" and len(rows) >= 1:\n namesList = []\n for row in rows:\n namesList.append(lookup(row[symbolOrPriceOrNumber])[\"name\"])\n return namesList\n elif symbolOrPriceOrNumber == \"price\" and len(rows) >= 1:\n pricseList = []\n for row in rows:\n pricseList.append(row[symbolOrPriceOrNumber])\n return pricseList\n elif symbolOrPriceOrNumber == \"number\" and len(rows) >= 1:\n numbersList = []\n for row in rows:\n numbersList.append(row[symbolOrPriceOrNumber])\n return numbersList\n else:\n return None\n else:\n return None\n\n def getTotalValueHolding(username):\n priceRow = db.execute(\"SELECT price FROM portfolio WHERE username=:username\", username=username)\n numberRow = db.execute(\"SELECT number FROM portfolio WHERE username=:username\", username=username)\n\n if len(priceRow) >= 1 and len(numberRow) >= 1 and len(priceRow) == len(numberRow):\n totalList = []\n for i in range(len(priceRow)):\n totalList.append(float(priceRow[i][\"price\"]) * float(numberRow[i][\"number\"]))\n\n return totalList\n\n username = db.execute(\"SELECT username FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"username\"]\n companiesNames = getListOfCompanies(username, \"symbol\")\n numberOfShares = getListOfCompanies(username, \"number\")\n prices = getListOfCompanies(username, \"price\")\n totalValueHolding = getTotalValueHolding(username)\n\n currentCashBalance = db.execute(\"SELECT cash FROM users WHERE id=:userId\", userId=session[\"user_id\"])[0][\"cash\"]\n total = 0\n if totalValueHolding:\n for totalValue in totalValueHolding:\n total = total + totalValue\n\n cashAndStocksTotalValue = float(currentCashBalance) + total\n\n return render_template(\"index.html\", username=username, companiesNames=companiesNames, numberOfShares=numberOfShares,\n prices=prices, totalValueHolding=totalValueHolding, currentCashBalance=currentCashBalance, cashAndStocksTotalValue=cashAndStocksTotalValue)", "def portfolio_table(self):\n idx = set(name.split('-')[0].split('.')[0] for name, etf in self.etfs.items() if not etf.sold())\n table = pd.DataFrame({'Invested': 0, 'Shares':0, 'Share Price':0, 'Present Value':0, 'P/L':0, 'P/L%':0},index=idx)\n for name, etf in self.etfs.items():\n if not etf.sold():\n table.loc[name.split('-')[0].split('.')[0], 'Invested'] += etf.initial_investment()\n table.loc[name.split('-')[0].split('.')[0], 'Shares'] += etf.n_shares\n table.loc[name.split('-')[0].split('.')[0], 'Share Price'] = etf.stock_price()\n table.loc[name.split('-')[0].split('.')[0], 'Present Value'] += etf.present_value()\n table.loc[name.split('-')[0].split('.')[0], 'P/L'] += etf.profit_loss()\n table.insert(1, 'PMA', round(table['Invested'] / table['Shares'], 2))\n table.insert(3, 'Initial Weight', round(table['Invested'] / table['Invested'].sum() * 100, 2))\n table.insert(4, 'Present Weight', round(table['Present Value'] / table['Present Value'].sum() * 100, 2))\n table['P/L%'] = round(table['P/L'] / table['Invested'] * 100, 2)\n table['P/L'] = round(table['P/L'], 2)\n table['Present Value'] = round(table['Present Value'], 2)\n return table.sort_values('Invested', 0, ascending=False)", "def index():\n\n #select user's portfolio\n rows = db.execute(\"SELECT * FROM portfolio WHERE 
userid=:id\", id=session[\"user_id\"])\n\n #set temporary holding place for cash to zero\n tcash = 0\n\n #update the stock information in user's portfolio\n for row in rows:\n stock = row[\"stock\"]\n number = row[\"number\"]\n quote = lookup(stock)\n total = float(number) * float(quote[\"price\"])\n tcash += total\n db.execute(\"UPDATE portfolio SET price=:price, total=:total WHERE userid=:id AND stock=:stock AND number=:number\", price=usd(quote[\"price\"]), total=total, id=session[\"user_id\"], stock=stock, number=number)\n\n #select user's cash and updated portfolio\n updated_cash = db.execute(\"SELECT cash FROM users WHERE id=:id\", id=session[\"user_id\"])\n tcash += updated_cash[0][\"cash\"]\n updated_stock = db.execute(\"SELECT stock, SUM(number) AS number, price, SUM(total) AS stock_total FROM portfolio WHERE userid=:id GROUP BY stock HAVING SUM(number) > 0\", id=session[\"user_id\"])\n\n return render_template(\"index.html\", stocks=updated_stock, cash=usd(updated_cash[0][\"cash\"]), all_total=usd(tcash))", "def update_portfolio_on_market(self, market: MarketEvent):\n self._portfolio.update_market_value(market)", "def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression", "def portfolio_analytics(port_returns, market_returns):\n\n # add the intercept to the model\n x2 = sm.add_constant(market_returns)\n\n # train the model\n estimator = sm.OLS(port_returns, x2)\n model = estimator.fit()\n\n # get portfolio analytics\n alpha, beta = model.params\n r_squared = model.rsquared\n regression = model.predict()\n\n return alpha, beta, r_squared, regression", "def __init__(\n self,\n portfolio,\n market=None,\n commission_min=5.00,\n commission_pct=0.0,\n buy_percent=1.0,\n sell_percent=1.0,\n pm_threshold=0.0,\n pm_order=1.0,\n risk_free_return=1.0,\n name=None\n ):\n\n # Assumptions\n self.name = name if name else portfolio.name\n self.commission_min = commission_min\n self.commission_pct = commission_pct\n self.buy_percent = buy_percent\n self.sell_percent = sell_percent\n self.pm_threshold = pm_threshold\n self.pm_order = pm_order\n self.risk_free_return = risk_free_return\n self.performance = {}\n\n # Inputs\n self.portfolio = portfolio\n self.market = copy.deepcopy(market) if market else Asset(np.ones(len(self.portfolio.dates)))\n\n # Trading states\n self.long_open = {symbol:False for symbol in portfolio.assets.keys()}\n self.short_open = {symbol:False for symbol in portfolio.assets.keys()}\n\n # Keep track of intermidiate results for performance\n self.trade_data = []\n recordings = [\n 'buy price', 'buy shares', 'buy fees', 'buy date',\n 'sell price', 'sell shares', 'sell fees', 'sell date',\n 'gain', 'profit', 'loss', 'return', 'win/loose',\n 'min balance', 'min date', 'max balance', 'max date',\n 'drawdown', 'drawdown days',\n 'volatility', 'expected_return', 'beta', 'lpm', 'hpm',\n 'max', 'mean', 'min'\n ]\n self.record = {symbol:pd.DataFrame(columns=recordings) for symbol in portfolio.assets.keys()}\n self.max = {symbol:[portfolio.assets[symbol].c.iloc[0], None] for symbol in portfolio.assets.keys()}\n self.min = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}\n self.drawdown = {symbol:[999999999999999, None] for symbol in 
portfolio.assets.keys()}", "def get_stock(self, investor):\n\n # Find out the stock details \n sym, qty, price = investor.portfolios[0].portfolios[0]\n # p = investor.portfolios[0]\n \n # Check if broker has a portfolio\n if self.portfolios[0]:\n self.portfolios[0].add_stock(sym, qty, price)\n else:\n # Broker doesn't have a portfolio\n p = Portfolio()\n #logging.info(\"p is: %s\" % p)\n p.add_stock(sym, qty, price)\n self.add_portfolio(p)\n logging.info(\"Broker's portfolios AFTER addition: %s\" % self)\n # logging.info(\"WHAT ARE YOU\")\n logging.info(\"Investor portfolio BEFORE removal: %s\" % investor.portfolios[0].portfolios)\n investor.portfolios[0].remove_stock(sym, qty)\n logging.info(\"Investor portfolio AFTER removal: %s\" % investor.portfolios[0])\n # investor.portfolios[0].portfolios.remove( (sym, qty, price) )\n \n # investor.portfolios[0].remove(sym, qty, price)\n total_price = qty * price\n investor.portfolios[0].value -= total_price\n investor.cash += qty * float(price)", "def _initalize_portfolio_with_cash(self):\n self.cash = copy.copy(self.starting_cash)\n\n if self.starting_cash > 0.0:\n self.history.append(\n PortfolioEvent.create_subscription(\n self.current_dt, self.starting_cash, self.starting_cash\n )\n )\n\n self.logger.info(\n '(%s) Funds subscribed to portfolio \"%s\" '\n '- Credit: %0.2f, Balance: %0.2f' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id,\n round(self.starting_cash, 2),\n round(self.starting_cash, 2)\n )\n )", "def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2", "def calc_portfolio_risk(\n context,\n data,\n risk_func,\n hist_days=180,\n **kwargs):\n\n \n positions = context.portfolio.positions\n positions_index = pd.Index(positions)\n share_counts = pd.Series( \n index=positions_index, \n data=[positions[asset].amount for asset in positions] \n )\n\n current_prices = data.current(positions_index, 'price') \n current_weights = (\n share_counts * current_prices / context.portfolio.portfolio_value\n )\n \n prices = data.history(\n current_weights.index.tolist(),\n 
'price',\n hist_days,\n '1d'\n )\n\n daily_rets = prices.pct_change()\n daily_rets = daily_rets - daily_rets.mean(skipna=True)\n daily_rets = daily_rets.fillna(0.0)\n\n risk = risk_func(current_weights.values, daily_rets, **kwargs)\n return risk", "def __display_portfolio(self, p, w):\n\n global st_sort_key\n global st_reverse_sort\n\n line = 1\n total_assets = 0\n total_change = 0\n\n p.assets.sort(key=st_sort_key, reverse=st_reverse_sort)\n\n for s in p.assets:\n # Make sure we have space to write the portfolio totals.\n if line >= (curses.LINES - 3):\n break\n\n total_assets += (p.asset_counts[s.symb()] * s.price())\n total_change += (p.asset_counts[s.symb()] * s.change())\n\n # Color red/green for stocks going up/down.\n change_color = curses.color_pair(0)\n if s.change() > 0:\n change_color = curses.color_pair(1)\n elif s.change() < 0:\n change_color = curses.color_pair(2)\n\n direction = ''\n if s.change() > 0:\n direction = u'\\u25b2'\n elif s.change() < 0:\n direction = u'\\u25bc'\n\n w.addstr(line, 0, '%-15s' % s.name()[0:14])\n w.addstr(line, 16, '%-5s' % s.symb(), curses.A_BOLD)\n w.addstr(line, 22, '%9.2f' % s.price())\n w.addstr(line, 32, direction.encode('utf-8'), change_color)\n w.addstr(line, 33, '%6.2f %5.2f%%' % (abs(s.change()),\n abs(s.change_percent()) *\n 100),\n change_color)\n w.addstr(line, 47, '|')\n w.addstr(line, 49, '%-6d' % p.asset_counts[s.symb()])\n w.addstr(line, 56, '%11.2f' % (p.asset_counts[s.symb()] *\n s.price()))\n w.addstr(line, 68, '%10.2f' % (p.asset_counts[s.symb()] *\n s.change()),\n change_color)\n\n line += 1\n\n line += 1\n\n # Get overall change (of assets) for the portfolio.\n overall_change = total_assets - p.cost_basis()\n overall_color = curses.color_pair(0)\n if overall_change > 0:\n overall_color = curses.color_pair(1)\n elif overall_change < 0:\n overall_color = curses.color_pair(2)\n\n # Color red/green for assets changing.\n change_color = curses.color_pair(0)\n if total_change > 0:\n change_color = curses.color_pair(1)\n elif total_change < 0:\n change_color = curses.color_pair(2)\n\n # Print accumulated stats for the portfolio.\n w.addstr(line, 0, 'Daily:')\n w.addstr(line, 8, '$%.2f' % total_change,\n curses.A_BOLD | change_color)\n w.addstr(line, 23, 'Total:')\n w.addstr(line, 30, '$%.2f' % overall_change,\n curses.A_BOLD | overall_color)\n w.addstr(line + 1, 0, 'Assets:')\n w.addstr(line + 1, 8, '$%.2f' % total_assets)\n w.addstr(line + 1, 23, 'Cash: $%.2f' % p.cash)\n w.addstr(line + 1, 44, 'Total value:')\n w.addstr(line + 1, 58, '$%.2f' % (p.cash + total_assets),\n curses.A_BOLD)", "def optimizeForReturn(required_return, stock_db, use_genetic):\n print('Optimizing portfolio for %f' % required_return)\n pf = PortfolioFactory(stock_db, required_return, use_genetic=use_genetic)\n desired_portfolio = pf.desired_portfolio\n print('Required Return: %f' % required_return)\n print('Expected Return: %f' % math.pow(\n desired_portfolio.average_return, Config.DAYS_IN_YEAR))\n print('Downside Risk: %f' % desired_portfolio.downside_risk)\n print('Downside Correl: %f' % desired_portfolio.downside_correl)\n print('Score: %f' % desired_portfolio.score)\n\n # Write desired portfolio.\n DataIO.writeDesiredPortfolio(\n desired_portfolio, stock_db,\n 'output/DesiredPortfolio_%.0f_%.4f_%s.csv' % (\n Config.MINIMUM_AMOUNT_DATA, required_return, Config.TODAY.date()))\n\n print('Finished for %f' % required_return)\n\n return desired_portfolio", "def load_portfolio(self):\n\n if self.terminate:\n return\n\n # First get a file.\n self.lock.acquire()\n 
self.windows['ACTION'].erase()\n\n curses.curs_set(1)\n\n tp = curses.textpad.Textbox(self.windows['ACTION'])\n file_str = tp.edit()\n curses.curs_set(0)\n\n self.clear_action()\n self.lock.release()\n\n self.refresh()\n\n # Now that we have a portfolio file, let's load it up and\n # fire off a thread to update it.\n p = Portfolio(file_str.strip())\n self.portfolios.append(p)\n self.track_portfolio(p)", "def display_portfolio(self, p):\n\n if self.terminate:\n return\n\n w = self.windows['MAIN']\n\n self.clear_main()\n self.__display_portfolio(p, w)\n self.clear_header()\n self.set_header(p)\n\n self.refresh()", "def income_model_constant_portfolio_return(num_of_years=30, trials=100, method='normal'):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # # dataframe for unsorted returns (normal)\n # median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n # median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n # median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'r_FIA')})\n #\n # # dataframe for smallest to largest returns\n # median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n # median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n #\n # # dataframe for unsorted returns (normal)\n # median_returns_largest = 
pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n # median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n \n # -------------For Constant Growth Rates------------------------\n const_fia_index_ret = float(read_income_inputs.loc['const_fia_index_ret', 'inputs'])\n const_risky_port_ret = float(read_income_inputs.loc['const_risky_port_ret', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n # income_df.loc[:, 'index_returns'] = read_normal.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n # ----------CONSTANT FIA INDEX GROWTH RATE-------------------\n income_df.loc[:, 'index_returns'] = const_fia_index_ret\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n # for c in range(len(r_cols)):\n # ret = np.random.normal(base_returns[c], 
base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_small.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.sort(ret.flatten())\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.sort(ret.flatten())})\n\n elif method == 'largest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_large.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.flip(np.sort(ret.flatten()))\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.flip(np.sort(ret.flatten()))})\n\n else:\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n pre_income_base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n pre_income_port_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ---------Initial Investments for pre-income account values---------------------\n pre_income_base_inv = base_investment\n pre_income_port_inv = port_investment\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n pre_income_base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n # ---------------For year 0, the year of investment------------\n\n # ------------Calculate the annual portfolio returns - Gross Returns--------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_base_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_base_inv for c in range(len(boy_value))]\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_inv = pre_income_base_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n # base_investment = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_pre_income'] = base_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n # ----For years between the start of the investment and start if the income---------------\n base_df.loc[counter, 
boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total'] = base_investment * (1 + 0.06)\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total_net_fees']\n\n else:\n\n # -------------For Years after the income started----------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total'] = base_investment * (1 + const_risky_port_ret)\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_base_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_base_df.loc[:, 'total_net_fees']\n sim_base_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'Base']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n pre_income_port_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_port_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_port_inv for c in range(len(boy_value))]\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_inv = pre_income_port_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n # port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_pre_income'] = port_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # ------------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n \n # -----------------------CONSTANT GROWTH RATE-----------------\n pre_income_port_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] 
- \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n \n # -------CONSTANT GROWTH RATE-----------------\n fia_portfolio_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n # fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total'] = port_investment * (1 + const_risky_port_ret)\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_port_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_port_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'FIA']), inplace=True)\n \n # --------------------------------PreIncome Block Ends----------------------------\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. 
income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # --------------Median Value for all the portfolios at end of the acturial age--------------------\n 
base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # -------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # ---------------------Lifetime Average Income----------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # --------------------------------------------------------------------------------\n\n # # 
-----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.1, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '10th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------------------------------plot for histogram for porfolios--------------------------------------\n # base_term_value = sim_base_total.loc[sim_base_total.index[:life_expectancy - clients_age], :]\n # fact = 1 / len(base_term_value)\n # base_ann_ret = (base_term_value.iloc[-1] / base_term_value.iloc[0]) ** fact - 1\n # counts, bins, bars = plt.hist(base_ann_ret)\n\n # ------------------------quantile analysis for base terminal 
value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(dest_simulation + method + '_leveled_growth_simulation.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # -----------------------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, 
sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. Age\n base_pre_income_success = sim_base_total_preincome.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n base_ann_ret_pre_income = base_pre_income_success.pct_change().fillna(0)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. 
Age\n port_pre_income_success = sim_port_total_preincome.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n port_ann_ret_pre_income = port_pre_income_success.pct_change().fillna(0)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n prob_success_df.loc[:, 'base_pre_income_ann_ret'] = base_ann_ret_pre_income\n prob_success_df.loc[:, 'port_pre_income_ann_ret'] = port_ann_ret_pre_income\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n sim_base_total_preincome.to_excel(writer, sheet_name='base_preincome_portfolios')\n # -------Add premium to year 0 value to get total portfolio value---------\n sim_port_total_preincome.iloc[0] = sim_port_total_preincome.iloc[0] + premium\n sim_port_total_preincome.to_excel(writer, sheet_name='port_preincome_portfolios')\n\n # -------------For Simulation slide - BASE Portfolio - Can Delete --------------------\n # base_qcut_preinc = pd.DataFrame(index=sim_base_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # base_qcut_preinc.loc[:, cols[c]] = sim_base_total_preincome.quantile(q_cut[c], axis=1)\n #\n # # -------------For Simulation slide - Proposed Portfolio --------------------\n # port_qcut_preinc = pd.DataFrame(index=sim_port_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # port_qcut_preinc.loc[:, cols[c]] = sim_port_total_preincome.quantile(q_cut[c], axis=1)\n #\n # base_qcut_preinc.to_excel(writer, sheet_name='base_preincome_quantiles')\n # port_qcut_preinc.to_excel(writer, sheet_name='port_preincome_quantiles')\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = 
cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n # if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n # median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n #\n # elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n # median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n #\n # else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n # median_returns_largest.to_excel(writer, sheet_name='gr_port_median_desc')\n\n # ---------------------Histogram for S&P Forecast---------------------------------------\n sp_returns = read_returns_est.loc['SPXT Index', 'Annualized Returns']\n sp_risk = read_returns_est.loc['SPXT Index', 'Annualized Risk']\n sp_random_ret = np.random.normal(loc=sp_returns, scale=sp_risk, size=10000)\n bins, data = np.histogram(sp_random_ret, bins=20)\n df_ret = pd.DataFrame(data, columns=['Return_range'])\n df_bins = pd.DataFrame(bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n\n df_hist.to_excel(writer, sheet_name='sp500_histogram')\n writer.save()\n\n print(\"simulation completed....\")", "def index():\n user_stocks_list = db.execute(\"SELECT stock FROM transactions WHERE id = :current_id\", current_id=session[\"user_id\"])\n user_stocks = []\n for stock in user_stocks_list:\n if stock['stock'] not in user_stocks:\n user_stocks.append(stock['stock'])\n\n stock_portfolio = []\n\n for possible_stock in user_stocks:\n bought_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='B')\n bought_shares = 0\n bought_shares = bought_shares_list[0][\"SUM(units)\"]\n sold_shares_list = db.execute(\"SELECT SUM(units) FROM transactions WHERE (id = :current_id AND stock = :stock AND type = :t)\",\n current_id=session[\"user_id\"], stock=possible_stock, t='S')\n sold_shares = 0\n sold_shares = sold_shares_list[0][\"SUM(units)\"]\n if sold_shares == None:\n sold_shares = 0\n\n available_shares = 0\n if bought_shares != None and (bought_shares - sold_shares) > 0:\n available_shares = bought_shares - sold_shares\n current_price = 
int(lookup(possible_stock)[\"price\"])\n market_value = current_price * available_shares\n dict_stock = {}\n dict_stock['name_stock'] = possible_stock\n dict_stock['shares_quantity'] = available_shares\n dict_stock['current_price'] = current_price\n dict_stock['market_value'] = market_value\n stock_portfolio.append(dict_stock)\n else:\n pass\n\n available_money_list = db.execute(\"SELECT cash FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n available_money = usd(available_money_list[0]['cash'])\n\n username_list = db.execute(\"SELECT username FROM users WHERE id = :current_id\", current_id=session[\"user_id\"])\n username = username_list[0][\"username\"]\n\n sum_market_values = 0\n for collection in stock_portfolio:\n sum_market_values += int(collection['market_value'])\n\n total_value = usd(available_money_list[0]['cash'] + sum_market_values)\n\n return render_template(\"index.html\", stock_portfolio=stock_portfolio, user_stocks=user_stocks, money=available_money, name=username, total_value=total_value)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)", "def get_portfolio_value(prices, allocs, start_val):\n normed = prices/prices.iloc[0]\n alloced = np.multiply(allocs, normed)\n pos_vals = alloced * start_val\n port_val = pos_vals.sum(axis=1)\n return port_val", "def hedge_portfolio(context, data):\r\n factors = get_alphas_and_betas(context, data)\r\n beta_exposure = 0.0\r\n count = 0\r\n for asset in context.portfolio.positions:\r\n if asset in factors and asset != context.index:\r\n if not np.isnan(factors[asset].beta):\r\n beta_exposure += factors[asset].beta\r\n count += 1\r\n beta_hedge = -1.0 * beta_exposure / count\r\n dollar_amount = context.portfolio.portfolio_value * beta_hedge\r\n record(beta_hedge=beta_hedge)\r\n if not np.isnan(dollar_amount):\r\n order_target_value(context.index, dollar_amount)", "def after_run(self):\n # Calculate the performance of the strategy and portfolio\n self.portfolio.calc_stats()\n self.calc_performance()\n\n return self", "def index():\n # Establish userID.\n userID = session[\"user_id\"]\n # Isolate all results from portfolio table for the current user.\n portfolio = db.execute(\"SELECT * FROM portfolio WHERE id=:userID\", userID=session[\"user_id\"])\n # Cash for current user (first row, cash column)\n cash = db.execute(\"SELECT cash FROM users WHERE id=:userID\", userID=userID)[0][\"cash\"]\n # Empty list to store stock data as iterating through rows.\n stockData = []\n # Set total for combined stoc value to 0.\n totalAllStocks = 0\n\n # Iterate over rows from portfolio and allocate a row for each stock that has more than 0 owned.\n for row in portfolio:\n if row[\"numOwned\"] != 0:\n stockData.append(row)\n\n # Iterate over rows in stock data and provide value for each column. 
Other values for use in html are already in list from previous loop.\n # Had to play around with usd, once in usd is a str rather than float so usd always has to be post calculations.\n for row in stockData:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"currentPrice\"] = usd(stock[\"price\"])\n row[\"total\"] = usd(row[\"numOwned\"] * stock[\"price\"])\n totalAllStocks += row[\"numOwned\"] * stock[\"price\"]\n # Grand Total is combined stock values and cash value.\n grandTotal = totalAllStocks + cash\n # Return index.html input sources.\n return render_template(\"index.html\", stockData=stockData, cash=usd(cash), totalAllStocks = usd(totalAllStocks), grandTotal=usd(grandTotal))", "def portfolio(user_id: int, db: Connection) -> Dict[str, Union[str, int, float]]:\n\n # accumulator\n user_portfolio = User_state_info([], 0, 0)\n\n # get user cash from `users` table\n if (cash := get_cash(user_id, db)) is None:\n return [-1, f\"`get_cash: {DB_ERROR} or {USER_NOT_FOUND}\"]\n\n # fill result structure with cash\n user_portfolio.cash = round(cash,2)\n\n # get user balance from `shares` table\n balance = get_shares_info(user_id, db)\n\n # fill result structure with `symbol` and `qty`\n user_portfolio.shares = fill_qty(balance)\n\n # fill result structure with 'company_name\" and `price`\n user_portfolio.shares = get_company_and_price(user_portfolio.shares)\n\n # calc and fill `total` field\n user_portfolio.total = sum([x.total for x in user_portfolio.shares]) + user_portfolio.cash\n \n return user_portfolio.__dict__", "def track_portfolio(self, p):\n\n global st_refresh_thread\n\n if self.terminate:\n return\n\n p.refresh()\n\n self.lock.acquire()\n self.active_portfolio = p\n self.display_portfolio(p)\n self.lock.release()\n\n if not self.refresh_thread:\n thr_args = list()\n thr_args.append(self)\n self.refresh_thread = threading.Thread(target=st_refresh_thread,\n args=thr_args)\n self.refresh_thread.start()", "def get_porfolio_signal(self):\n\n return self._portfolio_signal", "async def list(self, ctx, user=None, date=None):\n if not user:\n user = ctx.message.author\n else:\n user = util.GetUserFromNameStr(ctx.message.server.members, user)\n change = GetPortfolioChange(user.id)\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n await self.bot.say(\n '```%s\\'s portfolio:\\n'\n 'Total Value: $%s (%.2f%s) \\n'\n '%s```' % (user, portfolio.Value(), change, \"%\", portfolio.AsTable()))", "def index():\n # Use a place holder ':curr_id' to call the session id which is the user's id\n rows = db.execute(\"SELECT stocks.symbol, stocks.name, portfolio.shares FROM portfolio JOIN users ON users.id = portfolio.user_id JOIN stocks ON portfolio.stock_id = stocks.id WHERE users.id==:curr_id\", curr_id=session[\"user_id\"])\n # Make a select query only on cash to be able to display it in portfolio's table\n row_cash = db.execute(\"SELECT cash FROM users WHERE id==:curr_id\", curr_id=session[\"user_id\"])\n\n # gets the current price of each stock queried\n if rows:\n for r in rows:\n r_shares = r[\"shares\"]\n r_symbol = r[\"symbol\"]\n # run lookup function to get current price\n dict_2 = lookup(r_symbol)\n # Adds the key \"price\" and its value to the dictionary \"rows\"\n r[\"price\"] = dict_2[\"price\"]\n # Calculates the grand total (stocks’ total value plus cash)\n total = sum([r[\"price\"]*r[\"shares\"] for r in rows]) + row_cash[0][\"cash\"]\n return render_template(\"portfolio.html\", rows=rows, row_cash=row_cash, total=total)", "def index():\n\n rows = 
db.execute(\"SELECT * FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n users = db.execute(\"SELECT * FROM users WHERE id = :id\", id=session[\"user_id\"])\n cash = users[0][\"cash\"]\n total = 0\n\n for row in rows:\n symbol = row[\"symbol\"]\n shares = row[\"shares\"]\n stock = lookup(symbol)\n price_t = float(stock[\"price\"]) * shares\n db.execute(\"UPDATE portfolio SET price=:price WHERE id=:id AND symbol=:symbol\",\n price=float(stock[\"price\"]), id=session[\"user_id\"], symbol=row[\"symbol\"])\n total += price_t\n\n TOTAL = total + cash\n return render_template(\"index.html\", rows=rows, cash=usd(cash), TOTAL=usd(TOTAL))", "def evaluate_portfolio(username):\n user_obj = User.query.filter(User.username == username).first()\n date = request.args.get('date')\n\n if user_obj is None:\n return util.build_json_response('User does not exist')\n\n if not util.is_valid_date_string(date):\n return util.build_json_response(\"Not a valid date of the form YYYY-MM-DD\")\n\n following_date = util.add_days_to_date(date, 1)\n equities = db.session.query(Portfolio.ticker, func.sum(Portfolio.quantity))\\\n .filter(Portfolio.user_id == user_obj.id) \\\n .filter(Portfolio.transaction_date <= following_date) \\\n .group_by(Portfolio.ticker).all()\n\n e_total = 0\n for equity in equities:\n price = equity[1] * market_data.get_stock_price(equity[0], date, 'low')\n e_total += price\n\n total = round(e_total + user_obj.balance, 2)\n cash = round(user_obj.balance, 2)\n e_total = round(e_total, 2)\n\n return util.build_json_response(\"Portfolio totals retrieved\", equity_total=e_total, cash_balance=cash, account_total=total)", "def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \\\n syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):\n\n # Read in adjusted closing prices for given symbols, date range\n dates = pd.date_range(sd, ed)\n prices_all = get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all['SPY'] # only SPY, for comparison later\n\n\t# find the allocations for the optimal portfolio\n #1 provide an initial guess for x\n allocs = np.ones(len(syms))/len(syms)\n #2 Provide constraints to the optimizer\n bounds = [(0,1) for i in syms]\n constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) })\n #3 call the optimizer\n res = spo.minimize(get_sharpe_ratio, allocs, \n \t\t\t\t\targs=prices, \n \t\t\t\t\tbounds = bounds,\n \t\t\t\t\tconstraints=constraints)\n allocs = res.x\n \n # Get daily portfolio value\n port_val = get_portfolio_value(prices, allocs, 1.0)\n \n # Get portfolio statistics\n cr, adr, sddr, sr = get_portfolio_stats(port_val, \n \t\t\t\t\t\t\t\t\t\tdaily_rf=0.0, \n \t\t\t\t\t\t\t\t\t\tsamples_per_year=252)\n \n # Compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n # add code to plot here\n df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)\n plot_normalized_data(df_temp)\n\n return allocs, cr, adr, sddr, sr", "def __update_portfolio_handler(self, msg):\n pass", "def income_model_asset_based_portfolio_custom(num_of_years=30, trials=100, method='normal', income=True):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = 
pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n clean_names = list(read_returns_est.index)\n clean_names = [s.split(' ')[0] for s in clean_names]\n read_returns_est.loc[:, 'names'] = clean_names\n read_returns_est.set_index('names', drop=True, inplace=True)\n read_returns_est = read_returns_est[:-1]\n read_returns_est.rename(index={'SBMMTB3': 'Cash', read_returns_est.index[-1]: 'FIA'}, inplace=True)\n\n # ---------------Returns DataFrame based on the use input------------------------------------\n ann_ret = np.full((num_of_years + 1, len(read_returns_est)), read_returns_est.loc[:, 'Annualized Returns'])\n read_normal = pd.DataFrame(ann_ret, index=np.arange(num_of_years + 1), columns=read_returns_est.index)\n # read_normal.rename(columns={read_normal.columns[-1]: 'FIA'}, inplace=True)\n user_est_fia_return = float(read_income_inputs.loc['fia_forecast', 'inputs'])\n read_normal.loc[:, 'FIA'] = user_est_fia_return\n\n read_returns_est.loc['FIA', 'Annualized Returns'] = user_est_fia_return\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n # read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n # read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n # read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n #\n # # dataframe for smallest to largest returns\n # median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n # median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n #\n # # dataframe for unsorted returns (normal)\n # median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n # median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = 
list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n while runs < trials:\n print(runs)\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n if income:\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n else:\n req_annual_income = 0.0\n income_needed = 0.0\n income_net_fia_income = 0.0\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n 
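# --- Illustrative aside (a minimal sketch, not part of the model): the crediting logic above compounds the index return over each strategy term, applies the participation rate, annualizes, deducts the annual spread and re-compounds over the term.\n        # The names and inputs below are hypothetical (_demo_* prefix, 3-year term, 50% participation, 1% annual spread); pandas is already imported as pd in this module.\n        _demo_index_ret = pd.Series([0.04, -0.02, 0.06])
\n        _demo_term_ret = (1 + _demo_index_ret).prod() - 1  # point-to-point index return over the term\n        _demo_with_par = _demo_term_ret * 0.5  # apply the participation rate\n        _demo_annualized = (1 + _demo_with_par) ** (1 / 3) - 1  # annualize over the term
\n        _demo_net_term_ret = (1 + _demo_annualized - 0.01) ** 3 - 1  # net of annual spread, re-compounded over the term\n\n        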
random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n # for c in range(len(r_cols)):\n # ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n # this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, base_assets]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n if income:\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n else:\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] + \\\n income_from_fia\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n runs += 1\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base 
Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # --------------Median Value for all the portfolios at end of the acturial age--------------------\n base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # -------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # ---------------------Lifetime Average Income----------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max 
Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # --------------------------------------------------------------------------------\n\n # # -----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # 
income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ------------------------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary_custom.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # --------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.loc[:, 'Annual Return'] = base_qcut.loc[:, '50th'].pct_change().fillna(0)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.loc[:, 'Annual Return'] = port_qcut.loc[:, '50th'].pct_change().fillna(0)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n 
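# --- Illustrative aside (a minimal sketch, not part of the model): each quantile table above is built row-wise; for every simulation year, DataFrame.quantile(q, axis=1) takes the cross-sectional quantile over the trial columns.\n    # The _demo_* names below are hypothetical; q_cut and cols are the quantile levels and labels defined earlier in this function.\n    _demo_paths = pd.DataFrame(np.random.normal(100.0, 10.0, size=(3, 4)), columns=['s_0', 's_1', 's_2', 's_3'])
\n    _demo_fan = pd.DataFrame({cols[i]: _demo_paths.quantile(q_cut[i], axis=1) for i in range(len(cols))})\n    # _demo_fan holds one row per simulation year and one percentile column per label, mirroring base_qcut and port_qcut above.\n    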
port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. 
Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n 
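The breakdown frames above zero out risky-asset income once the portfolio ending value is non-positive via chained indexing (`df.loc[:, col][mask] = 0`), which pandas may apply to a temporary copy and warn about. A minimal sketch of the same masking and total-income step done in a single `.loc` assignment, on a hypothetical `breakdown` frame:

    import pandas as pd

    # Hypothetical breakdown frame: one row per year at the chosen success percentile.
    breakdown = pd.DataFrame({
        "portfolio_ending_value": [120_000.0, 35_000.0, -5_000.0],
        "income_from_risky_assets": [40_000.0, 38_000.0, 36_000.0],
        "guaranteed_income": [0.0, 0.0, 0.0],
        "social_security_income": [20_000.0, 20_000.0, 20_000.0],
        "coupon_income": [5_000.0, 5_000.0, 5_000.0],
    })

    # Zero out risky-asset income once the portfolio is exhausted, in one .loc assignment.
    exhausted = breakdown["portfolio_ending_value"] <= 0
    breakdown.loc[exhausted, "income_from_risky_assets"] = 0.0

    # Total income is the sum of every column after the ending value, as in the source.
    breakdown["total_income"] = breakdown.iloc[:, 1:].sum(axis=1)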
income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed....\")", "def portfolio_to_dict(self):\n holdings = {}\n for asset, pos in self.pos_handler.positions.items():\n holdings[asset] = {\n \"quantity\": pos.net_quantity,\n \"market_value\": pos.market_value,\n \"unrealised_pnl\": pos.unrealised_pnl,\n \"realised_pnl\": pos.realised_pnl,\n \"total_pnl\": pos.total_pnl\n }\n return holdings", "async def value(self, ctx, user=None):\n if not user:\n user = ctx.message.author\n else:\n user = util.GetUserFromNameStr(ctx.message.server.members, user)\n portfolio = GetPortfolio(user.id)\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' 
% \n (user, portfolio.Value()))", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def __populate_historical_trade_data(self):\n\n trade_data = self.__transactions.pivot_table(\n index=\"Date\",\n columns=[\"Ticker\"],\n values=[\n \"Quantity\",\n \"Investment\",\n ],\n aggfunc={\"Quantity\": np.sum, \"Investment\": np.sum},\n )\n\n # Make historical prices columns a multi-index. This helps the merging.\n self.portfolio_historical_prices.columns = pd.MultiIndex.from_product(\n [[\"Close\"], self.portfolio_historical_prices.columns]\n )\n\n trade_data = pd.merge(\n trade_data,\n self.portfolio_historical_prices,\n how=\"outer\",\n left_index=True,\n right_index=True,\n )\n\n trade_data[\"Close\"] = trade_data[\"Close\"].fillna(method=\"ffill\")\n trade_data.fillna(0, inplace=True)\n\n trade_data[\"Quantity\"] = trade_data[\"Quantity\"].cumsum()\n trade_data[\"Investment\"] = trade_data[\"Investment\"].cumsum()\n trade_data[\"Investment\", \"Total\"] = trade_data[\"Investment\"].sum(axis=1)\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Investment delta\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"Investment\"].diff(periods=1).fillna(trade_data[\"Investment\"]))\n\n # End Value = Quantity * Close\n trade_data[pd.MultiIndex.from_product([[\"End Value\"], self.tickers_list])] = (\n trade_data[\"Quantity\"][self.tickers_list]\n * trade_data[\"Close\"][self.tickers_list]\n )\n\n trade_data.loc[:, (\"End Value\", \"Total\")] = trade_data[\"End Value\"][\n self.tickers_list\n ].sum(axis=1)\n\n # Initial Value = Previous End Value + Investment changes\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Initial Value\"], self.tickers_list + [\"Total\"]]\n )\n ] = 0\n\n trade_data[\"Initial Value\"] = trade_data[\"End Value\"].shift(1) + trade_data[\n \"Investment\"\n ].diff(periods=1)\n\n # Set first day Initial Value as the Investment (NaNs break first period)\n for t in self.tickers_list + [\"Total\"]:\n trade_data.at[trade_data.index[0], (\"Initial Value\", t)] = trade_data.iloc[\n 0\n ][\"Investment\"][t]\n\n trade_data = trade_data.reindex(\n columns=[\n \"Quantity\",\n \"Investment\",\n \"Investment delta\",\n \"Close\",\n \"Initial Value\",\n \"End Value\",\n ],\n level=0,\n )\n self.historical_trade_data = trade_data", "def read_portfolio_history(self, price_provider: Callable[[Security, dt.datetime], float]) -> Dict[dt.datetime, float]:\n if self._transactions_dataframe.empty:\n return dict()\n\n dated_transactions = self._transactions_dataframe.set_index(TRANSACTION_DATE).sort_index()\n all_share_ids = dated_transactions[SECURITY_ID].unique()\n\n date_index = dated_transactions.index.unique()\n share_history = pd.DataFrame(0, index=date_index, columns=all_share_ids)\n for share_id in all_share_ids:\n relevant_rows = 
dated_transactions[SECURITY_ID] == share_id\n transactions = dated_transactions.loc[relevant_rows, TRANSACTION_SHARE_AMOUNT]\n share_history.loc[dated_transactions.index[relevant_rows], share_id] = transactions\n share_history[share_id] = share_history[share_id].cumsum()\n\n share_prices = pd.DataFrame(0, index=date_index, columns=all_share_ids)\n for share_id in all_share_ids:\n date_to_price = lambda date: price_provider(Security(share_id), date)\n share_prices[share_id] = np.vectorize(date_to_price)(date_index.to_pydatetime())\n\n portfolio = (share_prices * share_history).apply(np.sum, axis=1)\n\n return {date.to_pydatetime(): price for date, price in portfolio.to_dict().items()}", "def __repr__(self):\n return \"<Portfolio: %s | valued at %s>\" % (self.portfolios, self.value)", "def income_model_asset_based_portfolio_quantile(num_of_years=30, trials=100, method='normal'):\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_pre_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_total_preincome = pd.DataFrame(index=range(num_of_years + 1))\n\n # read_income_inputs = pd.read_csv(src + \"income_model_inputs.csv\", index_col='Items')\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n # read_returns_est = pd.read_csv(src + \"income_assets_returns_estimates.csv\", index_col='Symbol')\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n # read_returns_est.drop(['BM', read_returns_est.index[-1]], axis=0, inplace=True)\n # read_portfolio_inputs = pd.read_csv(src + \"income_portfolio_inputs.csv\", index_col='Items')\n\n # read_asset_weights = pd.read_csv(src + \"asset_weights.csv\", index_col='Asset')\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'sort_normal.csv', index_col=[0], parse_dates=True)\n read_small = pd.read_csv(src + 'sort_small_to_large.csv', index_col=[0], parse_dates=True)\n read_large = pd.read_csv(src + 'sort_large_to_small.csv', index_col=[0], parse_dates=True)\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = pd.DataFrame({t: asset_median_returns(read_normal, t) for t in tickers})\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'r_FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = pd.DataFrame({t: asset_median_returns(read_small, t) for t in tickers})\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n 
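The three `median_returns_*` frames are built the same way: select every simulated column for a ticker, take the median across trials for each year, and dot the result with the base weights. A minimal sketch with hypothetical tickers and weights, assuming the `<ticker>_<trial>` column naming used for the simulated returns; anchoring the regex avoids matching tickers that are prefixes of one another.

    import numpy as np
    import pandas as pd

    # Hypothetical simulated annual returns: columns are "<ticker>_<trial>", rows are years.
    rng = np.random.default_rng(1)
    tickers = ["r_SPX", "r_AGG"]        # illustrative tickers, not the source's asset list
    wts = np.array([0.6, 0.4])          # illustrative base weights
    sims = pd.DataFrame({f"{t}_{i}": rng.normal(0.06, 0.12, 30)
                         for t in tickers for i in range(100)})

    # Median return per asset across trials, then the weighted portfolio return.
    median_returns = pd.DataFrame({t: sims.filter(regex=f"^{t}_").median(axis=1)
                                   for t in tickers})
    median_returns["portfolio_return"] = median_returns[tickers].dot(wts)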
median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'r_FIA')})\n\n # dataframe for unsorted returns (normal)\n median_returns_largest = pd.DataFrame({t: asset_median_returns(read_large, t) for t in tickers})\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'r_FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n while runs < trials:\n print(runs)\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, '{}_{}'.format('r_FIA', str(runs))]\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # -------------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = ['r_{}'.format(name) for name in base_assets]\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], 
base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_small.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.sort(ret.flatten())\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.sort(ret.flatten())})\n\n elif method == 'largest':\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_large.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = np.flip(np.sort(ret.flatten()))\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): np.flip(np.sort(ret.flatten()))})\n\n else:\n this_run_cols = ['{}_{}'.format(cname, str(runs)) for cname in r_cols]\n random_returns = read_normal.loc[:, this_run_cols]\n\n # random_returns.loc[:, r_cols[c]] = ret.flatten()\n # asset_dict.update({'{}_{}'.format(r_cols[c], str(runs)): ret.flatten()})\n\n # store the simulated assets returns in one dictionary\n # returns_dict.update({str(runs): random_returns})\n\n # collect the asset based returns from all simulation and calculate the median returns.\n # def get_median_returns(sym):\n # cols = [sym + '_' + str(c) for c in np.arange(trials)]\n # asset_df = pd.DataFrame({c: asset_dict.get(c) for c in cols})\n # return asset_df.median(axis=1)\n #\n # asset_median_returns = pd.DataFrame({symbol: get_median_returns(symbol) for symbol in r_cols})\n #\n # asset_median_returns.loc[:, 'simulated_portfolio_median_returns'] = asset_median_returns.dot(base_weights)\n\n base_df = random_returns.copy()\n pre_income_base_df = random_returns.copy()\n\n # base_investment = float(read_portfolio_inputs.loc['risky_assets', 'Base'])\n\n fia_portfolio_df = random_returns.copy()\n pre_income_port_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # ---------Initial Investments for pre-income account values---------------------\n pre_income_base_inv = base_investment\n pre_income_port_inv = port_investment\n # ----------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n pre_income_base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n # ---------------For year 0, the year of investment------------\n\n # ------------Calculate the annual portfolio returns - Gross Returns--------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_base_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_base_inv for c in range(len(boy_value))]\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_inv = pre_income_base_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_pre_income'] = base_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n # ----For years between the start of the investment and start if the income---------------\n base_df.loc[counter, boy_value] 
= [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total_net_fees']\n\n else:\n\n # -------------For Years after the income started----------------------\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_base_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_base_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_base_df.loc[counter, 'total'] = pre_income_base_df.loc[counter, boy_value].sum()\n pre_income_base_df.loc[counter, 'adv_fees'] = pre_income_base_df.loc[counter, 'total'] * adv_fees\n pre_income_base_df.loc[counter, 'total_net_fees'] = pre_income_base_df.loc[counter, 'total'] - \\\n pre_income_base_df.loc[counter, 'adv_fees']\n pre_income_base_inv = pre_income_base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_df.loc[counter, 'total_pre_income'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n # ------------------------Portfolio with PreIncome Values----------------------------\n sim_base_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_base_df.loc[:, 'total_net_fees']\n sim_base_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'Base']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n sim_base_total_pre_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_pre_income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n pre_income_port_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n\n pre_income_port_df.loc[counter, boy_value] = [base_weights[c] *\n pre_income_port_inv for c in range(len(boy_value))]\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_inv = pre_income_port_df.loc[counter, boy_value].sum()\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_pre_income'] = port_investment\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # ------------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # 
------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n # -------------Record the Pre Income Base Portfolio-----------------------------\n pre_income_port_df.loc[counter, boy_value] = [\n base_weights[c] * pre_income_port_inv * (1 + period_returns[c])\n for c in range(len(boy_value))]\n\n pre_income_port_df.loc[counter, 'total'] = pre_income_port_df.loc[counter, boy_value].sum()\n pre_income_port_df.loc[counter, 'adv_fees'] = pre_income_port_df.loc[counter, 'total'] * adv_fees\n pre_income_port_df.loc[counter, 'total_net_fees'] = pre_income_port_df.loc[counter, 'total'] - \\\n pre_income_port_df.loc[counter, 'adv_fees']\n pre_income_port_inv = pre_income_port_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n # ------------------Pre Income Block Ends------------------------\n\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n fia_portfolio_df.loc[counter, 'total_pre_income'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n sim_port_total_pre_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_pre_income']\n\n # -------------------Portfolio with PreIncome Values----------------------------\n sim_port_total_preincome.loc[:, 's_{}'.format(str(runs))] = pre_income_port_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_total_preincome.fillna(float(read_income_inputs.loc['risky_assets', 'FIA']), inplace=True)\n # --------------------------------PreIncome Block Ends----------------------------\n\n runs += 1\n\n # ------------------Calculate % of portfolios ending value greater than required LIFETIME cumm. 
income---------\n total_income_by_age = sim_base_income.loc[:, sim_base_income.columns[0]].cumsum()\n total_income_by_acturial_age = total_income_by_age.loc[life_expectancy - clients_age]\n total_income_by_age.fillna(0, inplace=True)\n income_dataframe = pd.DataFrame(total_income_by_age)\n income_dataframe.loc[:, 'remaining_income_by_acturial_age'] = total_income_by_age.apply(\n lambda x: total_income_by_acturial_age - x)\n\n s = income_dataframe.loc[:, 'remaining_income_by_acturial_age']\n base_prob_of_success = sim_base_total.gt(s, axis=0).sum(axis=1)\n port_prob_of_success = sim_port_total.gt(s, axis=0).sum(axis=1)\n\n # ----------------------------Portfolio sufficient for NEXT YEARS income needs-------------------\n next_year_income = sim_base_income.loc[:, sim_base_income.columns[0]].shift(-1).fillna(0) # Yearly Income Reqd.\n base_success_next_year = sim_base_total.gt(next_year_income, axis=0).sum(axis=1)\n\n base_for_next_year_need = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n\n port_success_next_year = sim_port_total.gt(next_year_income, axis=0).sum(axis=1)\n\n port_for_next_year_need = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ---------------Portfolio for 45 years of simulation---------------------------------------\n base_success_portfolio = sim_base_total[sim_base_total.gt(next_year_income, axis=0)]\n port_success_portfolio = sim_port_total[sim_port_total.gt(next_year_income, axis=0)]\n\n # ----------------Portfolio Simulation until the acturial age------------------------------\n acturial_years = life_expectancy - clients_age\n base_success_portfolio_act_age = base_success_portfolio.loc[acturial_years, :]\n port_success_portfolio_act_age = port_success_portfolio.loc[acturial_years, :]\n\n # -------------------------Base Portfolio TS with max Terminal Value ----------------------------\n if base_success_portfolio_act_age.isnull().sum() == trials:\n base_max_portfolio = 0.0\n else:\n base_max_portfolio = base_success_portfolio.loc[:, base_success_portfolio_act_age.idxmax()]\n\n # -------------------------FIA Portfolio TS with max Terminal Value ----------------------------\n if port_success_portfolio_act_age.isnull().sum() == trials:\n port_max_portfolio = 0.0\n else:\n port_max_portfolio = port_success_portfolio.loc[:, port_success_portfolio_act_age.idxmax()]\n\n # ------------------------------Average age with full income------------------------------\n base_mean_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n port_mean_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).mean()\n\n # ----------------------------Median Age with full Income------------------------------------------\n base_median_age = ((life_expectancy - clients_age) - base_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n port_median_age = ((life_expectancy - clients_age) - port_success_portfolio.loc[1:life_expectancy - clients_age, :]\n .isnull().sum()).median()\n\n # --------------Mean Value for all the portfolios at end of the acturial age--------------------\n base_act_avg_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n port_act_avg_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).mean()\n\n # ---------------Median Value for all the portfolios at end of the acturial age--------------------\n 
base_act_median_porfolio = base_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n port_act_median_porfolio = port_success_portfolio.loc[life_expectancy - clients_age, :].fillna(0).median()\n\n # # --------------Mean Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().mean()\n #\n # # --------------Median Value for all the portfolios in the simulation--------------------\n # base_sim_mean = base_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n # port_sim_mean = port_success_portfolio.loc[1:life_expectancy - clients_age, :].mean().median()\n\n # -------Max Portfolio value at the end of acturial age----------------------------------------\n base_act_max = base_success_portfolio.loc[life_expectancy - clients_age, :].max()\n port_act_max = port_success_portfolio.loc[life_expectancy - clients_age, :].max()\n\n # ----------------------Min Portfolio value at the end of acturial age----------------------------------------\n base_act_min = base_success_portfolio.loc[life_expectancy - clients_age, :].min()\n port_act_min = port_success_portfolio.loc[life_expectancy - clients_age, :].min()\n\n # -----------------------------------------Lifetime Average Income-----------------------------------------\n base_total_income = sim_base_income.cumsum().loc[acturial_years, :].mean()\n port_total_income = income_from_fia + sim_port_income\n port_total_income = port_total_income.cumsum().loc[acturial_years, :].mean()\n\n simulation_stats = pd.DataFrame(index=['Average Years', 'Median Years', 'Average Age', 'Median Age',\n 'Average Portfolio (act.age)', 'Median Portfolio (act.age)',\n 'Max Portfolio Value', 'Min Portfolio Value',\n 'Average Lifetime Income'], columns=['Base', 'FIA'])\n\n simulation_stats.loc['Average Years', :] = [base_mean_age, base_mean_age]\n simulation_stats.loc['Median Years', :] = [base_median_age, base_median_age]\n simulation_stats.loc['Average Age', :] = [base_mean_age + clients_age, base_mean_age + clients_age]\n simulation_stats.loc['Median Age', :] = [base_median_age + clients_age, base_median_age + clients_age]\n simulation_stats.loc['Average Portfolio (act.age)', :] = [base_act_avg_porfolio, port_act_avg_porfolio]\n simulation_stats.loc['Median Portfolio (act.age)', :] = [base_act_median_porfolio, port_act_median_porfolio]\n simulation_stats.loc['Max Portfolio Value', :] = [base_act_max, port_act_max]\n simulation_stats.loc['Min Portfolio Value', :] = [base_act_min, port_act_min]\n simulation_stats.loc['Average Lifetime Income', :] = [base_total_income, port_total_income]\n comments = ['Average years of portfolios that meet the next years income needs for the lifetime',\n 'Median years of portfolios that meet the next years income needs for the lifetime',\n 'Average Clients Age',\n 'Median Clients Age',\n 'Average of terminal values for the portfolios at the end of the acturial life',\n 'Median of terminal values for the portfolios at the end of the acturial life',\n 'Maximum of terminal values for the portfolios at the end of the acturial life',\n 'Minimum of terminal values for the portfolios at the end of the acturial life',\n 'Average of total income generated by all portfolios at the end of the acturial life']\n\n simulation_stats.loc[:, 'Notes'] = comments\n\n # 
--------------------------------------------------------------------------------\n\n # # -----------------------------------income breakdown for Base portfolio----------------------------------\n # base_df.to_csv(src + 'base_port_detail.csv')\n # sim_base_total.to_csv(src + 'base_ending_values.csv')\n # income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n # income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n # income_breakdown_base.loc[:, 'fia_income'] = 0.0\n # income_breakdown_base.loc[:, 'social_security_income'] = social\n # income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n #\n # income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_base.loc[:, 'income_from_portfolio'][\n # income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n # axis=1)\n #\n # # --------------------------------------Block Ends-----------------------------------------------------------\n #\n # # ---------------------------------------income breakdown for FIA portfolio----------------------------------\n # fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n # sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n #\n # income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n # income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n # income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n # income_breakdown_port.loc[:, 'social_security_income'] = social\n # income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n #\n # income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n # income_breakdown_port.loc[:, 'income_from_portfolio'][\n # income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n # income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n # axis=1)\n #\n # # ----------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.1, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '10th', '25th', '50th', '75th', '90th', 'Max']\n\n # ------------------------------------------drop year 0-----------------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------------------------------plot for histogram for porfolios--------------------------------------\n # base_term_value = sim_base_total.loc[sim_base_total.index[:life_expectancy - clients_age], :]\n # fact = 1 / len(base_term_value)\n # base_ann_ret = (base_term_value.iloc[-1] / base_term_value.iloc[0]) ** fact - 1\n # counts, bins, bars = 
plt.hist(base_ann_ret)\n\n # ------------------------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n sim_base_total.clip(lower=0, inplace=True)\n\n # -------------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # ---------------------------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n # base_legacy_risk = (sim_base_total.loc[sim_base_total.index[-1]] < 0).sum() / (trials)\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / trials\n\n # port_legacy_risk = (sim_port_total.loc[sim_port_total.index[-1]] <= 0).sum() / (trials)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / trials\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / trials\n\n # ------------------------------------WRITING FILES TO EXCEL ---------------------------\n\n writer = pd.ExcelWriter(dest_simulation + method + '_montecarlo_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n # read_portfolio_inputs.to_excel(writer, sheet_name='portfolio_inputs')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n # base_qcut.loc[:, 'clients_age'] = age_index\n # base_qcut.loc[:, 'comment'] = ''\n # base_qcut.loc[:, 'comment'] = np.where(base_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n base_inv = float(read_income_inputs.loc['risky_assets', 'Base'])\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n # -----------------------To start with year 0---------------------------------\n insert_col = [base_inv, base_inv, base_inv, base_inv, base_inv, base_inv,\n base_inv, clients_age, np.nan]\n base_qcut.loc[len(base_qcut) + 1, :] = 0.0\n base_qcut = base_qcut.shift(1)\n base_qcut.iloc[0] = insert_col\n base_qcut.reset_index(drop=True, inplace=True)\n base_qcut.to_excel(writer, sheet_name='base_ending_value_quantiles')\n # base_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n # base_income_qcut = base_income_qcut[1:] base_income_qcut.loc[:, 'clients_age'] = age_index\n # base_income_qcut.loc[:, 'comment'] = '' base_income_qcut.loc[:, 'comment'] = np.where(\n # base_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_quantiles')\n\n # age_index = list(range(clients_age+1, clients_age + len(port_qcut)+1))\n # port_qcut.loc[:, 'clients_age'] = age_index\n # port_qcut.loc[:, 'comment'] = ''\n # port_qcut.loc[:, 'comment'] = np.where(port_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[len(port_qcut) + 1, :] = 0.0\n port_qcut = port_qcut.shift(1)\n port_qcut.iloc[0] = insert_col\n port_qcut.reset_index(drop=True, inplace=True)\n port_qcut.to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n # port_qcut.loc[income_starts:, :].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n # port_income_qcut = port_income_qcut[1:] port_income_qcut.loc[:, 'clients_age'] = age_index\n # port_income_qcut.loc[:, 'comment'] = '' port_income_qcut.loc[:, 'comment'] = np.where(\n # port_income_qcut.clients_age == life_expectancy, 'expected_life', \"\")\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, :].to_excel(writer, 
sheet_name='fia_port_income_quantiles')\n\n prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'clients_age'] = age_index\n # prob_success_df.loc[:, 'comment'] = ''\n # prob_success_df.loc[:, 'comment'] = np.where(prob_success_df.clients_age == life_expectancy, 'expected_life', \"\")\n\n prob_success_df.loc[:, 'age'] = age_index\n prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_base'] = base_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>lifetime_req income)_port'] = port_prob_of_success / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_base'] = base_success_next_year / trials\n prob_success_df.loc[:, 'prob(ending_value>next_year_req_income)_port'] = port_success_next_year / trials\n prob_success_df.loc[:, 'base_max_portfolio_at_acturial_age'] = base_max_portfolio\n prob_success_df.loc[:, 'port_max_portfolio_at_acturial_age'] = port_max_portfolio\n\n # --------------------Percentile Portfolio's based on Acturial Life------------------------\n base_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_base']\n port_success = prob_success_df.loc[life_expectancy - clients_age, 'prob(ending_value>next_year_req_income)_port']\n\n # acturial_age_base_tv = sim_base_total.loc[:life_expectancy - clients_age, ]\n # percentile_base_tv = sim_base_total.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_base = base_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = base_for_next_year_need.copy().fillna(0)\n percentile_base = base_for_next_year_need.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. Age\n base_pre_income_success = sim_base_total_preincome.apply(lambda x: np.nanpercentile(x, base_success), axis=1)\n base_ann_ret_pre_income = base_pre_income_success.pct_change().fillna(0)\n\n # acturial_age_port_tv = sim_port_total.loc[:life_expectancy - clients_age, ]\n # percentile_port_tv = sim_port_total.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----------------Year wise percentile portfolio to meet next year income. Based on the success at acturial age.\n # Yearly portfolio values that can provide the next year income below the success rate at end of life (Percentile)-\n\n # acturial_age_port = port_for_next_year_need.loc[:life_expectancy - clients_age, ]\n # acturial_age_base = port_for_next_year_need.copy().fillna(0)\n percentile_port = port_for_next_year_need.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n\n # ----Pre Income Portfolio based on the Probab. of Success to meet next year's income at the end on the Act. 
Age\n port_pre_income_success = sim_port_total_preincome.apply(lambda x: np.nanpercentile(x, port_success), axis=1)\n port_ann_ret_pre_income = port_pre_income_success.pct_change().fillna(0)\n\n prob_success_df.loc[:, 'acturial_success_percentile_base_portfolio'] = percentile_base\n prob_success_df.loc[:, 'acturial_success_percentile_port_portfolio'] = percentile_port\n\n prob_success_df.loc[:, 'base_pre_income_ann_ret'] = base_ann_ret_pre_income\n prob_success_df.loc[:, 'port_pre_income_ann_ret'] = port_ann_ret_pre_income\n\n # prob_success_df.loc[:, 'terminalVal_success_percentile_base_portfolio'] = percentile_base_tv\n # prob_success_df.loc[:, 'terminalVal_success_percentile_port_portfolio'] = percentile_port_tv\n\n sim_base_total_preincome.to_excel(writer, sheet_name='base_preincome_portfolios')\n # -------------Adding Premium to calculate the total initial investment--------------\n sim_port_total_preincome.iloc[0] = sim_port_total_preincome.iloc[0] + premium\n sim_port_total_preincome.to_excel(writer, sheet_name='port_preincome_portfolios')\n\n # -------------For Simulation slide - BASE Portfolio - Can Delete --------------------\n # base_qcut_preinc = pd.DataFrame(index=sim_base_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # base_qcut_preinc.loc[:, cols[c]] = sim_base_total_preincome.quantile(q_cut[c], axis=1)\n #\n # # -------------For Simulation slide - Proposed Portfolio --------------------\n # port_qcut_preinc = pd.DataFrame(index=sim_port_total_preincome.index, columns=cols)\n # for c in range(len(cols)):\n # port_qcut_preinc.loc[:, cols[c]] = sim_port_total_preincome.quantile(q_cut[c], axis=1)\n #\n # base_qcut_preinc.to_excel(writer, sheet_name='base_preincome_quantiles')\n # port_qcut_preinc.to_excel(writer, sheet_name='port_preincome_quantiles')\n\n prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n # --------------BASE - Accumulation and Income Breakdown based on the success percentile portfolio---------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(base_success, axis=1))\n income_breakdown_base.loc[:, 'income_from_risky_assets'] = sim_base_income.quantile(base_success, axis=1) \\\n - social - cpn_income_port\n income_breakdown_base.loc[:, 'guaranteed_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_risky_assets'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ----------FIA PORTFOLIO - Accumulation and Income Breakdown based on the success percentile portfolio-----------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(port_success, axis=1))\n income_breakdown_port.loc[:, 'income_from_risky_assets'] = sim_port_income.quantile(port_success, axis=1) \\\n - income_from_fia - social - cpn_income_port\n income_breakdown_port.loc[:, 'guaranteed_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 
'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_risky_assets'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # -------------------Write simulation Statistics-------------------------------------\n simulation_stats.to_excel(writer, sheet_name='simulation_statistics')\n\n # port_psuccess.to_excel(writer, sheet_name='fia_port_success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n legacy_risk.to_excel(writer, sheet_name='ruin_probability')\n\n if method == 'normal':\n median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n elif method == 'smallest':\n median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n\n else:\n median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n median_returns_largest.to_excel(writer, sheet_name='gr_port_median_desc')\n\n # ---------------------Histogram for S&P Forecast---------------------------------------\n sp_returns = read_returns_est.loc['SPXT Index', 'Annualized Returns']\n sp_risk = read_returns_est.loc['SPXT Index', 'Annualized Risk']\n sp_random_ret = np.random.normal(loc=sp_returns, scale=sp_risk, size=10000)\n bins, data = np.histogram(sp_random_ret, bins=20)\n df_ret = pd.DataFrame(data, columns=['Return_range'])\n df_bins = pd.DataFrame(bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n df_hist.to_excel(writer, sheet_name='sp500_histogram')\n\n # ---------------------Histogram for FIA Portfolios TV>0 at the acturial age---------------------------------------\n tval_at_horizon = sim_port_total.loc[acturial_years, :]\n fact = 1 / acturial_years\n arr_returns = np.array((tval_at_horizon / 1000000) ** fact - 1)\n clean_ann_ret = arr_returns[~np.isnan(arr_returns)]\n p_bins, p_data = np.histogram(clean_ann_ret, bins=20)\n df_ret = pd.DataFrame(p_data, columns=['Return_range'])\n df_bins = pd.DataFrame(p_bins, columns=['Count'])\n df_hist = df_ret.join(df_bins)\n df_hist.to_excel(writer, sheet_name='fia_portfolio_histogram')\n\n tval_df = pd.DataFrame(sim_port_total.loc[acturial_years, :])\n tval_df.rename(columns={tval_df.columns[0]:'Terminal Values'}, inplace=True)\n tval_df.to_excel(writer, sheet_name='fia_ending_values_hist')\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n 
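For the histogram sheets, note that `np.histogram` returns `(counts, bin_edges)` in that order and the edges array is one element longer than the counts, so unpacking it as `bins, data = ...` and joining the two columns leaves a trailing NaN in the shorter one. A minimal sketch of the same export with the two arrays aligned on bin midpoints (names here are illustrative):

    import numpy as np
    import pandas as pd

    # Hypothetical forecast draw, standing in for the simulated S&P returns above.
    rng = np.random.default_rng(2)
    simulated_returns = rng.normal(loc=0.07, scale=0.16, size=10_000)

    counts, edges = np.histogram(simulated_returns, bins=20)   # 20 counts, 21 bin edges
    midpoints = 0.5 * (edges[:-1] + edges[1:])                 # one midpoint per bin

    df_hist = pd.DataFrame({"Return_range": midpoints, "Count": counts})
    # df_hist.to_excel(writer, sheet_name="sp500_histogram")   # same export step as the source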
plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed....\")", "def __init__(\n self,\n start_dt,\n starting_cash=0.0,\n currency = \"USD\",\n portfolio_id=None,\n name=None\n ):\n self.start_dt = start_dt\n self.current_dt = start_dt\n self.starting_cash = starting_cash\n self.currency = currency\n self.portfolio_id = portfolio_id\n self.name = name\n\n self.pos_handler = PositionHandler()\n self.history = []\n\n self.logger = logging.getLogger('Portfolio')\n self.logger.setLevel(logging.DEBUG)\n self.logger.info(\n '(%s) Portfolio \"%s\" instance initalized' % (\n self.current_dt.strftime(settings.LOGGING[\"DATE_FORMAT\"]),\n self.portfolio_id\n )\n )\n\n self._initalize_portfolio_with_cash()", "def generate_portfolio(S_0, params):\n\tpublic_client = gdax.PublicClient()\n\tallvar = []\n\tsumvar = 0\n\tfor coin in params:\n\t\ttheta = coin[0]\n\t\tv = theta[0]\n\t\tT = coin[1]\n\t\tprod_id = coin[2]\n\t\t# Get the current value of the coin, i.e. how much you bought\n\t\tname = prod_id + '-USD'\n\t\tstats = public_client.get_product_24hr_stats(name)\n\t\tvalue = (float(stats['high']) + float(stats['low']))/2\n\t\tallvar.append([prod_id, value, v])\n\t\tsumvar += v\n\tpriority = sorted(allvar, key=lambda i: i[2])\n\tportfolio = []\n\tfor i in priority:\n\t\tinvestment = S_0*i[2]/sumvar\n\t\tcurrency = investment/i[1]\n\t\tportfolio.append((i[0], currency, investment)) # id, investment, currency\n\tprint(\"\\nYour suggested investments are: \\n\")\n\tfor coin in portfolio:\n\t\tprint(\"%s: %s for %s USD\" % (coin[0], coin[1], coin[2]))\n\t# Prompt to save the portfolio\n\tdone = False\n\twhile done != True:\n\t\tinp = input(\"\\nWould you like to save this portfolio? (y/n)\t\")\n\t\ttry:\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tpublic_client = gdax.PublicClient()\n\t\t\t\tcurrent_date = np.datetime64(public_client.get_time().get(\"iso\").split('T')[0])\n\t\t\t\t# Save the file\n\t\t\t\twith open(\"portfolios/%s.txt\" % (current_date), \"w\") as f:\n\t\t\t\t\tfor coin in portfolio:\n\t\t\t\t\t\tf.write(str(coin[0]) + ', ' + str(coin[1]) + ', ' + str(coin[2]) + '\\n')\n\t\t\t\tprint(\"Portfolio saved. Exiting.\\n\")\n\t\t\t\tdone = True\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"Program complete. 
Exiting.\\n\")\n\t\t\t\tdone = True\n\t\texcept ValueError:\n\t\t\tprint(\"Your input could not be interpreted.\")", "def portfolio_returns(returns, weights):\n\n # the portfolio returns are given by the dot product of the weights matrix and the portfolio matrix\n port_returns = np.dot(weights, returns)\n\n return port_returns", "def portfolio_returns(returns, weights):\n\n # the portfolio returns are given by the dot product of the weights matrix and the portfolio matrix\n port_returns = np.dot(weights, returns)\n\n return port_returns", "def __get_object_portfolio_bulk(self, name, user, key=\"portfolio\"):\n portfolio = None\n for item in self.bulk_data[key]:\n if item and item.name == name and item.user == user:\n portfolio = item\n break\n return portfolio", "def evaluate_cur_stocks(self):\n today = datetime.today()\n close_val = PRICE_DF.iloc[PRICE_DF.index.get_loc(today, method=\"ffill\")]\n close_val = close_val[self.cur_stocks.index]\n close_val = pd.DataFrame({\"PRICE_CURRENT\" : close_val.values}, index=self.cur_stocks.index)\n evaluated_stocks = pd.merge(self.cur_stocks, close_val, left_index=True, right_index=True)\n evaluated_stocks[\"VOLUME_CURRENT\"] = evaluated_stocks[\"AMOUNT\"] * evaluated_stocks[\"PRICE_CURRENT\"]\n evaluated_stocks[\"RETURN\"] = (evaluated_stocks[\"VOLUME_CURRENT\"] / evaluated_stocks[\"VOLUME_PURCHASE\"]) - 1\n return evaluated_stocks", "def cumulative_returns(shares_allocation, capital, test_data):\n\n # list of DataFrames of cumulative returns for each stock\n daily_returns = []\n\n # iterates over every stock in the portfolio\n for stock in shares_allocation.index:\n\n # multiples shares by share prices in the validation dataset\n daily_returns.append(shares_allocation.loc[stock].values * test_data[stock])\n\n # concatenates every DataFrame in the above list to a single DataFrame\n daily_returns_df = pd.concat(daily_returns, axis=1).reset_index()\n\n # sets the index as the date\n daily_returns_df.set_index(\"Day\", inplace=True)\n\n # adds the cumulative returns for every stock\n cumulative_daily_returns = daily_returns_df.sum(axis=1)\n\n # returns the cumulative daily returns of the portfolio\n return cumulative_daily_returns", "def index():\n\n # Get user's cash\n user = db.execute(\"SELECT cash FROM users WHERE id = ?\", session[\"user_id\"])\n\n # Get portfolio\n portfolios = db.execute(\"SELECT * FROM portfolios WHERE user_id = ?\", session[\"user_id\"])\n\n # Get symbol for each stock\n length = len(portfolios)\n for i in range(length):\n symbol = portfolios[i]['stocks']\n\n # Lookup stock price and add to portfolio information\n portfolios[i]['price'] = lookup(symbol)['price']\n portfolios[i]['total'] = float(portfolios[i]['price']) * portfolios[i]['shares']\n\n # Calculate total value of stocks\n value = 0\n for j in range(length):\n value += portfolios[j]['price']\n\n # Calculate grand total of stocks plus cash\n g_total = user[0][\"cash\"] + value\n\n return render_template(\"index.html\", portfolios=portfolios, cash=user[0][\"cash\"], g_total=g_total)", "def __init__(self, instrumentsNAmounts, totalInvest):\n # add all necessary attributes here which link to portfolio and\n # will be tracked.\n self.portfolio = instrumentsNAmounts\n self.totalInvest = totalInvest", "def compute_portfolio_stats(allocs,prices,rfr=0, sf=252):\n\n # portfolio value\n port_val = compute_port_val(allocs, prices)\n\n daily_rets = port_val/port_val.shift(1) - 1\n daily_rets = daily_rets[1:]\n\n # cumulative return\n cr = port_val.iloc[-1]/port_val.iloc[0] -1\n\n 
# avg daily return\n adr = daily_rets.mean()\n\n # std dev of daily return\n sddr = daily_rets.std()\n \n #sharpe_ratio\n k = math.sqrt(252)\n \n sr = k * ((daily_rets - 0).mean() / daily_rets.std())\n \n return cr, adr, sddr, sr", "def prepare_portfolio_performance(self, portfolio_scope: str, portfolio_code: str):\n return Performance(\n entity_scope=portfolio_scope,\n entity_code=portfolio_code,\n src=self.portfolio_performance_source,\n block_store=self.block_store,\n perf_start=None,\n )", "def set_portfolio(self, name, user, cash):\n if name and not self.exist_portfolio(name, user) \\\n and self.db_tool.is_connected():\n portfolio = Portfolio(\n name=name,\n user=user,\n cash=cash,\n initial_cash=cash\n )\n self.bulk_data[\"portfolio\"].append(portfolio)\n self.portfolio_name = name\n self.portfolio_user = user", "def get_portfolio_pnl_desc(self):\n\n return self._tsd_portfolio.summary()", "def performance_vs_index(self, index='SPY', dateIni='Ini', dateFin='Fin'):\n if dateFin == 'Fin':\n dateFin = self.data.index[-1]\n if dateIni == 'Ini':\n dateIni = self.data.index[0]\n portfolioGains = round(self.data.loc[self.data.index[-1], 'Profit/Loss%'], 2)\n else:\n pData = self.data.loc[dateIni:dateFin]\n pData.loc[:,'Profit/Loss'] = pData['Gains'].cumsum()\n pData.loc[:,'Profit/Loss%'] = pData['Profit/Loss'] / pData['Invested'] * 100\n portfolioGains = round(pData.loc[pData.index[-1], 'Profit/Loss%'], 2)\n indexData = yf.Ticker(index).history(start=dateIni, end=dateFin)\n indexData['Var%'] = (indexData.Close - indexData.Close[0]) / indexData.Close[0] * 100\n indexGains = round(indexData.loc[indexData.index[-1], 'Var%'], 2)\n return portfolioGains, indexGains, portfolioGains - indexGains", "def index():\n stocks = []\n username = session.get(\"username\")\n symbol_list = db.execute(\"SELECT stock_symbol FROM history WHERE username=:username GROUP BY stock_symbol\", username=username)\n cash_balance = db.execute(\"SELECT cash FROM users WHERE username=:username\", username=username)[0][\"cash\"]\n total_value = cash_balance\n\n for sym in symbol_list:\n symbol = sym[\"stock_symbol\"]\n new_stock = Stock(username, symbol)\n stocks.append(new_stock)\n total_value += new_stock.quantity * new_stock.price\n\n\n return render_template(\"portfolio.html\", stocks = stocks, cash_balance=usd(cash_balance), total_value=usd(total_value))", "def _computeIsosurface(self):\n data = self.getData(copy=False)\n\n if data is None:\n if self.isAutoLevel():\n self._level = float('nan')\n\n else:\n if self.isAutoLevel():\n st = time.time()\n try:\n level = float(self.getAutoLevelFunction()(data))\n\n except Exception:\n module_ = self.getAutoLevelFunction().__module__\n name = self.getAutoLevelFunction().__name__\n _logger.error(\n \"Error while executing iso level function %s.%s\",\n module_,\n name,\n exc_info=True)\n level = float('nan')\n\n else:\n _logger.info(\n 'Computed iso-level in %f s.', time.time() - st)\n\n if level != self._level:\n self._level = level\n self._updated(Item3DChangedType.ISO_LEVEL)\n\n if numpy.isfinite(self._level):\n st = time.time()\n vertices, normals, indices = MarchingCubes(\n data,\n isolevel=self._level)\n _logger.info('Computed iso-surface in %f s.', time.time() - st)\n\n if len(vertices) != 0:\n return vertices, normals, indices\n\n return None, None, None", "def compute_port_val(allocs, prices):\n # normalized price\n # normalized prices\n normed = prices/prices.iloc[0]\n prices.head()\n alloced = normed * allocs\n\n # position values\n start_val = 1 # included to simplify 
adding ability to calc as $\n pos_vals = alloced * start_val\n\n # portfolio value\n port_val = pos_vals.sum(axis=1)\n\n return port_val", "def market_value(self, ref_prices, suspensions=None):\n # TODO some securities could not be able to be traded\n if suspensions is None:\n suspensions = []\n \n market_value_float = 0.0\n market_value_frozen = 0.0 # suspended or high/low limit\n for sec in self.holding_securities:\n size = self.get_position(sec).current_size\n # TODO PortfolioManager object should not access price\n price = ref_prices[sec]\n mv_sec = price * size\n if sec in suspensions:\n market_value_frozen += mv_sec\n else:\n market_value_float += mv_sec\n \n return market_value_float, market_value_frozen", "def swap_active_portfolio(self):\n\n self.lock.acquire()\n\n self.clear_main()\n\n w = self.windows['MAIN']\n l = 1\n\n for p in self.portfolios:\n # Only support 9 portfolios since that makes this easier to deal\n # with.\n if l >= 10:\n break\n\n w.addstr(l, 0, '%2d' % l, curses.A_BOLD | curses.color_pair(1))\n w.addstr(l, 3, p.name)\n l += 1\n\n self.refresh()\n\n # Wait for the user to give is a key.\n while True:\n c = self.stdscr.getch()\n\n if c < ord('1') and c > ord('9'):\n continue\n\n index = c - ord('1')\n\n if index < len(self.portfolios):\n break\n\n self.portfolios[index].refresh()\n\n self.active_portfolio = self.portfolios[index]\n self.display_portfolio(self.active_portfolio)\n self.lock.release()", "def new_portfolio_flow(portfolio_list):\n while True:\n portfolio_name = prompt.shortcuts.input_dialog(\n title=\"Portfolio Name\", text=\"Please type the portfolio name:\"\n ).run()\n if portfolio_name is not None:\n portfolio_id: int = len(portfolio_list)\n stock_list = []\n stock_list = add_stock_flow(stock_list)\n portfolio_list.append(Portfolio(portfolio_name, portfolio_id, stock_list))\n return portfolio_list\n if portfolio_name is None:\n return None", "def momentum(portfolio_item, transaction_volume, cash_allocation):\n from yahooquery import Ticker\n from math import floor\n import talib\n from .TradeHistoryItem import log_trade\n from API.Help import is_increasing, initialize_alpaca\n\n alpaca = initialize_alpaca()\n\n yahoo_ticker = Ticker(str(portfolio_item))\n info = yahoo_ticker.history()\n ma_5 = talib.SMA(info['close'], timeperiod=5)\n ma_20 = talib.SMA(info['close'], timeperiod=20)\n volume = info['volume']\n\n if portfolio_item.shares == 0:\n # if the price goes from below the sma to above, buy\n if ma_5[-1] > (ma_20[-1] * 1.1) and is_increasing(volume, 3):\n print('buying {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'buy', 'market', 'day')\n portfolio_item.buy(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=0)\n # if the price goes from above the sma to below, short\n elif ma_5[-1] < (ma_20[-1] * .9) and not is_increasing(volume, 3) and portfolio_item.shares == 0:\n transaction_volume = floor(cash_allocation / (portfolio_item.ticker.price_now * 1.1))\n print('shorting {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'sell', 'market', 'day')\n portfolio_item.short(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=3)", "def trade(self, portfolio: 
Portfolio, stock_market_data: StockMarketData) -> List[Order]:\n assert portfolio is not None\n assert stock_market_data is not None\n assert stock_market_data.get_companies() == [Company.A, Company.B]\n\n current_state = self.states_compution(stock_market_data, portfolio)\n current_portfolio_value = portfolio.get_value(stock_market_data)\n\n if self.train_while_trading is False:\n # for test set use trained ann\n action = np.argmax(self.model.predict(current_state)[0])\n\n else:\n action = self.select_action(current_state, current_portfolio_value)\n if self.last_state is not None:\n reward = self.calc_reward(current_portfolio_value)\n self.append_memory(self.last_state, self.last_action_a, reward, current_state)\n # train model as soon as sufficient memory is reached\n if len(self.memory) > self.min_size_of_memory_before_training:\n self.train_model()\n\n # Split action into individual actions for Company A and B\n current_action_a = 0\n current_action_b = 0\n assert action < 9 and action >= 0\n if action == 0:\n current_action_a = OrderType.SELL\n current_action_b = OrderType.SELL\n amount_to_sell_a = portfolio.get_stock(Company.A)\n amount_to_sell_b = portfolio.get_stock(Company.B)\n elif action == 1:\n current_action_a = OrderType.SELL\n amount_to_sell_a = portfolio.get_stock(Company.A)\n current_action_b = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_b = int(portfolio.cash/stock_price)\n elif action == 2:\n current_action_a = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_a = int(portfolio.cash/stock_price)\n current_action_b = OrderType.SELL\n amount_to_sell_b = portfolio.get_stock(Company.B)\n elif action == 3:\n current_action_a = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_a = int((portfolio.cash/stock_price)/2)\n current_action_b = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.B)\n amount_to_buy_b = int((portfolio.cash/stock_price)/2)\n elif action == 4:\n current_action_a = OrderType.SELL\n amount_to_sell_a = portfolio.get_stock(Company.A)\n # current_action_b = \"hold\"\n elif action == 5:\n current_action_a = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.A)\n amount_to_buy_a = int(portfolio.cash/stock_price)\n # current_action_b = \"hold\"\n elif action == 6:\n # current_action_a = \"hold\"\n current_action_b = OrderType.SELL\n amount_to_sell_b = portfolio.get_stock(Company.B)\n elif action == 7:\n # current_action_a = \"hold\"\n current_action_b = OrderType.BUY\n stock_price = stock_market_data.get_most_recent_price(Company.B)\n amount_to_buy_b = int(portfolio.cash/stock_price)\n\n order_list = []\n\n if current_action_a != 0:\n if current_action_a == OrderType.SELL and amount_to_sell_a > 0:\n order_list.append(Order(current_action_a, Company.A, amount_to_sell_a))\n elif current_action_a == OrderType.BUY and portfolio.cash > 0:\n order_list.append(Order(current_action_a, Company.A, amount_to_buy_a))\n\n if current_action_b != 0:\n if current_action_b == OrderType.SELL and amount_to_sell_b > 0:\n order_list.append(Order(current_action_b, Company.B, amount_to_sell_b))\n elif current_action_b == OrderType.BUY and portfolio.cash > 0:\n order_list.append(Order(current_action_b, Company.B, amount_to_buy_b))\n\n self.last_action_a = action\n self.last_state = current_state\n self.last_portfolio_value = current_portfolio_value\n return order_list", "def 
solve(self):\r\n # Make arrays of end-of-period assets and end-of-period marginal value\r\n \r\n if self.jac == True:\r\n \r\n if len(self.cList) == self.T_sim - self.s :\r\n self.Rfree = self.Rfree + self.dx\r\n else:\r\n self.Rfree = self.Rfree\r\n \r\n aNrm = self.prepare_to_calc_EndOfPrdvP()\r\n EndOfPrdvP = self.calc_EndOfPrdvP()\r\n\r\n # Construct a basic solution for this period\r\n if self.CubicBool:\r\n solution = self.make_basic_solution(\r\n EndOfPrdvP, aNrm, interpolator=self.make_cubic_cFunc\r\n )\r\n else:\r\n solution = self.make_basic_solution(\r\n EndOfPrdvP, aNrm, interpolator=self.make_linear_cFunc\r\n )\r\n\r\n solution = self.add_MPC_and_human_wealth(solution) # add a few things\r\n solution = self.add_stable_points(solution)\r\n \r\n \r\n \r\n self.cList.append(solution.cFunc)\r\n \r\n # Add the value function if requested, as well as the marginal marginal\r\n # value function if cubic splines were used (to prepare for next period)\r\n if self.vFuncBool:\r\n solution = self.add_vFunc(solution, EndOfPrdvP)\r\n if self.CubicBool:\r\n solution = self.add_vPPfunc(solution)\r\n return solution", "def index():\n user_id = session[\"user_id\"]\n portfolio_table = port(user_id, db)\n \n if not isinstance(portfolio_table, dict): \n return apology(\"Error in portfolio\")\n \n return render_template(\"portfolio.html\",\n shares_list = portfolio_table[\"shares\"],\n cash = portfolio_table[\"cash\"],\n total = portfolio_table[\"total\"])", "def calculate_all_portfolios(self, request, pk=None, **kwargs):\n goal = self.get_object()\n\n check_state(Goal.State(goal.state), Goal.State.ACTIVE)\n\n setting_str = request.query_params.get('setting', None)\n if not setting_str:\n logger.debug('setting parameter missing from calculate_all_portfolios query')\n raise ValidationError(\"Query parameter 'setting' must be specified and a valid JSON string\")\n try:\n setting = ujson.loads(setting_str)\n except ValueError:\n logger.debug('setting parameter for calculate_all_portfolios query not valid json')\n raise ValidationError(\"Query parameter 'setting' must be a valid json string\")\n\n # Create the settings from the dict\n serializer = serializers.GoalSettingStatelessSerializer(data=setting)\n serializer.is_valid(raise_exception=True)\n settings = serializer.create_stateless(serializer.validated_data, goal)\n\n # Calculate the portfolio\n try:\n data_provider = DataProviderDjango()\n execution_provider = ExecutionProviderDjango()\n data = [self.build_portfolio_data(item[1], item[0])\n for item in calculate_portfolios(setting=settings,\n data_provider=data_provider,\n execution_provider=execution_provider)]\n return Response(data)\n except Unsatisfiable as e:\n rdata = {'reason': \"No portfolio could be found: {}\".format(e)}\n\n if e.req_funds is not None:\n rdata['req_funds'] = e.req_funds\n\n return Response({'error': rdata}, status=status.HTTP_400_BAD_REQUEST)", "def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n 
context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)", "def getTrades(current_portfolio, desired_portfolio):\n # Create trade factory between current and desired portfolio.\n # tf = TradeFactory.TradeFactory(current_portfolio, desired_portfolio)\n\n return {}", "def get_cumportfolio(self):\n\n return self._cumportfolio", "def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)", "def summary(self):\n print '%s Portfolio\\'s %s Strategy' % (self.portfolio.name, self.name)\n print '-' * COL_DASH_WIDTH\n\n self.display_trades()\n\n for symbol in self.portfolio.assets.keys():\n perf = self.performance[symbol]\n\n print '\\nSummary for %s from %s (first trade) to %s (last trade)' % (symbol, perf['start'], perf['end'])\n print '.' * COL_DASH_WIDTH\n print 'Summary:'\n data = [[fmtn(perf['trades']), fmtn(perf['wins']), fmtn(perf['losses']), fmtn(perf['washes'])]]\n print tabulate.tabulate(data, headers=['Total Trades', '# Wins', '# Losses', '# Washes'])\n\n print '\\nPerformance:'\n data = [[\n fmtn(perf['profit']), fmtn(perf['loss']), fmtn(perf['net_profit']),\n fmtp(perf['profit_factor']), fmtp(perf['percent_profitable']), fmtn(perf['average_trade_net_profit'])\n ]]\n print tabulate.tabulate(data, headers=['Profit', 'Loss', 'Net Profit', 'Profit Factor', 'Percent Profitable', 'Average Net Profit per Trade'])\n\n print '\\nDrawdown:'\n data = [[fmtn(perf['max_drawdown']), fmtn(perf['average_drawdown']), fmtn(perf['max_drawdown_days']), fmtn(perf['average_drawdown_days'])]]\n print tabulate.tabulate(data, headers=['Max', 'Average', 'Max Days', 'Average Days'])\n\n print '\\nRisk:'\n data = [[fmtn(perf['volatility_risk']), fmtn(perf['beta']), fmtn(perf['lower_partial_moment_risk']), fmtn(perf['t_r']), fmtn(perf['s_r'])]]\n print tabulate.tabulate(data, headers=['Volatility', 'Beta', 'Lower Partial Moment', 'Treynor Ratio', 'Sharpe Ratio'])", "def cumulative_returns(self):\n return (1 + self.pct_change).cumprod()", "def project_cache(self):\n return self._project_cache", "def simulate_trading(self):\n self._generate_trading_instances()\n self._run_backtest()\n self.portfolio.output_equity()\n res=self.portfolio.get_statistics()\n self.plot.plot_equity()\n return res", "def index():\n\n # obtain cash info from users database\n cash = db.execute(\"SELECT cash FROM users WHERE id = :id\", id = session[\"user_id\"])\n grandtotal = cash[0][\"cash\"]\n \n # obtain stock info from portfolio database\n stocks = db.execute(\"SELECT symbol, shares FROM portfolio WHERE id = :id\", id=session[\"user_id\"])\n \n # for every stock in the user's portfolio, assign dict key/values for use in html/jinja\n for stock in stocks:\n symbol = str(stock[\"symbol\"])\n shares = int(stock[\"shares\"])\n name = \"\"\n price = \"\"\n total = \"\"\n quote = lookup(symbol)\n stock[\"name\"] = quote[\"name\"]\n stock[\"price\"] = \"{:.2f}\".format(quote[\"price\"])\n stock[\"total\"] = \"{:.2f}\".format(quote[\"price\"] * shares)\n stock[\"grandtotal\"] = quote[\"price\"] * shares\n grandtotal += stock[\"grandtotal\"]\n\n # format grandtotal to force 2 decimal places\n grandtotal = \"{:.2f}\".format(grandtotal)\n \n # render index page with some 
given values\n return render_template(\"index.html\", stocks = stocks, cash = cash, grandtotal = grandtotal)", "async def init(self, ctx, *amount_and_symbol : str):\n user = ctx.message.author\n portfolio = GetPortfolio(user.id)\n for i in range(0, len(amount_and_symbol),2):\n portfolio.SetOwnedCurrency(amount_and_symbol[i], amount_and_symbol[i+1])\n await self.bot.say('%s\\'s portfolio is now worth $%.2f.' %\n (ctx.message.author, portfolio.Value()))\n portfolio.Save()", "def solve_working(\n assets_this_period,\n assets_next_period,\n hc_this_period,\n hc_next_period,\n interest_rate,\n wage_rate,\n income_tax_rate,\n beta,\n gamma,\n sigma,\n neg,\n continuation_value,\n delta_hc,\n zeta,\n psi,\n n_gridpoints_capital,\n n_gridpoints_hc,\n efficiency,\n survival_rate,\n):\n # Initialize objects\n policy_labor_working_tmp = np.zeros(\n (n_gridpoints_capital, n_gridpoints_hc), dtype=np.float64\n )\n\n # Implied hc effort\n hc_effort = get_hc_effort(\n hc_this_period=hc_this_period,\n hc_next_period=hc_next_period,\n zeta=zeta,\n psi=psi,\n delta_hc=delta_hc,\n )\n\n hc_effort = np.where(np.isnan(hc_effort), 0.0, hc_effort)\n\n # Implied labor supply\n labor_input = 1 - hc_effort\n\n # Consumption\n consumption = get_consumption(\n assets_this_period=assets_this_period,\n assets_next_period=assets_next_period,\n pension_benefit=np.float64(0.0),\n labor_input=labor_input,\n interest_rate=interest_rate,\n wage_rate=wage_rate,\n income_tax_rate=income_tax_rate,\n productivity=hc_this_period,\n efficiency=efficiency,\n )\n\n # Flow utility\n flow_utility = util(\n consumption=consumption,\n labor_input=labor_input,\n hc_effort=hc_effort,\n gamma=gamma,\n sigma=sigma,\n )\n\n flow_utility = np.where(consumption < 0.0, neg, flow_utility)\n flow_utility = np.where(hc_effort > 1.0, neg, flow_utility)\n\n # Value function on the complete mesh (i.e. 
for all possible choices)\n value_full = flow_utility + beta * survival_rate * continuation_value\n\n # Derive optimal policies and store value function given optimal choices\n policy_capital_working_tmp = np.argmax(np.max(value_full, axis=3), axis=1)\n policy_hc_working_tmp = np.argmax(np.max(value_full, axis=1), axis=2)\n value_working_tmp = np.max(np.max(value_full, axis=3), axis=1)\n for assets_this_period_idx in range(n_gridpoints_capital):\n for hc_this_period_idx in range(n_gridpoints_hc):\n assets_next_period_idx = policy_capital_working_tmp[\n assets_this_period_idx, hc_this_period_idx\n ]\n hc_next_period_idx = policy_hc_working_tmp[\n assets_this_period_idx, hc_this_period_idx\n ]\n policy_labor_working_tmp[\n assets_this_period_idx, hc_this_period_idx\n ] = labor_input[\n assets_this_period_idx,\n assets_next_period_idx,\n hc_this_period_idx,\n hc_next_period_idx,\n ]\n\n return (\n policy_capital_working_tmp,\n policy_hc_working_tmp,\n policy_labor_working_tmp,\n value_working_tmp,\n )", "def get_subportfolio(self, symbol_names_list = None, timeframes_list = None):\n if symbol_names_list is None:\n symbol_names_list = self.symbol_names_list\n \n sub_portfolio = copy.copy(self) #It only copies pointers\n sub_portfolio.del_all_symbols()\n \n for symbol_name in symbol_names_list:\n subsymbol = self[symbol_name].get_subsymbol(timeframes_list)\n sub_portfolio.add_symbol(symbol_name, symbol = subsymbol)\n \n return sub_portfolio", "def test_get_portfolio(self, db_mock):\n repo = Repository()\n\n db_instance = db_mock.return_value\n db_instance.get_portfolio.return_value = None\n self.assertIsNone(repo.get_portfolio(\"123\"))\n\n db_instance.get_portfolio.return_value = (1, [])\n self.assertIsInstance(repo.get_portfolio(\"123\"), Portfolio)", "def exist_portfolio(self, name, user):\n portfolio = self.__get_portfolio_object(name, user)\n return portfolio is not None", "def calculate(self):\n\n final_quotas = self.calculate_quotas(sum(self.populations) / self.num_seats, [0] * self.states)\n final_fair_shares, final_quotas, modified_divisor, estimator = self.calculate_fair_shares([0] * self.states,\n final_quotas, sum(\n self.populations) / self.num_seats, sum(self.populations) / self.num_seats)\n\n lower_boundary = self.calculate_lower_boundary(modified_divisor)\n upper_boundary = self.calculate_upper_boundary(modified_divisor)\n\n if estimator is None:\n return None, None, None, None, None, None, None, None, None, None\n else:\n return self.original_divisor, modified_divisor, self.original_quotas, final_quotas, \\\n self.initial_fair_shares, final_fair_shares, sum(self.initial_fair_shares), lower_boundary, \\\n upper_boundary, self.estimator_history", "def cvxpy_solver(self, verbose=False):\n self.gamma = self._gamma_from_drawdown_control()\n\n objective = 0\n constr = []\n\n # weights variable depends on whether there is a risk-free asset in data\n if self.rf_included is True:\n # variable with shape h+1 predictions so first row\n # can be the known (non-variable) portfolio weight at time t\n weights = cp.Variable(shape=(self.n_preds + 1, self.n_assets))\n else:\n # Set rf to zero in all preds and cov\n self.rets = np.insert(self.rets, self.n_assets, 0, axis=1)\n self.cov = np.insert(self.cov, self.n_assets, 0, axis=-2)\n self.cov = np.insert(self.cov, self.n_assets, 0, axis=-1) # Has to be done in two steps for cov due to dims\n self.start_weights = np.append(self.start_weights, 0)\n\n weights = cp.Variable(shape=(self.n_preds + 1, self.n_assets+1))\n rf_zero_weights = 
np.zeros(shape=self.n_preds)\n constr += [weights[1:, -1] == 0] # Keep rf pos at zero since it non-present in this case\n\n\n # Loop through each row in the weights variable and construct the optimization problem\n # Note this loop is very cpu-light since no actual computations takes place inside it\n for t in range(1, weights.shape[0]):\n # sum problem objectives. Weights are shifted 1 period forward compared to self.rets\n # Concatenates objective and constraints in lists\n objective += self.single_period_objective_func(weights[t], weights[t-1], self.rets[t-1], self.cov[t-1])\n constr += self.single_period_constraints(weights[t]) # Concatenate constraints\n\n constr += [weights[0] == self.start_weights] # first weights are fixed at known current portfolio\n\n prob = cp.Problem(cp.Maximize(objective), constr) # Construct maximization problem\n prob.solve(solver=cp.ECOS, verbose=verbose)\n\n if self.rf_included is True:\n opt_var = weights.value\n else:\n opt_var = weights.value[:, :-1]\n\n if verbose is True:\n print(\"Shape of var: \", opt_var.shape)\n temp_df = pd.DataFrame(opt_var).round(3)\n temp_df['sum_weights'] = np.sum(opt_var, axis=1)\n print(temp_df)\n\n return opt_var[1:] # Discard first row which is not a variable." ]
[ "0.7362575", "0.6996456", "0.6729907", "0.6721263", "0.6675463", "0.66379255", "0.6499", "0.63661253", "0.63091534", "0.62968546", "0.6240645", "0.6196709", "0.6144556", "0.60243994", "0.5963545", "0.5962163", "0.59054154", "0.5858664", "0.5812279", "0.5781019", "0.5715677", "0.5695099", "0.56832755", "0.56832755", "0.56502557", "0.56142795", "0.5601788", "0.5591255", "0.5557412", "0.5552806", "0.5548991", "0.5546728", "0.553356", "0.5515437", "0.5509156", "0.5503026", "0.5503026", "0.5495613", "0.5492177", "0.5483658", "0.5474354", "0.5473066", "0.54698133", "0.5468401", "0.5466222", "0.5457872", "0.5448533", "0.544473", "0.5442066", "0.5431377", "0.54243016", "0.5423828", "0.54235184", "0.5408874", "0.5393072", "0.53806245", "0.5379401", "0.5367956", "0.5364299", "0.53607404", "0.5357601", "0.5357601", "0.5350372", "0.5319766", "0.52956545", "0.5282608", "0.5277925", "0.52768487", "0.5256564", "0.5251165", "0.522619", "0.52203435", "0.52191967", "0.5211993", "0.52033794", "0.51983654", "0.5197244", "0.51966023", "0.5181622", "0.5177389", "0.5162396", "0.5158838", "0.5156645", "0.515346", "0.5146656", "0.5139753", "0.5139441", "0.5121129", "0.5118124", "0.50915724", "0.5089043", "0.5082551", "0.50619406", "0.5057358", "0.50527126", "0.5051194", "0.5046313", "0.50354725", "0.5031226", "0.50246155" ]
0.81566226
0
Override fields on ``self.account``.
def override_account_fields(self,
                            settled_cash=not_overridden,
                            accrued_interest=not_overridden,
                            buying_power=not_overridden,
                            equity_with_loan=not_overridden,
                            total_positions_value=not_overridden,
                            total_positions_exposure=not_overridden,
                            regt_equity=not_overridden,
                            regt_margin=not_overridden,
                            initial_margin_requirement=not_overridden,
                            maintenance_margin_requirement=not_overridden,
                            available_funds=not_overridden,
                            excess_liquidity=not_overridden,
                            cushion=not_overridden,
                            day_trades_remaining=not_overridden,
                            leverage=not_overridden,
                            net_leverage=not_overridden,
                            net_liquidation=not_overridden):
    # mark that the portfolio is dirty to override the fields again
    self._dirty_account = True
    self._account_overrides = kwargs = {
        k: v for k, v in locals().items() if v is not not_overridden
    }
    del kwargs['self']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def account(self, account):\n\n self._account = account", "def patch(self, account=None, user=None, account_id=None):\n return super().patch()", "def account(self, account: str):\n self._account = account", "def get_account(self, account):\n \n pass", "def set_account(self):\n return self.__Account", "def onAccountUpdate(self, data):\n pass", "def account(self):\r\n return Account(self)", "def put(self, account=None, user=None, account_id=None):\n return super().put()", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def change_account(self, account):\r\n check_account = Account(account, steem_instance=self.steem)\r\n self.account = check_account[\"name\"]\r\n self.refresh()", "def get_account_details(self):\n pass", "def put_account(self, account):\n \n pass", "def update_account_data(self):\n self.ensure_one()\n getattr(self, '%s_update_account_data' % self.provider, lambda: None)()", "def account(self):\n return Account(self)", "def account_information(self) -> MetatraderAccountInformation:\n return self._accountInformation", "def __init__(self, client, account_id):\n\n super(AccountsMixin, self).__init__(client)\n self._account_id = account_id", "def account_id(self, account_id):\n self._account_id = account_id", "def account(self, account_code):\r\n return acc.Account(self, account_code)", "def extended_account_data(self,accountinfo):\n\n data = {\n 'username' : accountinfo['username'],\n 'password' : accountinfo['password'],\n 'passwordconfirm' : accountinfo['password'],\n 'firstname' : accountinfo['firstname'],\n 'middlename' : accountinfo['middlename'],\n 'lastname' : accountinfo['lastname'],\n 'email' : accountinfo['email'],\n 'emailconfirm' : accountinfo['email'],\n 'gender' : 'Refused',\n 'disability' : 'Refused',\n 'hispanic' : 'Refused',\n 'race_refused' : 'Yes',\n 'captcha' : False,\n 'usageagreement' : True,\n }\n return data", "def to_representation(self, instance):\n data = super(AccountSerializer, self).to_representation(instance)\n data[\"display_name\"] = instance.name\n return data", "def _account(self) -> Account:\n if isinstance(self._node_cached_account, Account):\n return self._node_cached_account\n account = Account.retrieve(\n session=self.entity.session,\n entity=self.entity,\n account_id=self.account_id\n )\n self._node_cached_account = account\n return account", "def account_amount(self, account_amount):\n\n self._account_amount = account_amount", "def get_account(self):\n return self._account", "def get_account(self):\n return self._account", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def account_id(self, account_id):\n\n self._account_id = account_id", "def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields", "def account_name(self, account_name):\n\n self._account_name = account_name", "def account_name(self, account_name):\n\n 
self._account_name = account_name", "def accounts(self): # pragma: no coverage\r\n raise NotImplementedError()", "def account(self) -> str:\n return self._account", "def account(self) -> str:\n return self._account", "def setAccount(self, account_id):\n self.data_struct['_setAccount'] = account_id", "def describe_account_attributes():\n pass", "def update(self, account):\n model = models.load('Account', account)\n return self.client.update_account(model=model)", "def account(self, account_id):\r\n return Account(self, account_id)", "def _postprocess_wim_account(wim_account, hide=_CONFIDENTIAL_FIELDS):\n # Fix fields from join\n for field in ('type', 'description', 'wim_url'):\n if field in wim_account:\n wim_account['wim.'+field] = wim_account.pop(field)\n\n for field in ('id', 'nfvo_tenant_id', 'wim_account_id'):\n if field in wim_account:\n wim_account['association.'+field] = wim_account.pop(field)\n\n wim_account = _postprocess_record(wim_account, hide)\n\n created = wim_account.get('created')\n wim_account['created'] = (created is True or created == 'true')\n\n return wim_account", "def __init__(self, account, user_username, user_password):\n self. account = account\n self. user_username = user_username\n self.user_password = user_password", "def save_account(self):\n Credential.account_list.append(self)", "def to_representation(self, obj):\n data = super(AccountInfoSerializer, self).to_representation(obj)\n if obj.pk is None:\n source = mkt.LOGIN_SOURCE_LOOKUP[mkt.LOGIN_SOURCE_UNKNOWN]\n elif obj.source in self.ALLOWED_SOURCES:\n source = mkt.LOGIN_SOURCE_LOOKUP[obj.source]\n else:\n source = mkt.LOGIN_SOURCE_LOOKUP[mkt.LOGIN_SOURCE_BROWSERID]\n\n data[\"source\"] = source\n return data", "def __init__(self):\n self.account_balance = 0\n self.amount = 0", "def set_account(self, account: str):\n ret = self._call_txtrader_api('set_account', {'account': account})\n if ret:\n self.account = account\n return ret", "async def on_account_information_updated(self, account_information: MetatraderAccountInformation):\n self._accountInformation = account_information", "def custom_profile_fields(self, custom_profile_fields):\n\n self._custom_profile_fields = custom_profile_fields", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_type(self, account_type):\n\n self._account_type = account_type", "def account_number(self, account_number):\n\n self._account_number = account_number", "def __init__(self,Account,username,password):\n self.Account = Account\n self.username = username\n self.password = password", "def get_account_information(self):\n self.account_information = retry(lambda: self.client\n .futures_account_v2())\n return self.account_information", "def save(self, *args, **kwargs):\n self.username = self.username or self.email\n super().save(*args, **kwargs)", "def add_account(self, account):\n self.accounts[account.account_number] = account.json()\n # We should save in database the new account using self.di, but not now in order to get our tests passed", "def account_info(account):\n return {\n 'status': account.status,\n 'availability': account.availability,\n 'blurb': account.message,\n 'email': account.email,\n 'name': account.name,\n 'success': True\n }", "def get(self, account=None, user=None, account_id=None):\n self.get_object = lambda: account\n return super().get()", "def 
get_private(self):\n return self.__Account", "def account_context():\n return dict(\n user=current_user,\n party=current_user.party,\n )", "def accounts(self, accounts):\n\n self._accounts = accounts", "def __init__(self,account,user_name,password,email):\n self.account = account\n self.user_name = user_name\n self.password = password\n self.email = email", "def account_number(self):\n return self.__account_number", "def on_account(self, account: AccountData):\n # self.on_event(EVENT_ACCOUNT, account)\n # self.on_event(EVENT_ACCOUNT + account.vt_accountid, account)\n pass", "def account_summary(self):\n pass", "def __init__(self, account_id, balance):\n self.account_id = account_id\n self.balance = balance", "def to_solders(self) -> instruction.AccountMeta:\n return instruction.AccountMeta(\n pubkey=self.pubkey.to_solders(), is_signer=self.is_signer, is_writable=self.is_writable\n )", "def __init__(self,\r\n account_id=None,\r\n name=None,\r\n org_no=None,\r\n uni_customer_no=None,\r\n created=None,\r\n last_modified=None,\r\n dealer_id=None,\r\n dealer_name=None,\r\n dealer_reference=None,\r\n enabled=None,\r\n additional_properties = {}):\r\n\r\n # Initialize members of the class\r\n self.account_id = account_id\r\n self.name = name\r\n self.org_no = org_no\r\n self.uni_customer_no = uni_customer_no\r\n self.created = APIHelper.RFC3339DateTime(created) if created else None\r\n self.last_modified = APIHelper.RFC3339DateTime(last_modified) if last_modified else None\r\n self.dealer_id = dealer_id\r\n self.dealer_name = dealer_name\r\n self.dealer_reference = dealer_reference\r\n self.enabled = enabled\r\n\r\n # Add additional model properties to the instance\r\n self.additional_properties = additional_properties", "def __init__(self,account,username, password):\n self.account = account\n self.username = username\n self.password = password", "def account_balance(self, account_balance):\n\n self._account_balance = account_balance", "def account_balance(self, account_balance):\n\n self._account_balance = account_balance", "def test_client_bank_account_partial_update(self):\n pass", "def __init__(self, accountHolder=None):\n if accountHolder:\n self._getAccount = lambda: accountHolder.theAccount\n else:\n self._getAccount = lambda: self.theAccount", "def __init__(self, *args, **kwargs):\n user = kwargs.pop('user', None)\n super().__init__(*args, **kwargs)\n if self.instance.id:\n for origin in Origin.ORIGIN_TYPES:\n # in case they had an origin that doesn't exist any more.\n try:\n self.fields[origin[0]].initial = self.instance.origins.filter(type=origin[0]).first()\n except Origin.DoesNotExist:\n pass\n # adjust fields for different users\n if user.has_perm('players.view_any_player'):\n allowed_fields = self.fields.keys()\n else:\n allowed_fields = PLAYER_ALLOWED_FIELDS\n if not self.instance.id:\n allowed_fields = allowed_fields + NEW_PLAYER_FIELDS\n self.fields = dict([(key, val) for key, val in self.fields.items() if key in allowed_fields])", "def __init__(__self__, *,\n account_key: pulumi.Input[str],\n account_name: pulumi.Input[str],\n storage_type: pulumi.Input[str]):\n pulumi.set(__self__, \"account_key\", account_key)\n pulumi.set(__self__, \"account_name\", account_name)\n pulumi.set(__self__, \"storage_type\", 'StorageAccount')", "def load_account_data(self):\n if self.acc is None:\n return\n\n self.name_entry.delete(0, tk.END)\n self.name_entry.insert(0, self.acc.name)\n self.name_entry.config(state='disabled')\n\n self.email_entry.delete(0, tk.END)\n self.email_entry.insert(0, 
self.acc.email)\n\n self.user_entry.delete(0, tk.END)\n self.user_entry.insert(0, self.acc.username)\n\n key = secrets.decrypt_field(self.us.crypt_key)\n self.pass_entry.delete(0, tk.END)\n self.pass_entry.insert(0, secrets.decrypt_data(key, self.acc.password))", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)\n # Put the first and last name at the top\n new_order = self.fields.keyOrder[:-2]\n new_order.insert(0, 'first_name')\n new_order.insert(1, 'last_name')\n self.fields.keyOrder = new_order", "def fields(self, fields):\n\n self._fields = fields", "def additional_fields(self, additional_fields):\n\n self._additional_fields = additional_fields", "def account_holder(self, account_holder):\n\n self._account_holder = account_holder", "def account_status(self, account_status):\n\n self._account_status = account_status", "def account_look_up_info(self, account_look_up_info):\n\n self._account_look_up_info = account_look_up_info", "def configure_account(self, alias: Alias = sentinel, margin_rate: DecimalNumber = sentinel):\n pass", "def __init__(self, account_name, user_name, password):\n self.account_name = account_name\n self.user_name = user_name\n self.password = password", "def __init__(self,account,username, password):\n self.user_name = username\n self.password = password\n self.account = account", "def __init__(self, *args, **kwargs):\n super (EditUserLoginData, self).__init__ (*args, **kwargs)\n instance = getattr (self, 'instance', None)\n if instance and instance.id:\n self.fields['username'].widget.attrs['readonly'] = True\n self.fields['username'].widget.attrs['disabled'] = True", "def _onchange_field(self):\n if not self.secretary_contact_id:\n return\n if self.partner_type in ['dr', 'patient', 'secretary']:\n self.update({\n 'secretary_contact_id': False\n })", "def get_account_settings():\n pass", "def __init__(self,account_name, username, password):\n self.account_name = account_name\n self.username = username\n self.password = password", "def __init__(__self__, *,\n account_alias: pulumi.Input[str]):\n pulumi.set(__self__, \"account_alias\", account_alias)", "def default_get(self, field_list):\n # NOTE: use field_list argument instead of fields for fix the pylint\n # error W0621 Redefining name 'fields' from outer scope\n context = self._context or {}\n res = super(AccountWhSrc, self).default_get(\n field_list)\n res.update({'uid_wh_agent': self._get_uid_wh_agent(\n )})\n res.update({'partner_list': self._get_partner_agent(\n )})\n\n return res", "def get_account(self, account_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = 'status'\r\n\r\n return self.account.getObject(id=account_id, **kwargs)", "def _replace_fields(self):\n for name, value in self._cleaned_data.items():\n setattr(self, name, value)", "def _set_user_info(self):\n sha = sha1(self.email).hexdigest()\n user_info = redis.hgetall(\"sl:account:{}\".format(sha))\n\n if (type(user_info) != dict or\n user_info.get(\"password\") != self.password):\n user_info = {}\n\n try:\n self.plan = Plan.from_id(user_info.get(\"plan\"))\n except SleekException:\n self.plan = None\n self.customer_token = str_to_none(\n user_info.get(\"customer_token\")\n )\n self.subscription_token = str_to_none(\n user_info.get(\"subscription_token\")\n )\n self.subscription_end = str_to_none(\n user_info.get(\"subscription_end\")\n )", "def get_protected_fields(self):\n\t\tfields = super(PaymentObject, self).get_protected_fields()\n\t\treturn tuple(set(fields) | {'user_id'})", "def 
default_account(\n PK=1,\n UtilityProvider=\"test_provider\",\n AccountNumber=\"test_account_number\",\n RawAccountNumber=\"test_raw_account_number\",\n SourceLink=\"test_source_link\",\n StatementType=\"test_statement_type\",\n StatementDate=date(2000, 2, 5),\n IntervalStart=date(2000, 1, 1),\n IntervalEnd=date(2000, 2, 1),\n TotalBillAmount=Decimal(100.0),\n AmountDue=Decimal(90.0),\n NewCharges=Decimal(80.0),\n OutstandingBalance=Decimal(70.0),\n PreviousBalance=Decimal(60.0),\n meters=None,\n floating_charges=None,\n):\n return Account(\n PK=PK,\n UtilityProvider=UtilityProvider,\n AccountNumber=AccountNumber,\n RawAccountNumber=RawAccountNumber,\n SourceLink=SourceLink,\n StatementType=StatementType,\n StatementDate=StatementDate,\n IntervalStart=IntervalStart,\n IntervalEnd=IntervalEnd,\n TotalBillAmount=TotalBillAmount,\n AmountDue=AmountDue,\n NewCharges=NewCharges,\n OutstandingBalance=OutstandingBalance,\n PreviousBalance=PreviousBalance,\n meters=[] if not meters else meters,\n floating_charges=[] if not floating_charges else floating_charges,\n )" ]
[ "0.6413226", "0.6413226", "0.6413226", "0.6413226", "0.6343206", "0.6329128", "0.6322581", "0.63135093", "0.61441493", "0.60770935", "0.60361296", "0.6022793", "0.60135746", "0.60068023", "0.59981", "0.59467715", "0.58997744", "0.5875439", "0.5858766", "0.5787134", "0.5765871", "0.5693723", "0.5661822", "0.56605464", "0.5647849", "0.5623891", "0.5623891", "0.56091964", "0.56091964", "0.56091964", "0.56091964", "0.56091964", "0.56091964", "0.55932355", "0.5577527", "0.5577527", "0.5525663", "0.5497245", "0.5497245", "0.54854614", "0.5484916", "0.54774356", "0.5476911", "0.546609", "0.54583955", "0.54424846", "0.5410426", "0.5377124", "0.53760546", "0.53738874", "0.53401583", "0.5329828", "0.5329828", "0.5329828", "0.5329828", "0.53200155", "0.5298636", "0.5294403", "0.52843815", "0.52803403", "0.52714235", "0.527081", "0.52647203", "0.52584964", "0.5255239", "0.52511203", "0.52499014", "0.5246141", "0.5233991", "0.52194643", "0.5218058", "0.52145964", "0.5213871", "0.52087873", "0.52087873", "0.5207795", "0.52052665", "0.5198356", "0.5193696", "0.5190282", "0.51890326", "0.5186539", "0.5186149", "0.51714903", "0.5168949", "0.5168342", "0.5165137", "0.5161807", "0.51558155", "0.51505286", "0.514965", "0.5140693", "0.5138576", "0.5135262", "0.5132134", "0.5115724", "0.51141286", "0.51140845", "0.5109768", "0.5108096" ]
0.67399603
0
Initializing method. Always starts with player 'X' going first. Also creates a blank board to begin playing on.
def __init__(self):
    self.current = Piece.EX
    self.board = [Piece.BLANK, Piece.BLANK, Piece.BLANK,
                  Piece.BLANK, Piece.BLANK, Piece.BLANK,
                  Piece.BLANK, Piece.BLANK, Piece.BLANK]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n # Current player\n self.player = X\n\n # Board\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]\n\n # Winner\n self.winner = None\n\n # Game over\n self._gameover = False", "def __init__(self):\n self.board = [\n BS, BS, BS, BS,\n BS, BS, BS,\n BS, BS, BS, BS,\n EM, EM, EM,\n WS, WS, WS, WS,\n WS, WS, WS,\n WS, WS, WS, WS\n ]\n self.curr_player = WHITE_PLAYER", "def initBoard(self):\n pass", "def new_game(self):\n self.board = [None] * 9\n self.player = \"X\"\n self.winner = None", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def __init__(self):\n self.__grid = create_grid(\n Settings.SIZE_X, Settings.SIZE_Y, MarkerType.NONE)\n\n self.__turn = 0\n self.__state = GameState.PLAYING\n self.__winner = MarkerType.NONE\n self.__loser = MarkerType.NONE\n\n # Separate counter for turns, because __turn depends on starting player\n self.__turns_played = 0", "def __init__(self):\r\n\t\tself.game_board = [['0','0','0'],['0','0','0'],['0','0','0']]\r\n\t\tself.count = 0\r\n\t\tself.x_turn = True\r\n\t\r\n\r\n\t\tpass", "def __init__(self):\n # The starting counts are set to 0 and modified when the board is initiated.\n self.num_black_pieces = 0\n self.num_black_kings = 0\n self.num_white_pieces = 0\n self.num_white_kings = 0\n # Creates a new board and fills it with the appropriate pieces.\n self.board = self._initiate_board()\n self.moves = []", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def setup(self):\n piece_order = ['ROOK','KNIGHT','BISHOP','QUEEN','KING','BISHOP','KNIGHT','ROOK']\n for row,colour in zip([0,7],['BLACK','WHITE']):\n for col,piece in enumerate(piece_order):\n self.board[row][col] = colour + '_' + piece\n \n for row,colour in zip([1,6],['BLACK','WHITE']):\n for i in range(8):\n self.board[row][i] = colour + '_' + 'PAWN'\n \n self.toplay = 'WHITE'", "def setUp(self):\n self.gameBoard = Grid((100, 100), Cell)", "def __init__(self, player_x_type, player_o_type):\n self.board = Board()\n self.player_x = player_x_type(self.FIRST_PLAYER_MARK)\n self.player_o = player_o_type(self.SECOND_PLAYER_MARK)", "def initialize():\n\n global PLAYER # this means we use the global var PLAYER and cannot have a local var named PLAYER\n global LEVEL_COUNTER\n\n LEVEL_COUNTER = 1\n \n coordinates = generate_coords()\n\n PLAYER = Stark()\n tree = Tree()\n ww = WhiteWalker()\n crown = Crown()\n gray_gem = GrayGem()\n clear_board()\n GAME_BOARD.create(\"Snow\",\"Snow\")\n GAME_BOARD.draw_msg(\"Level \" + str(LEVEL_COUNTER) + \". 
Winter is coming.\")\n generate_level(coordinates, [PLAYER, ww, gray_gem, crown, tree, tree, gray_gem, tree, tree, gray_gem, tree])\n\n # for i in range(0,NUM_ELTS):\n # place_on_board(elts[i], coordinates[i][0], coordinates[i][1])", "def __init__(self):\n self.players = {1: [\"Player_a\", \"\\u25CF\"], 2: [\"Player_b\", \"\\u25CB\"]}\n self.current_player = 1\n self.playing_player = self.players[1]\n self.grid = [[\" \"] * 6 for x in range(7)]", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def __init__(self):\n self.game_screen = pygame.display.set_mode((GameData.screen_dim, GameData.screen_dim))\n self.game_screen.fill(GameData.background_color)\n self.player = 1\n self.game_over = False\n self.board = np.zeros((GameData.rows, GameData.columns))", "def initialize_board():\n # Wipe current board\n for x in range(len(THE_BOARD.positions)):\n for y in range(len(THE_BOARD.positions)):\n THE_BOARD.positions[x][y] = ' '\n\n all_pieces = []\n\n # Pawns\n white_pawns = [Pawn('white', (6, i)) for i in range(len(THE_BOARD.positions[6]))]\n black_pawns = [Pawn('black', (1, i)) for i in range(len(THE_BOARD.positions[1]))]\n all_pieces.extend(white_pawns)\n all_pieces.extend(black_pawns)\n\n # Rooks\n rook1 = Rook('black', (0, 0))\n all_pieces.append(rook1)\n rook2 = Rook('black', (0, 7))\n all_pieces.append(rook2)\n rook3 = Rook('white', (7, 0))\n all_pieces.append(rook3)\n rook4 = Rook('white', (7, 7))\n all_pieces.append(rook4)\n\n # Knights\n knight1 = Knight('black', (0, 1))\n all_pieces.append(knight1)\n knight2 = Knight('black', (0, 6))\n all_pieces.append(knight2)\n knight3 = Knight('white', (7, 1))\n all_pieces.append(knight3)\n knight4 = Knight('white', (7, 6))\n all_pieces.append(knight4)\n\n # Bishops\n bishop1 = Bishop('black', (0, 2))\n all_pieces.append(bishop1)\n bishop2 = Bishop('black', (0, 5))\n all_pieces.append(bishop2)\n bishop3 = Bishop('white', (7, 2))\n all_pieces.append(bishop3)\n bishop4 = Bishop('white', (7, 5))\n all_pieces.append(bishop4)\n\n # King and Queen\n queen1 = Queen('black', (0, 4))\n all_pieces.append(queen1)\n queen2 = Queen('white', (7, 4))\n all_pieces.append(queen2)\n king1 = King('black', (0, 3))\n all_pieces.append(king1)\n king2 = King('white', (7, 3))\n all_pieces.append(king2)\n\n # Add every single piece to the board. 
Only then can they update their spaces threatened\n for piece in all_pieces:\n THE_BOARD.update(piece)\n THE_BOARD.update_all_spaces_threatened()", "def setUp(self):\n\n self.board = Board(3, 3)", "def __init__(self):\n\n self.__turn_info = { 'turn': ChessGame.WHITE }\n self.init_board()", "def init_game(self):\n nrows = len(self.array)\n self.game_over = False\n self.squares_left = nrows * nrows\n self.bombs_left = 0\n # clear the board\n for i in xrange(nrows):\n for j in xrange(nrows):\n self.array[i][j].reset()\n # put N random bombs\n for i in xrange(nrows):\n rand_num = random.randrange(nrows*nrows)\n if self.array[rand_num / nrows][rand_num % nrows].type \\\n != SquareType.BOMB:\n self.insert_bomb(rand_num / nrows, rand_num % nrows)\n self.squares_left -= self.bombs_left\n self.print_board()", "def __init__(self, players):\n\n # Define the players\n self.players = players\n\n # Define who starts the game\n self.nplayer = 1 \n\n # Define the board\n self.board = [0] * 9", "def __init__(self):\n self.board = Board()\n self.__root = BinaryNode(Board(), None, Board.PLAYER_1)\n self.player = Board.PLAYER_0\n self.win = False", "def __init__(self, board=None):\n self.winner = None\n self.board = board or [self.__class__.EMPTY_POSITION_COUNTER] * 9", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)", "def __init__(self):\n self.board_dict = dict()\n for i in range(self.BOARD_WIDTH):\n for j in range(self.BOARD_WIDTH):\n self.board_dict[i, j] = 0, None\n\n self.players_locations = dict()\n self.last_moved = None", "def start_at_beginning(self):\n b_pieces = [ChessPiece.B_ROOK,\n ChessPiece.B_KNIGHT,\n ChessPiece.B_BISHOP,\n ChessPiece.B_QUEEN,\n ChessPiece.B_KING,\n ChessPiece.B_BISHOP,\n ChessPiece.B_KNIGHT,\n ChessPiece.B_ROOK]\n w_pieces = [ChessPiece.W_ROOK,\n ChessPiece.W_KNIGHT,\n ChessPiece.W_BISHOP,\n ChessPiece.W_QUEEN,\n ChessPiece.W_KING,\n ChessPiece.W_BISHOP,\n ChessPiece.W_KNIGHT,\n ChessPiece.W_ROOK]\n\n for i in range(8):\n self.board.put_piece(b_pieces[i], 0, i)\n self.board.put_piece(ChessPiece.B_PAWN, 1, i)\n self.board.put_piece(w_pieces[i], 7, i)\n self.board.put_piece(ChessPiece.W_PAWN, 6, i)", "def __init__(self):\n self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]\n self.last_move = None", "def __init__(self):\n super(CamTacToe, self).__init__()\n self.state = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\n self.history = [self.state[:]]\n self.player_char = 'x'\n self.opponent_char = 'o'\n self.difficulty = 0\n self.player_start = 0", "def clear_board(cls):\n # Set the board dimensions\n cls.board = [[cls.empty for x in range(cls.size)] for y in range(cls.size)]\n \n # Set allowed positions the user may provide\n cls.positions = [str(x) for x in range(1, cls.size**2 + 1)]\n\n cls.current_player = 'X'\n\n cls.display_board()\n\n cls.prompt_player()", "def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))", "def init_board(self) -> None:\n\t\tself.canvas.create_rectangle(0, 0, self.canvas_width, self.canvas_height, fill=self.color_background)\n\t\tfor x in range(0, self.canvas_width, 
self.canvas_width//self.board_size):\n\t\t\tself.canvas.create_line(x, 0, x, self.canvas_width, fill=self.color_tile_border)\n\n\t\tfor y in range(0, self.canvas_width+1, self.canvas_height//self.board_size):\n\t\t\tself.canvas.create_line(0, y, self.canvas_height, y, fill=self.color_tile_border)\n\n\t\tself.text_area.delete('0.1', '2.1')", "def initialize_board(self):\n self.board_values = {x:x for x in(range(1,10))}", "def __init__ (self, cols = 6, rows = 7, requiredToWin = 4):\r\n\t\tself.cols = cols\r\n\t\tself.rows = rows\r\n\t\tself.win = requiredToWin\r\n\t\tself.board = [[NONE] * rows for _ in range(cols)]", "def __init__(self):\r\n self._board = None\r\n self._bb_settings = Settings()\r\n self._screen = pygame.display.set_mode((self._bb_settings.screen_width,\r\n self._bb_settings.screen_height))\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = Scoreboard(self._bb_settings, self._screen)\r\n self._image = pygame.image.load('board.bmp')\r\n self._rect = self._image.get_rect()\r\n self._play_mode_button_list = self.make_play_mode_buttons()\r\n self._replay_button_list = self.make_replay_buttons()", "def __init__(self):\n self._board = [\n\n ['', '', '', \"x\", '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n ['', '', '', '', '', '', '', ''],\n [\"o\", '', \"o\", '', \"o\", '', \"o\", ''],\n ]\n self._game_state = \"UNFINISHED\" # default game state\n self._current_row = 0 #helper used to enforce moving one row at a time\n self._current_x_row = 0 # tracks x's row coordinate\n self._current_x_column = 3 # tracks x's column coordinate\n\n #four coordinates tracking the available diagonal spaces of x\n self._lower_right = (self._current_x_row + 1, self._current_x_column + 1)\n self._lower_left = (self._current_x_row + 1, self._current_x_column - 1)\n self._upper_right = (self._current_x_row - 1, self._current_x_column + 1)\n self._upper_left = (self._current_x_row - 1, self._current_x_column - 1)\n\n #helper used to check if x is in the first column\n self._row1 = (\n self._board[0][0],\n self._board[1][0],\n self._board[2][0],\n self._board[3][0],\n self._board[4][0],\n self._board[5][0],\n self._board[6][0],\n self._board[7][0])\n #helper used to check if x is in the last column\n self._row7 = (\n self._board[0][7],\n self._board[1][7],\n self._board[2][7],\n self._board[3][7],\n self._board[4][7],\n self._board[5][7],\n self._board[6][7],\n self._board[7][7])", "def __init__(self, player):\n \n self.colour = player\n self.game_in_head = Board()", "def init_new_board(self) -> None:\r\n\r\n TkState.enable(self.settings_menu.winfo_children())\r\n TkState.enable(self.edit_menu.winfo_children())\r\n TkState.enable([self.play_button, self.step_button])\r\n TkState.disable([self.reset_button])\r\n\r\n self.gen_number.config(text = 0)\r\n self.speed_scale.set(self.INITIAL_TIME_PER_GEN)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n\r\n self.animator.board = self.anim_board\r\n self.painter.board = self.anim_board\r\n self.painter.adjust_to_canvas()", "def setUp(self):\n self.game = TTTBoard(3)", "def init_board(self):\n\n self.__board = dict()\n order = ['rook', 'knight', 'bishop', 'queen', 'king', 'bishop',\n 'knight', 'rook']\n for j, name in enumerate(order):\n\n self.__board[(0, j)] = ChessGame.Piece( name, ChessGame.WHITE)\n self.__board[(7, j)] = ChessGame.Piece( name, ChessGame.BLACK)\n 
self.__board[(1, j)] = ChessGame.Piece('pawn', ChessGame.WHITE)\n self.__board[(6, j)] = ChessGame.Piece('pawn', ChessGame.BLACK)\n\n self.__players = { ChessGame.WHITE: set(), ChessGame.BLACK: set() }\n for color in (ChessGame.BLACK, ChessGame.WHITE):\n self.__players[color] = {(x, y) for (x, y), piece in\n self.__board.iteritems() if piece.color == color }\n\n return", "def __init__(self, players):\n self.players = players\n self.board = Board()", "def __init__(self, rows=6, columns=7, win_length=4):\n\n self._board = [[0 for i in xrange(columns)] for i in xrange(rows)]\n self._rows = rows\n self._columns = columns\n self._win_length = win_length\n self.current_player = None\n self.winner = None\n print \"The game is afoot!\"", "def __init__(self):\n self._current_state = \"UNFINISHED\"\n self._start_color = \"RED\"\n self._board = Board()", "def _initiate_board(self):\n grid = []\n for i in range(constant.BOARD_DIMENSION):\n # Starts each row\n current_row = []\n for j in range(constant.BOARD_DIMENSION):\n # Adds the pieces depending on the position\n if i < constant.ROWS_OF_PIECES:\n # Black pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.black))\n self.num_black_pieces = self.num_black_pieces + 1\n else:\n current_row.append(None)\n\n elif i >= constant.BOARD_DIMENSION - constant.ROWS_OF_PIECES:\n # White pieces\n if (j + i) % 2 != 0:\n current_row.append(Piece(i, j, Player.white))\n self.num_white_pieces = self.num_white_pieces + 1\n else:\n current_row.append(None)\n\n else:\n current_row.append(None)\n\n grid.append(current_row)\n\n return grid", "def setup_board(self):\n\n for row in range(10):\n\n row_list = list()\n\n for column in range(9):\n\n row_list.append(None)\n\n self._board.append(row_list)", "def create_game(self):\n\n\t\tself.player_model.grid = []\n\t\tself.player_model.available_cells = []\n\n\t\tfor i in range(9):\n\t\t\tc = Cell(i, None)\n\t\t\tself.player_model.grid.append(c)\n\t\t\tself.player_model.available_cells.append(c)\n\n\t\tself.player_frame.setup_game(self.player_model.current_player.name)", "def __init__(self):\n\n self._board = Board()", "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "def start_gameloop(self):\n print(\"Game Loop starting...\")\n while True:\n current_turn = self.who_goes_first()\n print('The ' + current_turn + ' will go first.')\n while self.is_active:\n if current_turn == \"player\":\n self.board.draw()\n move = get_player_move(\n self.board.positions, self.board.is_position_availible)\n self.board.make_move(move, self.player_letter)\n current_turn = \"computer\"\n else:\n move = self.npc.get_move(self.board)\n self.board.make_move(move, self.npc.letter)\n current_turn = \"player\"\n if self.board.is_winner(self.player_letter):\n self.board.draw()\n print(\"You won!\")\n self.is_active = False\n if self.board.is_winner(self.npc.letter):\n self.board.draw()\n print(\"You lost!\")\n self.is_active = False\n if self.board.is_board_full():\n self.board.draw()\n print(\"Tie\")\n self.is_active = False\n if request_play_again() is False:\n break\n self.is_active = True\n self.board = Board(request_board_size())", "def start_game() -> None:\n rows = get_int()\n cols = get_int()\n state = game.GameState(rows, cols)\n\n line = 
next_line()\n if line == 'CONTENTS':\n rowList = []\n for i in range(rows):\n row = []\n line = raw_next_line()\n for index in range(cols):\n row.append(line[index])\n rowList.append(row)\n state.set_board_contents(rowList)\n\n while True:\n _display_board(state)\n line = next_line()\n if line == 'Q':\n return\n if line == '':\n if state.tick():\n _display_board(state)\n break\n else:\n _process_command(line, state)\n print('GAME OVER')", "def initial_state() -> Board:\n board = (\"rnbqkbnr\", \"pppppppp\", \"........\", \"........\", \"........\",\n \"........\", \"PPPPPPPP\", \"RNBQKBNR\")\n\n return board", "def __init__(self):\n self._board = []\n for i in range(10):\n self._board.append([None for i in range(9)])\n self.place_pieces()", "def initGame(self, parent):\n\n\t\tself.resetBoard()\n\n\t\tself.parent = parent\n\t\twidth = self.boardSize * self.squareSize\n\t\theight = self.boardSize * self.squareSize\n\t\ttk.Frame.__init__(self, self.parent)\n\n\t\tself.canvas = tk.Canvas(self, borderwidth=0, highlightthickness=0, width=width, height=height)\n\t\tself.canvas.pack()\n\t\tself.canvas.bind('<Button-1>', self.click)\n\t\tself.parent.title(\"TicTacToe\")\n\n\t\tfor row in range(self.boardSize):\n\t\t\tfor col in range(self.boardSize):\n\t\t\t\tx1 = (col * self.squareSize)\n\t\t\t\ty1 = (row * self.squareSize)\n\t\t\t\tx2 = x1 + self.squareSize\n\t\t\t\ty2 = y1 + self.squareSize\n\t\t\t\tself.canvas.create_rectangle(x1, y1, x2, y2, outline=\"black\", tags='square')", "def setup_new_board(self):\n\n logger.info(u'setup_new_board()')\n\n self.squares = [[None for j in xrange(8)] for i in xrange(8)]\n \n self.black_checkers = [ch.Checker(u'black', self) for i in xrange(12)]\n self.white_checkers = [ch.Checker(u'white', self) for i in xrange(12)]\n\n u\"\"\" Place checkers in starting squares \"\"\"\n i = 0\n for row in xrange(3):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.white_checkers[i])\n i += 1\n\n i = 0\n for row in xrange(5, 8):\n for column in xrange(8):\n if self.dark_square((row, column)):\n self.place_checker((row, column), self.black_checkers[i])\n i += 1", "def __init__(self, board):\n self.running = True\n self.state = \"waiting\"\n pygame.init()\n pygame.display.set_caption(\"Sudoku Solver\")\n\n self.define_grid()\n self.define_number_positions()\n self.define_button()\n self.board = board\n self.font = pygame.font.Font('ubuntu.ttf', NUMBERS_SIZE)\n self.sleep_time = 1 / CHANGES_PER_SECOND\n\n self.original_board = board.copy()", "def __init__(self):\n self.board = [\n [None, None, None],\n [None, None, None],\n [None, None, None]\n ]", "def __init__(self, initial_board):\n self.initial_board = initial_board", "def __init__(self):\n self.computer_first = 0 # randint(0, 1)\n self.app = Tk()\n self.app.attributes(\"-toolwindow\", 1)\n self.app.title('Tic Tac Toe')\n self.app.resizable(width=False, height=False)\n self.board = Board()\n self.font = Font(family=\"Helvetica\", size=32)\n self.buttons = {}\n for x, y in self.board.fields:\n handler = lambda x=x, y=y: self.move(x, y)\n button = Button(self.app, command=handler, font=self.font,\n width=2, height=1)\n button.grid(row=y, column=x)\n self.buttons[x, y] = button\n handler = lambda: self.reset()\n button = Button(self.app, text='reset', command=handler)\n button.grid(row=self.board.size + 1, column=0,\n columnspan=self.board.size, stick='WE')\n self.update()\n if self.computer_first:\n self.move(randint(0, self.board.size - 1),\n randint(0, 
self.board.size - 1))", "def __init__(self, dim, reverse = False, board = None):\n self.empty_squares = []\n if board == None:\n self.board = [[\"\", \"\", \"\"],\n [\"\", \"\", \"\"],\n [\"\", \"\", \"\"]]\n self.dim = dim\n self.board = board\n self.reverse = reverse\n self.win = None\n self.DRAW = 4\n self.EMPTY = 1\n self.PLAYERO = 2\n self.PLAYERX = 3", "def initialize_board(self):\n seed = self.seed and self.seed.any()\n if not (self.shape or seed):\n raise Exception(\"Either a shape or a seed is required.\")\n\n elif self.shape and seed:\n # Center the seed on a game board\n board = self._center_seed(self.shape, self.seed)\n\n elif self.shape:\n # The probability a cell starts off dead\n prob_dead = [1 - self.game.weight]\n # Class probabilities for live cells\n probs_alive = [self.game.weight * (1/self.classes)] * self.classes\n\n board = np.random.choice(\n self.classes + 1,\n np.prod(self.shape),\n p = prob_dead + probs_alive\n ).reshape(self.shape)\n \n else: # Only a seed is given\n self.shape = self.seed.shape\n board = self.seed\n\n self.array = board\n self.start_array = board\n self.prev_array = None", "def game_setup(self):\n self.deck = Shoe(6)\n self.initial_draw()\n self.pot = ask_for_bet(self.player.money)\n show_table(self.player, self.dealer, self.pot)\n self.surrender_and_insurance()", "def __init__(self, master=None):\n super().__init__(master)\n self.masterframe = Frame(self.master)\n self.masterframe.pack()\n self.grid = Grid(3)\n self.canvasSize = 100\n self.mX = 0\n self.mY = 0\n self.working = True\n # True is Player 1 and False is Player 2\n self.turn = True\n self.createCanvas()", "def __init__(self, board_width, board_height):\n # Create board using generate_grid_dict method with given width and height.\n board = self.generate_grid_dict(board_width, board_height)\n self.draw = False\n pygame.init()\n\n # Set the caption for the board and the font for the win prompt.\n pygame.display.set_caption('Connect4 - Player 1')\n self.game_font = pygame.freetype.Font(\"SF Distant Galaxy.ttf\", 40)\n\n # size of each square of the grid:\n self.square_size = 80\n # generate board width (amount of squares and square width):\n self.width = board_width * self.square_size\n # generate board height (amount of squares and square height):\n self.height = board_height * self.square_size\n # generate the radius of the chips depending on the square size:\n self.radius = int(self.square_size / 4)\n # find the middle of the square for chip placement:\n self.square_mid = int(self.square_size / 2)\n # set the screen size with the board width and height:\n self.screen = pygame.display.set_mode((self.width, self.height))\n\n # Fill the screen with a white background.\n background = pygame.Surface(self.screen.get_size())\n background.fill((255, 255, 255))\n self.background = background.convert()\n\n # Build the grid.\n for i in range(0, self.width, self.square_size):\n pygame.draw.rect(self.background, (0, 0, 0), (i, 0, 0, self.height))\n for i in range(0, self.height, self.square_size):\n pygame.draw.rect(self.background, (0, 0, 0), (0, i, self.width, 0))\n self.screen.blit(self.background, (0, 0))\n\n # Setup, so player one starts.\n self.playerOne = True\n self.red = 250\n self.blue = 0\n\n # Help dict for logic, when drawing a chip. 
Maps 0 -> size, 1 -> size - 1...\n self.draw_dict_mapping = {}\n for i in range(self.height//80 + 1):\n self.draw_dict_mapping[i] = self.height//80 - i\n\n # Start the game with the run game method.\n self.run_game(board)", "def setup(self):\n self.board[(3, 3)] = -1\n self.board[(3, 4)] = -1\n self.board[(4, 3)] = 1\n self.board[(4, 4)] = 1\n\n self.stones_set = 4", "def init(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n self.running = True\n self.state = \"waiting\"\n\n self.button_text = \"Solve!\"\n self.button_color = BUTTON_COLOR_SOLVE\n self.puzzle_state = \"unsolved\"\n\n if not self.checkBoardValid():\n self.state = \"failed\"\n self.puzzle_state = \"failed\"\n self.button_text = \"Impossible puzzle!\"", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def __init__(self, player, board):\n self.player = player\n self.board = board", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def setup_game(self, player, opponent):\n\n self.display.clear_screen()\n\n ship_index = 0\n\n while not player.ready(len(self.SHIP_INFO)):\n # prints the currrent board\n board = self.display.construct_player_board(player, opponent, True)\n self.display.print_board(board)\n\n ship_name, ship_length = self.SHIP_INFO[ship_index]\n ship_to_add = Ship(ship_name, ship_length)\n\n try:\n player.add_ship(ship_to_add)\n except Exception as e:\n ship_to_add = player.ships[ship_index]\n\n origin, orientation = self.display.prompt_for_ship_placement(\n ship_to_add)\n\n try:\n player.place_ship(ship_to_add, origin, orientation,\n self.BOARD_SIZE)\n except ValueError as ve:\n self.display.clear_screen()\n print(ve)\n print()\n continue\n\n self.display.clear_screen()\n ship_index += 1\n self.display.prompt_switch(opponent.name)", "def __init__(self):\n self.board = Board()\n #self.player1 = player1\n #self.player2 = player2\n self.winner = None", "def __init__(self):\n # create game object\n self.game = Game()\n self.players = (\"X's\", \"O's\")\n\n # define string constants for UI\n self.BG_COLOR = \"#DBF6E9\"\n self.FONT = \"Verdana\"\n self.PROMPT = \"{0}, it's your turn.\"\n self.SCORE_LABEL = \"{0}: {1}\"\n self.TIE_LABEL = \"Ties: {0}\"\n\n # create window and instructions at the top\n self.window = tk.Tk()\n self.window.title(\"Tic-tac-toe\")\n self.window.configure(padx=30, pady=30, bg=self.BG_COLOR)\n self.window.geometry(\"450x450\")\n self.instructions = self.create_label(self.window, self.PROMPT.format(self.players[self.game.whose_turn]))\n self.instructions.grid(row=0, column=0)\n # create score frame to hold results of previous games in this session\n self.score_frame = tk.Frame(self.window, bg=self.BG_COLOR)\n self.score_frame.grid(row=1, column=1, padx=20, pady=20, sticky='n')\n self.score_label = self.create_label(self.score_frame, 'Score')\n self.score_label.grid(row=0, column=0, sticky='w')\n self.player_0_score_label = 
self.create_label(self.score_frame,\n self.SCORE_LABEL.format(self.players[0], self.game.player_0_score))\n self.player_0_score_label.grid(row=1, column=0)\n self.player_1_score_label = self.create_label(self.score_frame,\n self.SCORE_LABEL.format(self.players[1], self.game.player_1_score))\n self.player_1_score_label.grid(row=2, column=0)\n self.num_ties_label = self.create_label(self.score_frame, self.TIE_LABEL.format(self.game.num_ties))\n self.num_ties_label.grid(row=3, column=0, sticky='w')\n # create game frame; each of the nine squares on the grid is represented as a button\n self.game_frame = tk.Frame(self.window)\n self.game_frame.grid(row=1, column=0, pady=20)\n self.button_list = self.create_buttons()\n self.place_buttons()\n\n self.window.mainloop()", "def __init__(self, player1, player2):\n self.players = [player1, player2]\n self.tokens = {\n ' ': ' ',\n player1: 'X',\n player2: 'O',\n }\n self.score = {\n player1: 0,\n player2: 0,\n }\n self.moves = None\n self.winner = None\n self.turn = ''\n self.reset()", "def play_game(self, x_player, o_player):\n #from connectfour import Player #either from connect4_player or connectfour\n \n current_side = \"X\"\n players = {\"X\": x_player, \"O\": o_player}\n while ((not self.win_for(\"X\")) and\n (not self.win_for(\"O\")) and\n (not self.is_full())):\n print()\n print(self)\n print()\n move = Board.INVALID_MOVE\n while not self.allows_move(move):\n if players[current_side] == \"human\":\n move = int(input(current_side + \"'s move: \"))\n else:\n move = players[current_side].next_move(self)\n print(\"Computer playing for \" + current_side +\n \" plays at \" + str(move))\n\n self.add_move(move, current_side)\n if current_side == \"X\":\n current_side = \"O\"\n else:\n current_side = \"X\"\n\n if self.win_for(\"X\"):\n print(\"X wins --- congratulations!\\n\")\n elif self.win_for(\"O\"):\n print(\"O wins --- congratulations!\\n\")\n else:\n print(\"Tied game!\\n\")\n\n print()\n print(self)", "def __init__(self, parent, board, Player1, Player2, *args, **kwargs):\n\n\t\tself.button = [[\"\" for x in range(8)] for y in range(8)]\n\t\t# self.board = Board.Board()\n\t\tself.initialButtonPush = ()\n\t\tself.buttonPushed = \"\"\n\t\tself.nextButton = ()\n\n\t\t# setup tkinter frame\n\t\ttk.Frame.__init__(self, parent, *args, **kwargs)\n\t\tparent = parent\n\t\t# call setup functions\n\t\tself.setupButtonImages()\n\t\tself.setupBoard()\n\t\tself.NewBoard = board\n\t\tself.Player1 = Player1\n\t\tself.Player2 = Player2\n\t\t\n\t\tself.updateBoard()", "def __init__(self, screen, win_size_x, win_size_y, player_num):\n self._player_list = list(Player(f\"Player {x}\") for x in range(player_num))\n self._player_num = player_num\n print(\"The number of players is: \", player_num)\n self._screen = screen\n self._player_turn = 0\n self._ui_player_turn = UI((1820, 10), (100, 50), f\"Player {self._player_turn}\")\n self._screen.blit(self._ui_player_turn.update(\n f\"Player {self._player_turn}\"), self._ui_player_turn._location)\n self._core_deck = Deck(\"Deck/test_deck.txt\")\n self._war_deck = []\n self._map = Map(self._player_num, win_size_x, win_size_y)\n self._clock = pygame.time.Clock()\n self._run = True\n self._fps = 30\n self.each_player_draws_hand(self._core_deck)", "def __init__(self):\n\n self.frameCount = 0\n self._initScreen()\n self._initObjects()\n self._initControls()\n self._initLevel()\n self._start()\n print \"DEBUG: Initializing Game\"\n pass", "def reset_board(self):\n self.board = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n self.turn = 0\n\n 
self.change_button_img_to_null()\n\n #self.Score_Label.grid(row=0,column=1, ipadx=32)\n\n self.player_highlight()\n self.change_button_state('normal')\n self.update_score()", "def __init__(self, num_players):\n self.num_players = num_players\n self.firework = [[], [], [], [], []]\n self.nb_blue_stone = MAX_BLUE_STONE\n self.nb_red_stone = MAX_RED_STONE\n self.draw = None\n self.hands = None\n self.fill_draw()\n random.shuffle(self.draw)\n self.discard = []\n self.draw_initial_hands()", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def __init__(self, width, height, player, opponent):\r\n self.height = height\r\n self.width = width\r\n self.board = []\r\n for x in range(height):\r\n self.board.append([])\r\n for y in range(width):\r\n self.board[x].append(\"\\t\")\r\n self.player = player\r\n self.opponent = opponent\r\n self.lastMove = (0, 0)", "def start(self):\n # store a sign controlling addition or subtraction so pieces move in the right direction\n self.board = fen_to_position(self.game.fen)\n self.transposition_table = dict()", "def reset(self):\n self.board = Board()\n self.winner = None", "def reset(self):\r\n self.board = [[0 for i in range(self.width)]\r\n for i in range(self.height)]\r\n self.new_tile()\r\n self.new_tile()", "def __init__(self):\n self.start()\n while self.player.money > 0:\n self.game_loop()\n if self.player.money > 0:\n if not play_again():\n break\n elif self.player.money == 0:\n no_more_money()\n self.reset_table()", "def initial_board():\n board = [OUTER] * 100\n for i in Othello.squares():\n board[i] = EMPTY\n # The middle four squares should hold the initial piece positions.\n board[44], board[45] = BLACK, WHITE\n board[54], board[55] = WHITE, BLACK\n return board", "def __init__(self, players=None):\n self.game = Game()\n if players:\n self.player1 = players[0]\n self.player2 = players[1]\n else:\n self.player1 = Player('X')\n self.player2 = Player('O')\n self.record = Record()\n self.winning_moves = []", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def start(self):\n # asserts preconditions are met\n #assert self.validGameSettings()\n\n #draws initial welcome screen\n #self._text = GLabel(text=\"Press 'S' to Play\")\n #self._text.draw(self.view)\n\n # initializing instance variables\n self.setState(STATE_INACTIVE)\n self.setWave(None)\n self.setText(None)\n self.lastkeys = 0 #ADD MORE ATTRIBUTES\n\n # draws iniital welcome screen\n self.welcomeScreen()", "def __init__(self,player1: Player = ManualPlayer(\"P1\"),\\\r\n player2: Player = ManualPlayer(\"P2\")):\r\n\r\n self.board = np.zeros((BOARD_SIZE,BOARD_SIZE)\\\r\n ,dtype=np.int8)\r\n self.board[3,3] = '2'\r\n self.board[4,4] = '2'\r\n self.board[3,4] = '1'\r\n self.board[4,3] = '1' \r\n\r\n self.players = []\r\n self.players.append(player1)\r\n self.players.append(player2)\r\n self.turn = 1\r\n self.count = 0", "def play(self):\n board = Board()\n print(\"Let's play tic-tac-toe against computer!\")\n print(\"Here is your board!\")\n count = 1\n print(board)\n while True:\n board.person_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship won!\")\n board.make_computer_move()\n status = board.get_status()\n if status == 'x' or status == '0':\n return(f\"Winner is {status}\")\n elif status == 'draw':\n return(\"Friendship 
won!\")\n print(f\"Board after {count} action.\")\n count += 1\n print(board)", "def __init__(self, board, turn):\n self.player = turn\n self.roll = self.roll_dice()\n #array of applied board states\n self.moves = []\n self.board = board\n self.generate_valid_moves()", "def initial_state():\n board = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n return board", "def __init__(self):\n self.boards = [[False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False]]", "def __init__(self):\n self.boards = [[False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False],\n [False, False, False, False, False, False, False, False, False]]", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def __init__(self):\n self.played_pos = []\n self.grid = [['-', '-', '-'],\n ['-', '-', '-'],\n ['-', '-', '-']]\n self.player_played_pos = {'p1': set(), 'p2': set()}", "def start():\n display_board()\n print(\"\\n\")\n y_n_prompt()", "def reset(self):\n self.board = Board()\n self.update()\n self.computer_first = randint(0, 1)\n if self.computer_first:\n self.move(randint(0, self.board.size - 1),\n randint(0, self.board.size - 1))", "def start(self):\n self.playing = True\n self.table = table.Table(self.players)" ]
[ "0.76133895", "0.74191046", "0.7272769", "0.7215476", "0.70663446", "0.7060645", "0.7049015", "0.7029267", "0.69543284", "0.69497305", "0.692294", "0.6910041", "0.6903089", "0.6872871", "0.6863269", "0.68311656", "0.68238926", "0.68191206", "0.6769023", "0.67465365", "0.67353773", "0.6735311", "0.6722465", "0.6718771", "0.6690097", "0.66767114", "0.66105705", "0.65862644", "0.65823096", "0.6577102", "0.65610796", "0.65481275", "0.65418315", "0.6535146", "0.6523749", "0.65145266", "0.65011114", "0.6476084", "0.64714104", "0.6470568", "0.6452707", "0.6451656", "0.64408195", "0.6436849", "0.643481", "0.6429563", "0.64281636", "0.64098865", "0.64046603", "0.63997084", "0.63976705", "0.6365669", "0.6364156", "0.6357598", "0.635603", "0.63416636", "0.6334285", "0.63112533", "0.6305934", "0.6304511", "0.6300643", "0.63001174", "0.6299671", "0.629563", "0.62948847", "0.628704", "0.6286675", "0.6286281", "0.6280643", "0.6272809", "0.62654567", "0.62516403", "0.6242899", "0.6237107", "0.62330437", "0.6206097", "0.6199669", "0.6189039", "0.61612475", "0.6160348", "0.6159054", "0.61552453", "0.61494493", "0.6147866", "0.6128221", "0.6123399", "0.6121669", "0.61187667", "0.6113047", "0.6112451", "0.6104364", "0.6100864", "0.6092064", "0.6090432", "0.6090432", "0.6089558", "0.60893905", "0.6086315", "0.60845226", "0.6084323" ]
0.6897718
13
Switches whose turn it is.
def switchPlayer(self):
        if (self.current is Piece.EX):
            self.current = Piece.OH
        else:
            self.current = Piece.EX
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _switch_turn(self, cur_player):\n if cur_player == \"W\":\n self._turn = \"B\"\n else:\n self._turn = \"W\"", "def turn(self):\n pass", "def get_switches(self) -> tuple:\n return self.switches", "def changeTurn(self):\n\t\tif self.turn == 1:\n\t\t\tself.turn = 2\n\t\telse:\n\t\t\tself.turn = 1", "def getSwitch(self, projNumber):", "def set_switches_from_rule_nbr(self):\n for rule_switch, enabled in zip(CA_World.bin_0_to_7, self.int_to_8_bit_binary(self.rule_nbr)):\n SimEngine.gui_set(rule_switch, value=(True if enabled=='1' else False))", "def toDo(switch):\n func = \"InitSwitches.\" \"switch\" + str(switch)\n eval(func + \"()\")", "def set_switch(self, value):\n act = SwitchAction(self, value)\n return act.invoke()", "def do_attribute_switches(self):\n event = None\n if self.rhs:\n event = self.get_event_from_args(self.rhs, check_host=True)\n else:\n proj = self.project\n if not proj:\n raise self.CalCmdError(\n \"You must use /create first or specify an event.\"\n )\n if \"largesse\" in self.switches:\n return self.set_largesse(event)\n if \"date\" in self.switches or \"reschedule\" in self.switches:\n return self.set_date(event)\n if \"location\" in self.switches:\n return self.set_location(event)\n if \"desc\" in self.switches:\n return self.set_event_desc(event)\n if \"roomdesc\" in self.switches:\n return self.set_room_desc(event)\n if \"plotroom\" in self.switches:\n return self.set_plotroom(event)\n if \"private\" in self.switches:\n return self.set_private(event)\n if \"host\" in self.switches:\n return self.add_or_remove_host(event)\n if \"gm\" in self.switches:\n return self.add_or_remove_gm(event)\n if \"invite\" in self.switches:\n return self.invite_org_or_player(event)\n if \"uninvite\" in self.switches:\n return self.uninvite_org_or_player(event)\n if \"action\" in self.switches:\n return self.set_crisis_action(event)\n if \"risk\" in self.switches:\n return self.set_risk(event)", "def turn(self):\n return self._turn", "def update_turn(self):\n pass", "def _flip_turn(self):\n self._turn = self._next_turn\n return self._turn", "def switch(self) -> bool:\n return bool(self.pressed & 0x1)", "async def get_switches(self):\n return await self.get_states_by_tag_prefix(\"led\")", "def pswitchon(chan) :\n s.phaseSwitching(True, chan)", "def get_on_turn(self):\n return deepcopy(self.__on_turn)", "def read_switch(self):\n return GPIO.input(SWITCH_PIN)", "def switches(self):\n return {k:v for k, v in self._data.items() \n if v[\"type\"] == \"SWITCH\"}", "def switch_turn(self):\n if self.completed is not None:\n self.current_turn = self.creator if self.current_turn != self.creator else self.opponent\n self.save()", "def _get_turn(self):\n raise NotImplementedError", "def change_mode(self):\n return (self.mode + 1) % 2", "def keyed_rotor(self):\n turnover = self.on_turnover()\n self.inc_rotor_setting()\n return turnover", "def internal_switch(self) -> s11.InternalSwitch:\n return self.calobs.internal_switch", "def op_set(op):\r\n\tglobal fo\r\n\tif op == cat:\r\n\t\tfo = cat.who()\r\n\t\treturn cat_turn\r\n\telif op == ninja:\r\n\t\tfo = ninja.who()\r\n\t\treturn ninja_turn\r\n\telse:\r\n\t\tprint \"This is no one's turn!!!\"", "def TURN_OPTIONS() -> tuple:\n return \"Hit me! 
(Draw another Card)\", \"Stand (End round, stop drawing)\"", "def list_switches(self):\n return [x for x,y in self.devices.items() if y.device_type == \"Switch\"]", "def is_switch(G):\n return False", "def get_all_switches(name):\n return [False,False,False,False] #TODO Implement", "def switch_pseudocode(self, *args):\n return _ida_hexrays.Hexrays_Hooks_switch_pseudocode(self, *args)", "def multipleSwitch(randomhouse):\n\n randomBatteries = randomhouse.possible_connections\n\n randomBatteries.sort(key=lambda x: x[1])\n currentbattery = randomhouse.connection\n for randomBattery in randomBatteries:\n rb = randomBattery[0]\n if rb != currentbattery:\n if rb.capacity >= randomhouse.output:\n switch(randomhouse, rb)\n return", "def get_switch(self, conf, dpid):\n\t\tpass", "def change_state_controller(self, switches):\n index = self.__index\n for dummy in range(len(switches)):\t\t\n index = index ^ (2**(switches[dummy] - 1))\n\n for state in self.__next_state:\n if any(control[0] == 'control' for control in state.__control_list) \\\n and all(~(state.__index ^ index) & 2**(switches[dummy] - 1) for dummy in range(len(switches))):\n state.__active_control = None\n self.__active_control = None\n return state\n return None", "def swap(self, *args, **kwargs):\n return self.switch(*args, **kwargs)", "def _next_turn(self):\n return self.TURNS[self._turn is self.BLACK]", "def changeTurn(self):\r\n # Undoes clicked pieces and removes potential moves from the bar area\r\n for point in self.points:\r\n if point.isClicked():\r\n point.undoClick()\r\n if not self.bar[self.currentPlayer].isEmpty():\r\n self.undoPossibleBarMoves()\r\n \r\n # Changes by turn changing the currentPlayer index and updating all the \r\n # necessary board objects\r\n self.currentPlayer = (self.currentPlayer + 1) % 2\r\n self.dice.makeActive()\r\n for point in self.points:\r\n point.setActiveTurn()\r\n for bar in self.bar:\r\n bar.update()\r\n bar.setActiveTurn()\r\n self.message.resetText('It\\'s ' + self.getCurrentString() + '\\'s turn!', self.surface)\r\n self.turnchanger.setFillColor(self.getTurn())\r\n self.turnchanger.draw(self.surface)\r\n pygame.display.flip()", "def incTurn(self):\n self.turnOn = (self.turnOn+1)%self.turns", "def init_turn(self):\n self.before_turn_switches()\n if self.battlefield.win is not None:\n return\n self.before_turn()", "def switch_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.switch()", "def __process_xx_switch_arg(self, argument):\n _method_name = '__process_xx_switch_arg'\n\n match = self.__xx_args_switch_regex.match(argument)\n xarg = match.group(2)\n on_or_off = match.group(1)\n if on_or_off == '+':\n on_or_off_text = 'on'\n else:\n on_or_off_text = 'off'\n\n if 'switch' not in self.__xx_args:\n self.__xx_args['switch'] = OrderedDict()\n self._logger.finer('WLSDPLY-08304', argument, xarg, on_or_off_text,\n class_name=self._class_name, method_name=_method_name)\n self.__xx_args['switch'][xarg] = on_or_off", "def turn():\n \n robottype = get_type()\n if robottype == RobotType.PAWN:\n pawn_turn()\n else:\n overlord_turn()\n bytecode = get_bytecode()", "def turn():\n \n robottype = get_type()\n if robottype == RobotType.PAWN:\n pawn_turn()\n else:\n overlord_turn()\n bytecode = get_bytecode()", "def swint(self) -> None:", "def check_switching_action(self):\n current_switching = {}\n for devices in self.switching_systems:\n command = self.build_command(devices, \"get_closed_channels\")\n switching = str(self.vcw.query(devices, command)).strip()\n switching = 
self.pick_switch_response(devices, switching)\n current_switching.update({devices[\"Device_name\"]: switching})\n self.settings[\"settings\"][\"current_switching\"][\n devices[\"Device_name\"]\n ] = current_switching\n return current_switching", "def get_turn(self):\n return self._turn", "def switch_player(player):\n if player == PLAYERX:\n return PLAYERO\n else:\n return PLAYERX", "def switch_player(player):\n if player == PLAYERX:\n return PLAYERO\n else:\n return PLAYERX", "def _advance_turn(self):\n\n self.__turn_info['turn'] = ChessGame.BLACK if\n self.__turn_info['turn'] == ChessGame.WHITE else ChessGame.WHITE", "def switchPlayer(self):\n\n \n tmp = self.current\n self.current = self.other\n self.other = tmp\n\n self.topSelector.toggleActive()\n self.bottomSelector.toggleActive()", "def begin_turn(self):\n pass", "def turn(self, dir):\n if dir.upper() == 'R':\n if self.direction == 3:\n self.direction = 0\n else:\n self.direction += 1\n if dir.upper() == 'L':\n if self.direction == 0:\n self.direction = 3\n else:\n self.direction -= 1", "def lightning_turnon(self):\n self.turnOn()", "def Steering(x1):\n #RETURNS A LIST WITH THE RIGHTSPEED and LEFTSPEED for the appropriate turn\n \n LeftSp=127\n RightSp=127\n\n if x1 < 320:\n print \"I'm turning Left\"\n LeftSp = 50\n RightSp = 240\n\n if x1 > 320:\n print \"I'm turning Right\"\n LeftSp = 240\n RightSp = 50\n\n return [RightSp, LeftSp]", "def test_switch_returns(self):\n\n #Player 1 and Player 2 are represented by 1 and -1\n #Multiplying current_player by -1 will flip them\n current_player = self.controller.game_state.player * -1\n\n #after running flip_current_player function in the controller,\n # test current player\n self.assertEqual(self.controller.game_state.flip_current_player(),\n current_player)", "def computer_turn(self):\r\n\r\n print(\r\n '\\nTURN: Computer -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=')\r\n\r\n # Scan through memory to see if the computer already knows a matching pair\r\n loc1, loc2 = self.computer_scan_memory()\r\n\r\n if loc1 and loc2: # when there is a pair inside the computer's memory\r\n x1, y1 = loc1\r\n x2, y2 = loc2\r\n\r\n # Check point\r\n assert x1 != None and y1 != None, 'x1 or y1 is None type'\r\n assert x2 != None and y2 != None, 'x2 or y2 is None type'\r\n\r\n choice1_key = self.state[x1, y1]\r\n choice2_key = self.state[x2, y2]\r\n else: # when there is no pair inside the computer's memory\r\n # Randomly select one card then scan memory\r\n x1, y1 = self.computer_random_select()\r\n choice1_key = self.state[x1, y1]\r\n\r\n # Scan through memory\r\n loc = self.computer_scan_memory(pick=choice1_key)\r\n\r\n if loc and (x1 != loc[0] or y1 != loc[1]): # there is a common value card in the computer's memory\r\n x2, y2 = loc # and that memory is not the same as the first choice\r\n\r\n # Check point\r\n assert x2 != None and y2 != None, 'x2 or y2 is None type'\r\n\r\n choice2_key = self.state[x2, y2]\r\n else: # There is no common value in the computer's memory\r\n while True: # select a card different from the first choice\r\n x2, y2 = self.computer_random_select()\r\n if x2 != x1 or y2 != y1:\r\n break\r\n choice2_key = self.state[x2, y2]\r\n\r\n print('First choice: {0} ({1}, {2})'.format(self.deck[choice1_key], x1, y1))\r\n print('Second choice: {0} ({1}, {2})'.format(self.deck[choice2_key], x2, y2))\r\n\r\n # Check if it is a match or not\r\n if self.check_card(self.deck[choice1_key], self.deck[choice2_key]):\r\n print('MATCH')\r\n # Replace the 
corresponding cards in the remaining inventory and current state with -1\r\n self.remaining[choice1_key] = -1\r\n self.remaining[choice2_key] = -1\r\n self.state[x1, y1] = -1\r\n self.state[x2, y2] = -1\r\n self.computer_cards += 2 # the computer gets 2 cards\r\n self.bin.append([x1, y1]) # move the location of the card to the already-taken bin\r\n self.bin.append([x2, y2])\r\n self.forget_memory(choice1_key) # remove from computer's memory\r\n self.forget_memory(choice2_key)\r\n self.match = 0 # The computer will continue to choose cards\r\n else:\r\n print('NOT a match')\r\n # Add these cards to the computer's memory\r\n self.computer_memory[choice1_key] = [x1, y1]\r\n self.computer_memory[choice2_key] = [x2, y2]\r\n self.match = 1 # The player's turn\r", "def __checkSwitch ( self, letter, value ):\n\n #-- 1 --\n # [ if letter is a key in self.switchMap -> I\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n if not self.switchMap.has_key ( letter ):\n usage ( self.switchSpecs, self.posSpecs,\n \"No such switch: -%s\" % letter )\n\n #-- 2 --\n if len(value) == 0:\n self.switchMap[letter] = 1\n else:\n self.switchMap[letter] = value", "def change_player_turn(self):\r\n self._player_turn *= -1", "def test_switch(self):\n\n # Switch to the 'orig' data pipe.\n pipes.switch('orig')\n\n # Test the current data pipe.\n self.assertEqual(pipes.cdp_name(), 'orig')\n\n # Switch to the 'empty' data pipe.\n pipes.switch('empty')\n\n # Test the current data pipe.\n self.assertEqual(pipes.cdp_name(), 'empty')", "def turn(self):\n return repr(self._turn)", "def turn(self, **kwargs: Any) -> None:\n pass", "def get_limit_switch(name,switch):\n return False #TODO Implement", "def on_turnover(self):\n return True if self.rotor_setting in self.turnover_characters else False", "def _do_switch(self, runner, recipe, extra_info=''):\n if 'batch_augments' in recipe:\n self._switch_batch_augments(runner, recipe['batch_augments'])\n runner.logger.info(f'Switch batch augments{extra_info}.')\n\n if 'train_pipeline' in recipe:\n self._switch_train_pipeline(runner, recipe['train_pipeline'])\n runner.logger.info(f'Switch train pipeline{extra_info}.')\n\n if 'loss' in recipe:\n self._switch_loss(runner, recipe['loss'])\n runner.logger.info(f'Switch loss{extra_info}.')", "def get_all_switch(self, conf):\n\t\tpass", "def __init__(self):\r\n self.turn = 0", "def get_hw_switch_states(self):\n hw_states = dict()\n #k = self._kp.keypad()\n k = \"\"\n for number, sw in self.switches.items():\n if number == k:\n hw_states[number] = 1\n else:\n hw_states[number] = 0\n return hw_states", "def set_turn(self):\n if self.status == self.PLAYER_TURN:\n return\n self.status = self.PLAYER_TURN\n self.client.send_player_turn(10)", "def RunTurn( lobound=1, hibound=20 ):\n\tpass", "def _action_toggle(self, flag):\n if flag:\n return {\"toggle\": \"ON\"}\n else:\n return {\"toggle\": \"OFF\"}", "def switch_player(current, player1, player2):\r\n if current == player1:\r\n return player2\r\n else:\r\n return player1", "def turn_clockwise(a):\r\n if a==\"N\":\r\n return \"E\"\r\n elif a==\"E\":\r\n return \"S\"\r\n elif a==\"S\":\r\n return \"W\"\r\n elif a==\"W\":\r\n return \"N\"", "def state(self) -> bool:\n return self.get_state(self.entity_ids[\"switch\"])", "def switch(self, u_stuff, i_stuff):\n\n if self.sim_options['user_based']:\n return u_stuff, i_stuff\n else:\n return i_stuff, u_stuff", "def __toggle(self,x):\n\t\tif x == 1:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn 1", "def next_turn(self):\n if 
self.turn == BLUE and self.ai:\n self.ai_turn = True\n self.turn = RED\n elif self.turn == BLUE:\n self.turn = RED\n else:\n self.turn = BLUE\n\n self.selected_piece = None\n self.selected_legal_moves = []\n self.check_game_over()", "def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def switch(cond, ift, iff):", "def __switch_restriction_set(self, name: str):\n switch = {\n \"user\": self.userRestrictionCategories,\n \"role\": self.roleRestrictionCategories,\n \"channel\": self.channelRestrictionCategories\n }\n return switch[name]", "def state(self):\n # None will return False\n return bool(self.switch.value)", "def update_player_turn(self):\n\n if self.get_player_turn() != 'BLUE':\n\n self._player_turn = 'BLUE'\n\n else:\n\n self._player_turn = 'RED'", "def doTurn(self, gamestate):\n raise NotImplementedError(\"Please Implement this method\")", "def play_turn(self, cur_board):\n pass", "def turn_left(self):\n pass", "def _get_switch(self, switch):\n switch = self.switch_by_label(switch)\n id = self.switches[switch.label].id\n # make sure that the serial port is open\n self.assure_serial()\n # create command for the arduino and send it\n input_string = 'r' + str(id[0]) + str(id[1])\n self.serial.write(input_string.encode('ascii'))\n time.sleep(self.READ_DELAY)\n # retrieve result\n result = self.serial.readline().decode().rstrip()\n time.sleep(self.READ_DELAY)\n # store the indicators to the switch\n switch.indicators = (int(result[0]), int(result[1]))\n # raise error if the indicators show an error\n if switch.state is None:\n raise SwitchError(\"Reading the state was unsuccessful: Indicators \"\n f\"of the switch show {switch.indicators}.\")\n return switch.state", "def turn_right(self):\n pass", "def take_turn(self):\r\n self._choose_best_option()\r\n self._do_draw()", "def turn(self, turnDir):\n if turnDir == 0: # left\n if self.dir == \"N\":\n self.dir = \"W\"\n elif self.dir == \"W\":\n self.dir = \"S\"\n elif self.dir == \"S\":\n self.dir = \"E\"\n elif self.dir == \"E\":\n self.dir = \"N\"\n else:\n raise ValueError(\"invalid dir %s\" % self.dir)\n elif turnDir == 1: # right\n if self.dir == \"N\":\n self.dir = \"E\"\n elif self.dir == \"E\":\n self.dir = \"S\"\n elif self.dir == \"S\":\n self.dir = \"W\"\n elif self.dir == \"W\":\n self.dir = \"N\"\n else:\n raise ValueError(\"invalid dir %s\" % self.dir)\n else:\n raise ValueError(\"invalid turnDir %d\" % turnDir)", "def switch_to_state(self, state):\n self.switch_state = state", "def switchPlayer():\n\n #escrever o condicional do modo de jogo.\n if (modoDeJogo == \"1\" or modoDeJogo == 1):\n quemJoga = player[1]\n\n if (player[0] == quemJoga):\n quemJoga = \"pc\"\n else: \n quemJoga = player[0]\n\n return quemJoga #quemComeca\n else:\n quemJoga = player[2]\n\n if (player[0] == quemJoga):\n quemJoga = player[1]\n else: \n quemJoga = player[0]\n \n return quemJoga #quemComeca", "def get_current_turn(self):\n return self._turn", "def switcher_secret(self):\n return self._switcher_secret", "def get_turn(self):\n return self.__turn_info['turn']", "def __le__(self, *args):\n return _ida_hexrays.cswitch_t___le__(self, *args)", "def _sense_and_act(self):\n pass", "def get_switch_stringlist(self):\n return text_switch", "def switch_to_main(self):\n return main.switch()", "def tempo_mode_switch(event):\n value = gremlin.actions.Value(event.is_pressed)\n tempo_mode_switch_container(event, 
value)", "def the(target: Target) -> \"SwitchTo\":\n return SwitchTo(target)", "def getTurn(self):\r\n return self.players[self.getCurrentPlayer()].getColor()", "def switched_with(self):\n return self" ]
[ "0.68772453", "0.6781878", "0.6612293", "0.65573156", "0.6287292", "0.62218165", "0.6109247", "0.6078702", "0.6077556", "0.5993894", "0.5986418", "0.59811705", "0.5940087", "0.59389865", "0.59333205", "0.59090734", "0.5887211", "0.58654636", "0.5862874", "0.58553296", "0.5841399", "0.5838009", "0.5806558", "0.5773462", "0.57688683", "0.57571375", "0.57520115", "0.5721215", "0.57196224", "0.5717366", "0.57122654", "0.5701516", "0.5687098", "0.56653416", "0.56640774", "0.56614393", "0.5652782", "0.5649384", "0.5645647", "0.56424916", "0.56424916", "0.5641763", "0.562853", "0.56163853", "0.5602817", "0.5602817", "0.5593657", "0.5585743", "0.5580623", "0.5570371", "0.55684465", "0.55560786", "0.5554986", "0.55540043", "0.5540065", "0.5536568", "0.55251193", "0.5517301", "0.5509242", "0.5506791", "0.55039185", "0.54923385", "0.54886043", "0.5488171", "0.5486333", "0.5484282", "0.5483648", "0.5464573", "0.5445681", "0.5441241", "0.54407334", "0.543451", "0.54331815", "0.5423839", "0.54155165", "0.54118997", "0.5401977", "0.53860795", "0.5378013", "0.53747207", "0.5373839", "0.5367682", "0.5359754", "0.53596544", "0.5353335", "0.53507483", "0.5350191", "0.5343651", "0.53433496", "0.53419024", "0.53406274", "0.53352916", "0.5333471", "0.53216046", "0.53169477", "0.53163195", "0.53147125", "0.5307582", "0.5299107", "0.5297001" ]
0.5775022
23
Tries to make a move. If the move is successful, returns 1. If the move string cannot be interpreted correctly, or if that place is already full, the move fails and the function returns 0
def makeMove(self, move):
        try:
            if (self.board[int(move) - 1] is Piece.BLANK):
                self.board[int(move) - 1] = self.current
                return 1
            else:
                return 0
        except:
            return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_valid(move):\n return True", "def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)", "def test_check_move_with_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] + [\" \"] * 5,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 3)\n self.assertTrue(valid)", "def attempt_move(self, move_input):\n # handle undo move\n if move_input == ['UN', 0, 'UN']:\n self.undo_move()\n return True\n\n # handle stock draw Special Action first\n if move_input == ['S0', 0, 'S0']:\n self.save_board_state()\n self.stock.deal_to_wp(self.wp)\n self.moves += 1\n return True\n\n # handle basic cases\n if len(move_input) != 3:\n return False\n if move_input[0] not in self.move_dict or move_input[2] not in self.move_dict:\n return False\n if type(move_input[1]) is not int:\n return False\n if move_input[2] == \"W0\":\n return False\n\n orig_pile = self.move_dict[move_input[0]]\n orig_ind = move_input[1]\n dest_pile = self.move_dict[move_input[2]]\n if orig_ind >= orig_pile.get_length():\n return False\n\n # handle flip tableau card Special Action\n if move_input[0][0] == 'T' and orig_pile == dest_pile and orig_ind == 0:\n orig_pile.reveal_top_card()\n\n # basic conditions have been met\n adj_ind = orig_pile.get_length() - orig_ind - 1\n if orig_pile.is_valid_retrieval(orig_ind):\n self.save_board_state()\n move_pile = orig_pile.remove_cards(orig_ind + 1)\n if dest_pile.is_valid_placement(move_pile):\n dest_pile.merge_pile(move_pile)\n if move_input[0][0] == 'T' and self.auto_flip_tab:\n orig_pile.reveal_top_card()\n self.moves += 1\n return True\n else:\n orig_pile.merge_pile(move_pile)\n self.board_states.pop()\n return False\n return False", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = 
\"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def check_move(self, move):\n\n if str(move) in self.moves_made:\n return False\n return True", "def CheckMove(self,move):\n\t\tif(move=='w'):\n\t\t\tif(self.x==0):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='s'):\n\t\t\tif(self.x==15):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='d'):\n\t\t\tif(self.y==35):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='a'):\n\t\t\tif(self.y==0):\n\t\t\t\treturn 0\n\t\t\treturn 1", "def check4move(st, selected_unit, direction):\n return 1", "def test_check_move_with_barely_valid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 5 + [\" \"],\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertTrue(valid)", "def make_legal_move(move, board, start, end, move_number, en_passant_square, king_w, king_b, rook_w_l, rook_w_r,\n rook_b_l,\n rook_b_r, promotion_piece):\n # this function should have essentially what i had in main at first without making the move\n test_board = board.copy()\n valid_move = False\n piece = find_piece(board, start)\n end_piece = find_piece(board, end)\n\n if switch_player(move_number): # for whites move\n if (65 <= ord(piece) <= 90) and (validate_move(board, end, move_number)):\n if piece == \"P\":\n if pawn(board, start, end, move_number):\n if end[0] == 8:\n promotion(test_board, start, end, promotion_piece)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n promotion(board, start, end, promotion_piece)\n return True\n else:\n valid_move = False\n else:\n valid_move = True\n else:\n if can_en_passant(board, en_passant_square, end, start, move_number):\n execute_enpassant(test_board, start, end, move_number)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n execute_enpassant(board, start, end, move_number)\n return True\n else:\n return False\n elif piece == \"K\":\n if king(start, end):\n if controlled_squares(board, move_number, end):\n valid_move = True\n elif piece == \"N\":\n if knight(start, end):\n valid_move = True\n elif piece == \"B\":\n if bishop(board, start, end):\n valid_move = True\n elif piece == \"Q\":\n if queen(board, start, end):\n valid_move = True\n elif piece == \"R\":\n if rook(board, start, end):\n valid_move = True\n else:\n valid_move = False\n if valid_move:\n update_board(test_board, start, end)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n update_board(board, start, end)\n return True\n else:\n retract_move(test_board, start, end, end_piece)\n print(\"Illegal Move\")\n return False\n if not switch_player(move_number):\n if (97 <= ord(piece) <= 122) and validate_move(board, end, move_number):\n if piece == \"p\":\n if pawn(board, start, end, move_number):\n if end[0] == 1:\n promotion(test_board, start, end, promotion_piece)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n 
promotion(board, start, end, promotion_piece)\n return True\n else:\n valid_move = False\n else:\n valid_move = True\n else:\n if can_en_passant(board, en_passant_square, end, start, move_number):\n execute_enpassant(test_board, start, end, move_number)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n execute_enpassant(board, start, end, move_number)\n return True\n else:\n valid_move = False\n elif piece == \"k\":\n if king(start, end):\n if controlled_squares(board, move_number, end):\n valid_move = True\n elif piece == \"n\":\n if knight(start, end):\n valid_move = True\n elif piece == \"b\":\n if bishop(board, start, end):\n valid_move = True\n elif piece == \"q\":\n if queen(board, start, end):\n valid_move = True\n elif piece == \"r\":\n if rook(board, start, end):\n valid_move = True\n else:\n valid_move = False\n if valid_move:\n update_board(test_board, start, end)\n if not check(test_board, move_number):\n retract_move(test_board, start, end, end_piece)\n update_board(board, start, end)\n return True\n else:\n retract_move(test_board, start, end, end_piece)\n print(\"Illegal Move\")\n return False", "def validMove(move):\r\n\r\n\tglobal tile1, tile2, tile3, tile4, tile5, tile6, tile7, tile8, tile9\r\n\r\n\ta=eval(\"tile\"+str(move)+\"==0\")\r\n\treturn a", "def check_one_move(self):\n count_moves = 0\n one_move = None\n for direction in self.directions:\n if self.valid_move(self.loc, direction):\n count_moves += 1\n one_move = direction\n if count_moves != 1:\n return None\n return one_move", "def make_move(self, board: Board) -> int:\n\n move = input()\n move = int(move)\n\n while move not in board.get_valid_moves():\n print(\"That is not a valid move\")\n move = input()\n move = int(move)\n\n return move", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def takeNaiveMove():\r\n\tnotFound=True\r\n\twhile notFound:\r\n\t\tmove=random.randint(1,9)\r\n\t\tif validMove(move):\r\n\t\t\tnotFound=False\r\n\treturn move", "def valid_move(self, player, move):\n return (True)", "def is_valid_move(state, move):\n row, col = move\n if row not in [1, 2, 3] or col not in [1, 2, 3]:\n print(\"Invalid move! Specify correct game square!\")\n return False\n if state[row-1][col-1] != '_':\n print('Invalid move! Place your marker on a free square!')\n return False\n return True", "def getMove(player,first_move=False):\n while True: \n move = raw_input(\"MAKE YOUR MOVE: \").upper()\n\n # handle special commands\n if move == \"QUIT\" or move == \"Q\":\n wannaQuit()\n continue\n elif move == \"QUIT!\" or move == \"Q!\":\n if SAY_PROC:\n SAY_PROC.terminate()\n sys.exit()\n continue\n elif move == \"HELP\" or move == \"H\":\n help()\n continue\n elif move == \"HELP!\" or move == \"H!\":\n say(helpString())\n continue\n elif move == \"SHUTUP!\" or move == \"S!\":\n shutUp(fuck=True)\n continue\n elif move == \"SHUTUP\" or move == \"S\":\n shutUp()\n continue\n elif move == \"BOARD\" or move == \"B\":\n printBoard()\n continue\n elif move == \"CLEAR\" or move == \"C\": \n clearTerminal()\n printBoard()\n continue\n elif move == \"CLEAR!\" or move == \"C!\": # TODO board -> clear, you end up with a half drawn new board. 
clear again fixes this\n clearTerminalAndBuffer()\n printBoard()\n continue\n elif move == \"PASS\" or move == \"P\": \n if wannaPass():\n break\n else:\n continue\n elif move == \"PASS!\" or move == \"P!\": \n break\n \n # mostly used to catch blank lines or me typing ASFADS like an asshole\n if len(move) < 7:\n print \"That's too short to be a command.\"\n continue\n\n parts=move.split(\":\")\n if len(parts) != 3:\n print \"Can't find all the parts of the move command. Maybe you're missing/have too many \\\":\\\"?\"\n continue\n\n for item in parts:\n if len(item) == 0:\n print \"Found a blank command. Maybe you left in an extra \\\":\\\"?\"\n continue\n\n coords = parts[0].replace(\" \",\"\") # incase of space inbetween file and rank\n direction = parts[1].strip()\n word = parts[2].strip()\n\n if not coords[0].isalpha():\n print \"I don't know where to put your word (Bad file coord).\"\n continue\n\n if not coords[1:].isdigit():\n print \"I don't know where to put your word (Bad rank coord).\"\n continue\n\n x = gridCharToInt(coords[0])\n y = int(coords[1:]) - 1\n if 14 < x < 0 or 14 < y < 0:\n print \"Those aren't coords on the board. Valid Files are from A-O, valid Ranks are 1-15.\"\n continue\n\n if first_move:\n if x != 7 or y != 7:\n print \"The first move must start from the center (H8).\"\n continue\n\n #compact that command\n if direction == \"ACROSS\":\n direction = \"A\"\n elif direction == \"DOWN\":\n direction = \"D\"\n if direction != \"A\" and direction !=\"D\":\n print \"I don't know where to put your word (Across or Down?).\"\n continue\n \n score,placed_tiles = checkWords(x,y,direction,word,first_move)\n if not score: #error reporting is handling in check words\n continue\n else:\n for tile in placed_tiles:\n if not tile in player.rack:\n print \"You don't have the tiles to play that!\"\n continue\n player.rack.remove(tile)\n print player.name+\" scored \"+str(score)+\" on that last play!\"\n player.score+=score\n for tile in placed_tiles:\n player.rack.remove(tile)\n break #YAY", "def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return False\r\n \r\n return True", "def checkMove(guess, xPos, yPos):\n\n\t# Return 0 if x position or y position are not valid\n\tif(xPos not in range(0, 5) or yPos not in range(0, 5)):\n\t\treturn 0\n\n\t# Return 0 f the guessed position is not water\n\tif(guess[yPos][xPos] != \"~\"):\n\t\treturn 0\n\n\treturn 1", "def is_valid(self, move):\r\n return move > 10 and move < 89", "def validate_move(move):\n if move[0] in cc.VALID_RANKS and move[1] in cc.VALID_RANKS:\n valid = True\n else:\n valid = False\n return valid", "def make_move(self, move: Tuple[int, int]) -> MoveError:\n\n # Make sure our move is going to be valid\n if self.is_winner():\n return MoveError.GAME_WON\n\n elif move[0] >= self._board_size or move[0] < 0 or move[1] >= self._board_size or move[1] < 0:\n return MoveError.OUT_OF_RANGE\n\n elif self._board[move[1]][move[0]] != self.NEUTRAL_PLAYER:\n return MoveError.TAKEN\n\n # If we make it to here, then it is valid to make the move\n self._board[move[1]][move[0]] = self._players[self._current_player]\n self._number_of_moves = self._number_of_moves + 1\n 
self._last_move = move\n\n self._check_for_winner()\n\n # Only change who the player is if we didn't get a winner,\n # otherwise the final board's color will be wrong\n if not self.is_winner():\n self._current_player = (self._current_player + 1) % len(self._players)\n\n return MoveError.OKAY", "def is_valid_move(self, move):\n if type(move) == str:\n move = int(move)\n\n return move in self.get_possible_moves()", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def make_move(state: str, section_num: int, move: str) -> str:\n if move == wf.CHECK:\n check_result = wf.check_section(state, section_num)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = wf.change_state(state, section_num, move) \n return state", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def check_move(self, x, y):\n try:\n return self.map[self.y+y][self.x+x] == \" \" or [self.x+x, self.y+y] == self.end_pos\n except IndexError:\n return False", "def _get_move_result(self, unlocked_before_move : bool, err = None):\n if err:\n return Moveresult.INVALID\n elif self.current_turn.entity in self.game_state.get_completed_characters():\n return Moveresult.EXIT\n elif self.game_state.is_character_expelled(self.current_turn.entity):\n return Moveresult.EJECT\n elif self.game_state.is_current_level_unlocked() and not unlocked_before_move:\n return Moveresult.KEY\n else:\n return Moveresult.OK", "def human_move(board,player):\r\n \r\n s = input(\"Please input a legal move in a format of \\\"current_position-landing_position\\\", if the move is cantering or plain. In case of a capturing move, follow \\\"current_position-landing_position-enemy piece\\\": \")\r\n move = s.split('-')\r\n legal = legal_moves(board,player)\r\n execution(move,legal,board,player)", "def make_move(self, start, end):\r\n start_pos = self.parse_pos(start) # Start and end position are lists that contain column and row\r\n end_pos = self.parse_pos(end)\r\n\r\n start_row = start_pos[0] # Position of row and columns are assigned to variables\r\n start_col = start_pos[1]\r\n end_row = end_pos[0]\r\n end_col = end_pos[1]\r\n\r\n board = self._board.get_board()\r\n start_piece = board[start_row][start_col].get_piece()\r\n end_piece = board[end_row][end_col].get_piece()\r\n\r\n\r\n # If there is no piece to be moved or game is over or piece is to be moved to its original location\r\n if start_piece is None or self._game_state != \"UNFINISHED\"\\\r\n or (start_row == end_row and start_col == end_col):\r\n return False\r\n\r\n start_piece_id = start_piece.get_player_id() # Contains the player id associated with the piece\r\n end_piece_player_id = None\r\n if end_piece is not None: # Executes if end piece contains a piece object\r\n end_piece_player_id = end_piece.get_player_id()\r\n\r\n # If Red's turn\r\n if self._player_turn == 1:\r\n if start_piece_id != 'r': # If red moves a black piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board) : # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n # Checks if move violates flying general and puts self in check\r\n if self.is_not_flying_general() is True and self.is_in_check(\"red\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, 
board,end_piece_player_id, end_piece)\r\n return False\r\n\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n # If Black's turn\r\n elif self._player_turn == -1:\r\n if start_piece_id != 'b': # If black moves a red piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board): # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n if self.is_not_flying_general() is True and self.is_in_check(\"black\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, board, end_piece_player_id, end_piece)\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False", "def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))", "def valid_move(self, row, col):\n if not self._game_over:\n i_row, i_col = row-1, col-1\n #i_row and i_col wil be used to index the board (hence the i)\n (valid, flip_lst) = self._valid_placement(i_row, i_col)\n #print(\"FOR TESTING. Tiles Flipped: \", flip_lst)\n \n if valid:\n #Big Change: You decided to make determining validity\n # and flipping separate operations\n self._flip(i_row, i_col, flip_lst)\n else:\n print(\"\\nPlease enter a valid move!\")\n return False\n\n if self._board_is_full():\n self._game_over = True\n self._set_winner() \n \n self._switch_turn(self._turn)\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"\\nNo valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n self._switch_turn(self._turn) #Switch turn back to player before skip was determined\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"No valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n print(\"No moves exist for either player. GAME OVER\")\n self._game_over = True\n self._set_winner()\n return False\n\n return True\n elif self._game_over:\n print(\"The game is over. 
No more moves can be made!\")\n #TODO: Replace this^ with an exception later?\n return False", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def test_no_moves(self):\n game = self.ending(['bw..wwww'], 8, 1)\n game.man_move(0, 2)\n self.assertEqual(game.finish_state,\n (400, game.first_player, 'No moves'))", "def move(self, initial_pos, final_pos):\n\n ops = [ self.op_move_north, self.op_move_nwest, self.op_move_neast,\n self.op_jump_north, self.op_jump_nwest, self.op_jump_neast,\n self.op_capture_north, self.op_capture_east, self.op_capture_west,\n self.op_capture_nwest, self.op_capture_neast, self.op_add_piece\n ]\n\n # Check if the final position is empty\n if final_pos and self.board.get_element(*final_pos) != self.EMPTY:\n return False\n\n # Check if the intial position is a valid start piece\n if initial_pos not in self.next_pieces:\n return False\n\n # Test for add piece operation\n if self.next_move == self.ADDPIECE_1 or self.next_move == self.ADDPIECE_2:\n res = self.op_add_piece(initial_pos)\n if type(res) != type(False):\n return res\n\n # Test for other operations\n for op in ops:\n res = op(initial_pos)\n\n if type(res) != type(False) and (res.last_piece == final_pos):\n return res\n\n return False", "def move(self, piece):\n\n if list(piece) in self.find_moves():\n self.block[tuple( self.find_free() )] = self.block[tuple(piece)]\n self.block[tuple(piece)] = 0\n return \"success\"\n else:\n return \"error\"", "def get_move(self, board):\n\n valid_moves = [move for move in board.legal_moves]\n is_valid_move = False\n while not is_valid_move:\n move = input(\"Enter a valid move in uci format: \").lower()\n if len(move) == 4 or len(move) == 5:\n try:\n player_move = chess.Move.from_uci(move)\n\n if board.is_legal(player_move):\n try:\n board.push(player_move)\n return player_move\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")\n except:\n print(\"invalid move...\")\n else:\n print(\"invalid move...\")", "def _move(self, start: (int, int), dest: (int, int), extra_info=''):\n moving_piece = self.board[start[0]][start[1]]\n end_piece = self.board[dest[0]][dest[1]]\n\n # Check if the move is valid\n possible_moves = self.get_all_moves()\n if State.convert_to_EAN(start, dest, extra_info) not in possible_moves:\n return False\n\n # Invalidate castling\n self._invalidate_castles()\n\n # Update half turn counters since capture (updates 50 move draw)\n # reset on capture, which is when the destination piece is a different color\n self.half_moves += 1\n if not State._is_same_color(moving_piece, end_piece):\n self.half_moves = 0\n\n # Update full moves after black's turn\n if not self.white_to_move:\n self.full_moves += 1\n\n # Update the board to reflect the move\n self._update_board(start, dest, extra_info)\n\n # Update move history TODO\n # Detect three move repetition TODO\n\n # Update whose turn it is\n self.white_to_move = not self.white_to_move", "def make_move(self, position_from, position_to):\n\n # Returns False if position_from or position_to are not an actual position\n # ranging from 'a1' to 'i10' for columns 'a' through 'i' and rows '1' \n # through '10'.\n if 
not isinstance(position_to, str) or\\\n not isinstance(position_from,str) or\\\n len(position_to) > 3 or len(position_to) < 2 or\\\n len(position_from) > 3 or len(position_from) < 2:\n\n return False \n\n # Returns False if game has been won.\n elif self.get_game_state() != 'UNFINISHED':\n\n return False\n\n position_to = position_to.lower()\n position_from = position_from.lower()\n current_player = self.get_player_turn()\n\n # Returns False if move is not legal based on Janggi game rules.\n if self.legal_move(position_to, position_from) is not True:\n\n return False\n\n # Returns True if current player wishes to pass on their move.\n # Any position on the board will suffice, including a General's position.\n # Tested in legal_move to exit legal_move safely. The second test for pass\n # updates player turn and returns True, so long as the player is not\n # currently in check.\n if position_to == position_from:\n\n if self.is_in_check(current_player) is True:\n \n return False\n\n self.update_player_turn()\n return True\n\n # Move is valid. Adjusted board, update potential moves for all GamePieces,\n # test checkmate (change game_state if True), update player turn, and\n # return True.\n else:\n\n game_piece_object = self.get_game_piece_object_at_position(position_from)\n self.adjust_board(game_piece_object, position_to, position_from)\n self.update_potential_moves()\n \n # Sets the General's Board position attribute if the position has\n # changed.\n if isinstance(game_piece_object, General):\n\n if current_player == 'BLUE':\n\n self.set_general_position_blue(position_to)\n\n else:\n\n self.set_general_position_red(position_to)\n\n # Sets game_state to the player who won if checkmate is detected.\n if self.is_checkmate() is True:\n\n if current_player == 'BLUE':\n\n self.set_game_state('BLUE_WON')\n \n else:\n\n self.set_game_state('RED_WON')\n\n self.update_player_turn()\n return True", "def _is_valid_move(self, vector, current_piece, other_piece):\n return True", "def is_valid_move(self, position, dest_square):\n if self.symbol.isupper() and position.turn != 'w':\n return False\n elif self.symbol.islower() and position.turn != 'b':\n return False\n elif dest_square not in self.calculate_scope(position):\n return False\n else:\n return True", "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def move(self, row, column, piece):\n\n if row < 0 or row >= self._dimension or column < 0 or column >= self._dimension or self._board[row][column] != ' ':\n print('Move cannot be made')\n return False\n else:\n self._board[row][column] = piece\n self._number_of_moves += 1", "def makeMove(self, _pos, _check_legality=True):\n\n # Check legality is an argument mainly for testing purposes; not for live gameplay.\n if _check_legality:\n legality = self.board.getMoveLegality(self, _pos)\n is_legal_move, is_capturing = legality['is_legal_move'], legality['is_capturing']\n if not is_legal_move:\n # Will need to update this line with interface update.\n # exit(\"ILLEGAL MOVE of {} at {}\".format(self.color, _pos))\n return 'illegal'\n else:\n # _is_capturing is necessary because of the not too often situation of a move with\n # no liberties, but is legal because it's capturing.\n if is_capturing: self.board.playerMakesMove(self, _pos, _is_capturing=True)\n else: self.board.playerMakesMove(self, _pos)\n else:\n self.board.playerMakesMove(self, _pos)", "def takeStrategicMove():\r\n\tglobal move1, move2\r\n\r\n\tif move1==0 or 
move2==0:\r\n\t\tif validMove(1):\r\n\t\t\treturn 1\r\n\t\telif validMove(5):\r\n\t\t\treturn 5\r\n\telif winningMove():\r\n\t\treturn winningMove()\t\t\r\n\telif blockingMove():\r\n\t\treturn blockingMove()\r\n\telse:\r\n\t\treturn takeNaiveMove()", "def check_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n # 1. moving out of the bar\n # 2. check if the source is of the valid player\n # 3. check if the destination is valid\n\n board.set_player_perspective(player)\n\n # 1.\n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n if board.bar[player] < 1:\n return False\n\n if not board.valid_dest(fields_to_move - 1):\n return False\n\n return True\n\n # 2.\n if not board.valid_source(spike_index):\n return False\n # 3.\n dest_spike_index = spike_index + fields_to_move\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board.all_at_home()\n \n return board.valid_dest(dest_spike_index)", "def make_move(self, starting_pos, ending_pos):\n\n print(\"make_move, input =\", starting_pos, ending_pos)\n\n # convert inputs to work with table\n starting_loc = self.parse_input(starting_pos)\n ending_loc = self.parse_input(ending_pos)\n\n # conditionals to check\n valid = starting_loc[0] is not False and ending_loc[0] is not False\n invalid = starting_loc[0] is False or ending_loc[0] is False\n\n # check if piece is at starting_loc\n self.validate_move_check_for_piece(starting_loc)\n\n valid = starting_loc[0] is not False and ending_loc[0] is not False\n if valid:\n self.validate_move_game_conditions(starting_loc, ending_loc)\n\n # if still valid, check conditions involving the generals\n valid = starting_loc[0] is not False and ending_loc[0] is not False\n if valid:\n # Move will put king in own king check\n self.validate_move_general_conditions(starting_loc, ending_loc)\n\n # ensure move is valid to move on\n invalid = starting_loc[0] is False or ending_loc[0] is False\n if invalid:\n return False\n\n # move piece\n self.move_pieces(starting_loc, ending_loc)\n piece = self._game_board[ending_loc[0]][ending_loc[1]]\n piece.update_location(ending_loc)\n self.set_player_turn()\n\n # check if game has been won\n if self.gen_is_in_check(self._player_turn, True):\n self.set_game_state(self._player_turn)\n\n self.printBoardWithStyling()\n return True", "def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board", "def is_move_valid(self, direction, reference_board=None):\n # Verify a left move does not take you off the board.\n if (direction == \"l\"):\n if (self._current_loc.get_column() == 0):\n return False\n # Verify an up move does not take you off the board.\n elif (direction == \"u\"):\n # Verify the move does not take you off the board.\n if (self._current_loc.get_row() == 0):\n return False\n # Verify a right move does not take you off the board.\n elif (direction == \"r\"):\n current_row = self._current_loc.get_row()\n max_column_number = len(self._untraversed_board[current_row])\n if self._current_loc.get_column() + 1 == max_column_number:\n return False\n # Verify a down move does not take you off the board.\n elif 
(direction == \"d\"):\n if self._current_loc.get_row() + 1 == len(self._untraversed_board):\n return False\n else:\n assert False, \"Invalid move direction.\"\n\n # Get the new location for a move in the specified direction.\n new_location = self._calculate_move_location(direction)\n new_row = new_location.get_row()\n new_col = new_location.get_column()\n # Verify the space is available\n if(reference_board is None):\n return BoardPath._untraversed_board[new_row][new_col] != \"#\"\n else:\n return reference_board[new_row][new_col] != \"#\"", "def make_move(board, move, ch):\n board[move['row']][move['col']] = ch\n \n winner = board_winner(board)\n \n if winner is not None:\n return True, winner\n \n if not board_has_move(board):\n return True, None\n \n return False, None", "def make_move(self):\r\n if self.running and self.run:\r\n if self.board is None:\r\n SlTrace.lg(\"sp.board is None\")\r\n return False\r\n \r\n SlTrace.lg(\"running_loop self.running and self.run\", \"running_loop\")\r\n SlTrace.lg(\"running_loop self.start_move\", \"running_loop\")\r\n if self.start_move():\r\n SlTrace.lg(\"running_loop successful start_move\", \"running_loop\")\r\n self.next_move_no()\r\n SlTrace.lg(\"running_loop after start_move\", \"running_loop\")\r\n if self.to_pause:\r\n self.pause_cmd()\r\n self.to_pause = False\r\n return True", "def make_move(self, move, player):\n if not self.test_valid_move( move):\n return False\n self.game_state[move[0]][move[1]] = player", "def move(x, y, direction, board):\n\n piece_at_xy = starter.get_piece(x, y, board); # Getting necessary pieces\n\n assert piece_at_xy != '*', \"Error in swipe logic\"; # Logical debug case\n valid_direction = (direction == \"left\" or\n direction == \"right\" or\n direction == \"up\" or\n direction == \"down\");\n assert valid_direction, \"Invalid direction passed in\"; # Logical debug case\n\n # The new x and y for the current piece (adjacent's current position) are stored alongside adjacent (fewer ifs + redundant code)\n if direction == \"left\":\n adjacent = (starter.get_piece(x - 1, y, board), x - 1, y);\n elif direction == \"right\":\n adjacent = (starter.get_piece(x + 1, y, board), x + 1, y);\n elif direction == \"up\":\n adjacent = (starter.get_piece(x, y - 1, board), x, y - 1);\n elif direction == \"down\":\n adjacent = (starter.get_piece(x, y + 1, board), x, y + 1);\n\n if adjacent[0] == None: # Edge of the board case (no action taken)\n return False;\n\n elif piece_at_xy != adjacent[0] and adjacent[0] != '*': # Can't combine two numbers case (no action taken)\n return False;\n\n elif adjacent[0] == '*': # Empty spot adjacent case (recursive movement in direction)\n starter.place_piece('*', x, y, board);\n starter.place_piece(piece_at_xy, adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n elif piece_at_xy == adjacent[0]: # Adjacent same numbers case (combine them)\n starter.place_piece('*', x, y, board);\n starter.place_piece(str(int(adjacent[0]) * 2), adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n else:\n # Logical debug case\n assert False, \"No way you should be in here. 
Error in move logic\";\n\n return False;", "def apply_move(self, move):\n if self.check_move(move=move):\n self.board_list[move] = self.current_player.marker # changes value in the board to player which is either X or O\n self.moves_made += str(move) # keeps track of all moves\n return True\n else:\n return False", "def move(self):\r\n their_move = self.last_moves[\"their_move\"]\r\n return (their_move == \"\" and random.choice(moves) or their_move)", "def _isvalidmove(self, from_, to_):\n if self.board[from_].occupant is None:\n print(\"Moving from empty square\")\n return False\n piece = self.board[from_].occupant\n\n if piece.color != self.to_move:\n print(\"Wrong color\")\n return False\n\n if self.is_checked:\n if piece.notation != 'K':\n print(\"King is checked!\")\n return False\n\n diff = (\n to_cartesian(to_)[0] - to_cartesian(from_)[0],\n to_cartesian(to_)[1] - to_cartesian(from_)[1]\n )\n if not piece.hopping:\n if self.board.isblocked(from_, to_):\n print(\"Move blocked by other pieces\")\n return False\n\n if self.board[to_].occupant is not None:\n if piece.color == self.board[to_].occupant.color:\n print(\"Cannot capture friendly\")\n return False\n\n if diff not in piece.get_captures():\n print(\"Invalid piece capture\")\n return False\n\n if diff not in piece.get_moves():\n print(\"Invalid piece move\")\n return False\n\n return True", "def validate_movement(self, piece, from_col, from_row, to_col, to_row):\n col_diff = abs(ord(from_col) - ord(to_col))\n row_diff = abs(from_row - to_row)\n\n # For any piece, it must actually move...\n if col_diff == 0 and row_diff == 0:\n return False\n # ...and there must be empty spaces in between the from/to squares (when on a column, row, or diagonal)\n if not self.validate_empty_between(from_col, from_row, to_col, to_row):\n return False\n\n # White pawn\n if piece == 'P':\n if col_diff == 1 and (to_row - from_row == 1):\n # Can move diagonally up one square, if taking another piece in that square or by en-passant\n return self.piece_colour(to_col, to_row) == 'B' \\\n or self.is_en_passant(from_col, from_row, to_col, to_row)\n elif col_diff != 0:\n # Otherwise, it can't change columns\n return False\n elif from_row == 2:\n # From initial position, can go up one or two rows (but can't take a piece)\n return (to_row == 3 or to_row == 4) and self.get_square(to_col, to_row) == ' '\n else:\n # Otherwise, can only move up one row (but can't take a piece)\n return to_row - from_row == 1 and self.get_square(to_col, to_row) == ' '\n # Black pawn\n elif piece == 'p':\n if col_diff == 1 and (from_row - to_row == 1):\n # Can move diagonally down one square, if taking another piece in that square or by en-passant\n return self.piece_colour(to_col, to_row) == 'W' \\\n or self.is_en_passant(from_col, from_row, to_col, to_row)\n elif col_diff != 0:\n # Otherwise, it can't change columns\n return False\n elif from_row == 7:\n # From initial position, can go down one or two rows (but can't take a piece)\n return (to_row == 6 or to_row == 5) and self.get_square(to_col, to_row) == ' '\n else:\n # Otherwise, can only move down one row (but can't take a piece)\n return from_row - to_row == 1 and self.get_square(to_col, to_row) == ' '\n # Rook\n elif piece.lower() == 'r':\n # Must remain in same column or same row\n return col_diff == 0 or row_diff == 0\n # Knight\n elif piece.lower() == 'n':\n # Jumps in a 2+1 pattern\n return (col_diff == 2 and row_diff == 1) or (col_diff == 1 and row_diff == 2)\n # Bishop\n elif piece.lower() == 'b':\n # Moves along 
diagonals\n return col_diff == row_diff\n # Queen\n elif piece.lower() == 'q':\n # Can move along columns, rows, or diagonals\n return col_diff == 0 or row_diff == 0 or col_diff == row_diff\n # King\n elif piece.lower() == 'k':\n # Can move a single square in any direction\n if not(0 <= col_diff <= 1) or not(0 <= row_diff <= 1):\n return False\n\n # But not next to the other king\n other_king = 'k' if piece.isupper() else 'K'\n # Get valid border squares\n border_squares = list(filter(\n lambda b_square: 'a' <= b_square[0] <= 'f' and 1 <= b_square[1] <= 8,\n [\n (chr(ord(to_col) - 1), to_row - 1), (to_col, to_row - 1), (chr(ord(to_col) + 1), to_row - 1),\n (chr(ord(to_col) - 1), to_row), (to_col, to_row), (chr(ord(to_col) + 1), to_row),\n (chr(ord(to_col) - 1), to_row + 1), (to_col, to_row + 1), (chr(ord(to_col) + 1), to_row + 1)\n ]\n ))\n # Check for the other king\n for square in border_squares:\n if self.get_square(square[0], square[1]) == other_king:\n return False\n\n return True", "def step(self, move):\n if self.is_done:\n raise Exception('You have to reset the game first.')\n action = self.get_action(move)\n\n # adjust in case too big, could also count as invalid if action >= field_width\n # this is for the human player to make always valid moves\n action = action % self.n_actions\n\n step_column = self.get(action, self.dim[-1] - 1)\n is_invalid = step_column != 0\n\n # we should consider to end the game after an invalid move since further play might not be meaningful\n if is_invalid: # board filled => invalid move.\n info = {'winner': 0, 'game_over': False, 'invalid_move': True}\n # current player is not changed.\n self.invalid_moves += 1\n self._is_done = True\n return self.board, self.invalid_move_reward, self.is_done, info,\n \n else: # not filled => valid move\n self.steps += 1\n height = self.get_height(action) # getting the _height at which the token must fall to\n self.set(action, height, value=self.current_player)\n\n winner = self.check_winner(self.board, action, height)\n if winner != 0:\n info = {'winner': winner, 'game_over': True, 'invalid_move': False}\n reward = self.winning_reward\n self._is_done = True\n else:\n if self.is_full():\n # Tie\n info = {'winner': winner, 'game_over': True, 'invalid_move': False}\n reward = self.tie_reward\n self._is_done = True\n else:\n # Proceeding to next turn\n if self.check_opponent_winning(): # checks whether opponent can win\n reward = self.losing_reward\n else:\n reward = 0\n info = {'winner': winner, 'game_over': False, 'invalid_move': False}\n self.current_player *= -1 \n \n return self.board, reward, self.is_done, info", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def apply_move(b,player,move):\n move = move.strip().lower()\n if len(move)!=2:\n raise Exception(\"Valid move is two characters (e.g. 
A2 or B3)\")\n if move[0] not in COLS:\n move = move[::-1]\n if move[0] not in COLS:\n raise Exception(\"No column spec found\")\n j = COLS.index(move[0])\n i = int(move[1])-1\n if b[i][j] != \" \":\n raise Exception(\"Another move already filled that position\")\n b[i][j] = player", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_sphere_sampler.move(std_gcmc_sphere_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_sphere_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_sphere_sampler.n_accepted <= n_moves\n assert len(std_gcmc_sphere_sampler.Ns) == n_moves\n assert len(std_gcmc_sphere_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_sphere_sampler.energy, Quantity)\n assert std_gcmc_sphere_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None", "def move(self, action):\n \n self.counter += 1\n\n if action not in self.ACTIONS:\n raise Exception(\"Invalid action\")\n\n \n\n d_x, d_y = self.MOVEMENTS[action]\n x, y = self.position\n new_x, new_y = x + d_x, y + d_y\n new_X,new_Y=self.position_to_xy(new_x, new_y)\n \n\n if (new_x, new_y) not in self.cases:\n return self._get_state(), -3, False, self.ACTIONS\n \n \n \n elif (self.openGoal(new_x,new_y))&(new_X>-400):\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n \n return self._get_state(), 20, True, self.ACTIONS\n \n # elif not self.openGoal(new_x,new_y):\n # self.position = new_x, new_y\n # self.positionxy = self.position_to_xy(new_x, new_y)\n # return self._get_state(), -1, False, self.ACTIONS\n \n elif self.counter > 100:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, True, self.ACTIONS\n \n else:\n self.position = new_x, new_y\n self.positionxy = self.position_to_xy(new_x, new_y)\n return self._get_state(), -1, False, self.ACTIONS", "def test_valid_move():\n\n board = Board()\n\n # a col outside the width of the board should be false\n assert board.valid_move(board.get_grid_size()[1] + 1) is False\n\n # only positive cols should be considered for a move\n assert board.valid_move(-2) is False\n\n # since board is empty all cols should have moves\n for i in range(board.get_grid_size()[1]):\n assert board.valid_move(i) is True\n\n # if a col is full no move can be made\n for i in range(board.get_grid_size()[1]):\n if i % 2 == 0:\n board.move(board.P1, 0)\n else:\n board.move(board.P2, 0)\n\n \"\"\"\n board now looks like this...\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|O|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|X|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|O|-|-|-|-|-|-|2\n 
+-+-+-+-+-+-+-+\n 3|X|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|O|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|-|-|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n\n \"\"\"\n assert board.valid_move(0) is False", "def get_move(self, find_move_name):\n frame_data = self._get_frame_data()\n sprites = self._get_sprites()\n\n # Need to check both names separately\n for move in frame_data.keys():\n if '\"' in find_move_name:\n temp_move_name = find_move_name.replace('\"', '')\n if temp_move_name == move:\n frame_data_name = move\n break\n else:\n continue\n elif find_move_name.lower() == move.lower():\n frame_data_name = move\n break\n\n else:\n for move in frame_data.keys():\n if find_move_name.lower() in move.lower():\n frame_data_name = move\n break\n else:\n raise MoveNotFound\n\n sprite_name = None\n\n # temporary fix for the 214/236B/22x/5AD meme\n if '214b' in frame_data_name.lower() and not '214bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '214A/B' in move:\n sprite_name = move\n break\n elif '236b' in frame_data_name.lower() and not '236bc' in frame_data_name.lower():\n for move in sprites.keys():\n if '236A/B' in move:\n sprite_name = move\n break\n\n elif '22' in frame_data_name.lower():\n for move in sprites.keys():\n if '22A/B' in move and '22c' not in frame_data_name.lower():\n sprite_name = move\n break\n elif '22A/B/C' in move and '22c' in frame_data_name.lower():\n sprite_name = move\n break\n\n elif 'reversal' in frame_data_name.lower():\n for move in sprites.keys():\n if '5AD' in move:\n sprite_name = move\n break\n\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() == split_name.lower():\n sprite_name = move\n break\n elif move.lower() == frame_data_name.lower():\n sprite_name = move\n break\n else:\n for move in sprites.keys():\n if sprite_name is not None:\n break\n if 'j.' 
in frame_data_name.lower() and ' ' in frame_data_name:\n for split_name in frame_data_name.split(' '):\n if move.lower() in split_name.lower():\n sprite_name = move\n break\n elif move.lower() in frame_data_name.lower() and '22' not in find_move_name:\n print('ok')\n sprite_name = move\n break\n elif find_move_name.lower() in move.lower():\n sprite_name = move\n break\n else:\n sprite_name = None\n\n if sprite_name is None:\n sprite = ''\n else:\n sprite = self._get_high_quality_sprite(sprites[sprite_name])\n\n return {\n frame_data_name: {\n 'fd': frame_data[frame_data_name],\n 'sprite': sprite\n }\n }", "def nextMoveGNU(self, move=\"go\", board=None):\n # get move\n if self.pawning:\n while not rospy.is_shutdown():\n for row in [2,3,4,5]:\n for col in ['a','b','c','d','e','f','g','h']:\n p1 = board.getPiece(col,row)\n if p1 != None and abs(p1.type) == ChessPiece.WHITE_PAWN:\n p2 = board.getPiece(col,row+1)\n if p2 == None:\n # this is a candidate \n m = col + str(row) + col + str(row+1)\n self.history.append(m)\n return m\n else:\n self.engine.sendline(move) \n if self.engine.expect(['My move is','Illegal move']) == 1:\n return None \n self.engine.expect('([a-h][1-8][a-h][1-8][RrNnBbQq(\\r\\n)])')\n m = self.engine.after.rstrip()\n self.history.append(m)\n return m", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def next_move(board, player):\n \n move_row = \"move\"\n move_column = \"move\"\n\n while not move_row.isnumeric():\n move_row = input(\"{}, pick row to place your {}. > \".format(player.name, player.char))\n while not move_column.isnumeric(): \n move_column = input(\"Pick column in row {} to place your {}. > \".format(move_row, player.char))\n\n move_row = int(move_row)\n move_column = int(move_column)\n\n move = Move(player, (move_row, move_column))\n \n # Check if move is out of bounds\n if (move_row >= len(board.current_board) or\n move_column >= len(board.current_board)):\n print(\"Move out of bounds. Choose a valid move.\")\n return board\n\n # Check if space is already used\n if board.current_board[move_row][move_column] != \"-\":\n print(\"Spot already played. 
Pick an unused space.\")\n return board\n\n board.last_move = player.name\n board.add_move(move)\n\n return board", "async def move(self, board, valid_actions):\n self._move = None\n output_move_row = Value('d', -1)\n output_move_column = Value('d', 0)\n try:\n # await self.search(board, valid_actions) \n p = Process(\n target=self.search, \n args=(\n self._color, board, valid_actions, \n output_move_row, output_move_column))\n p.start()\n while p.is_alive():\n await asyncio.sleep(0.1)\n self._move = np.array([output_move_row.value,output_move_column.value],dtype=np.int32)\n except asyncio.CancelledError as e:\n print('The previous player is interrupted by a user or a timer.')\n except Exception as e:\n print(type(e).__name__)\n print('move() Traceback (most recent call last): ')\n traceback.print_tb(e.__traceback__)\n finally:\n p.kill()\n self._move = np.array(\n [output_move_row.value, output_move_column.value],\n dtype=np.int32)\n return self.best_move", "def validate_move(board: list, character: list, direction: str) -> bool:\n max_x_y_coordinates = board[-1]\n valid_options = []\n if character[1] < max_x_y_coordinates[0]:\n valid_options.append(\"d\")\n if character[1] > 0:\n valid_options.append(\"a\")\n if character[0] < max_x_y_coordinates[1]:\n valid_options.append(\"s\")\n if character[0] > 0:\n valid_options.append(\"w\")\n if direction in valid_options:\n return True\n else:\n return False", "def check_score(self, move):\n\n i = int(move) // 7\n j = int(move) % 7 #find the corresponding index of the input move\n\n if i == 0: #top\n if self.board[i+1][j-1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i+2][j] != \"\":\n return 1\n return 0\n if i == 6: #bottom\n if self.board[i-1][j-1] != \"\" and self.board[i-1][j+1] != \"\" and self.board[i-2][j] != \"\":\n return 1\n return 0\n if j == 0: #left\n if self.board[i-1][j+1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i][j+2] != \"\":\n return 1\n return 0\n if j == 6: #right\n if self.board[i-1][j-1] != \"\" and self.board[i+1][j-1] != \"\" and self.board[i][j-2] != \"\":\n return 1\n return 0\n if i == 2 or i == 4: # horizontal\n score = 0\n if self.board[i-1][j-1] != \"\" and self.board[i-1][j+1] != \"\" and self.board[i-2][j] != \"\":\n score += 1\n if self.board[i+1][j-1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i+2][j] != \"\":\n score += 1\n return score\n\n if j == 2 or j == 4: # vertical\n score = 0\n if self.board[i-1][j-1] != \"\" and self.board[i+1][j-1] != \"\" and self.board[i][j-2] != \"\":\n score += 1\n if self.board[i-1][j+1] != \"\" and self.board[i+1][j+1] != \"\" and self.board[i][j+2] != \"\":\n score += 1\n return score", "def move_possible(x, y, board):\n\n piece_at_xy = starter.get_piece(x, y, board);\n if piece_at_xy == None:\n return False;\n elif piece_at_xy == '*': # An empty space means a move is always possible\n return True;\n\n return (\n piece_at_xy == starter.get_piece(x + 1, y, board) or\n piece_at_xy == starter.get_piece(x - 1, y, board) or\n piece_at_xy == starter.get_piece(x, y + 1, board) or\n piece_at_xy == starter.get_piece(x, y - 1, board)\n );", "def fix_move(self, invalid_move: QMove):\n\n # TODO: reduce time_per_game second by second\n ERROR_MSG = f\"INVALID_MOVE {invalid_move.to_string()}\"\n\n if self.is_ai and self.proc is not None:\n self.proc.stdin.write(str.encode(ERROR_MSG + '\\n'))\n self.proc.stdin.flush()\n new_move = QMove(os.read(self.proc.stdout.fileno(), 100))\n else:\n new_move = QMove(\n input(\"Move was invalid, enter a valid 
move:\\n\\t>> \"))\n\n return new_move", "def move(self, motor, position, timeout):\n \n try:\n caput(motor, position, wait=True, timeout=timeout)\n except:\n e = sys.exc_info()\n print str(e)\n print \"ERROR: caput failed.\"\n print (motor + \" pos:\" + str(position) + \" timeout:\" + str(timeout))\n return self.__g.FAIL\n \n rdbd = motor + \".RDBD\"\n rbv = motor + \".RBV\"\n\n final_pos = caget(rbv)\n deadband = caget(rdbd)\n\n success = True\n\n if ((final_pos < position-deadband) or (final_pos > position+deadband)):\n print \"ERROR: final_pos out of deadband.\"\n print (motor + \" \" + str(position) + \" \" + str(timeout) + \" \" \n + str(final_pos) + \" \" + str(deadband))\n success = False\n\n if (success):\n return self.postMoveCheck(motor)\n else:\n self.postMoveCheck(motor)\n return self.__g.FAIL", "def move(puzzle: str, direction: str):\r\n position_index = puzzle.index(EMPTY)\r\n position = position_index + 1\r\n grid_width = get_grid_width(puzzle)\r\n\r\n # What direction to moved the tile if it's a valid move\r\n if direction == UP:\r\n if (position) > grid_width:\r\n return swap_position(puzzle, position_index, position_index - grid_width)\r\n\r\n elif direction == DOWN:\r\n if (len(puzzle) - position) >= grid_width:\r\n return swap_position(puzzle, position_index, position_index + grid_width)\r\n\r\n elif direction == LEFT:\r\n if (position - 1) % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index - 1)\r\n\r\n elif direction == RIGHT:\r\n if position % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index + 1)\r\n\r\n return None", "def is_valid_move(x:int, y:int,board_length) -> bool:\n if x < 0 or y < 0 or x == board_length or y == board_length:\n return False\n return True", "def move(self):\r\n my_move = self.last_moves[\"my_move\"]\r\n return (my_move != \"\" and moves[(moves.index(my_move)+1) % 3] or\r\n random.choice(moves))", "def find_best_move(state: GameState) -> None:", "def nextMoveGNU(self, move=\"go\", board=None):\n # get move\n if self.pawning:\n while not rospy.is_shutdown():\n rows = [2,3,4,5]\n piece = ChessPiece.WHITE_PAWN\n if board.side == board.BLACK:\n rows = [7,6,5,4]\n piece = ChessPiece.BLACK_PAWN\n for row in rows:\n for col in ['a','b','c','d','e','f','g','h']:\n p1 = board.getPiece(col,row)\n if p1 != None and abs(p1.type) == piece:\n p2 = board.getPiece(col,row+1)\n if p2 == None:\n # this is a candidate\n m = col + str(row) + col + str(row+1)\n self.history.append(m)\n return m\n else:\n self.engine.sendline(move)\n if self.engine.expect(['My move is','Illegal move']) == 1:\n return None\n self.engine.expect('([a-h][1-8][a-h][1-8][RrNnBbQq(\\r\\n)])')\n m = self.engine.after.rstrip()\n self.history.append(m)\n return m", "def makeMove(self, moveStr):\r\n\t\tmoveStr = str(moveStr)\r\n\r\n\t\tmoveUci = self._userParseSanToUci(moveStr)\r\n\t\t# print(moveUci)\r\n\r\n\t\tif moveUci is None:\r\n\t\t\treturn\r\n\r\n\t\tresponse = requests.post(f'https://lichess.org/api/board/game/{self.gameId}/move/{moveUci}', headers=self.authHeader)\r\n\r\n\t\tif response.status_code == 200:\r\n\t\t\tlog.debug('Move Successfully Sent')\r\n\r\n\t\telse:\r\n\t\t\tlog.warning(f'Move Unsuccessfully Sent. 
Status Code: {response.status_code}')", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def process_move(player, board):\r\n c = player.__repr__()\r\n print(c, \"'s turn\")\r\n move = player.next_move(board)\r\n board.add_checker(player.checker, move)\r\n print()\r\n print(board)\r\n if board.is_win_for(player.checker):\r\n i = player.num_moves\r\n print(player.__repr__(), \"wins in \", i, \"moves\")\r\n print(\"Congratulations!\")\r\n return True\r\n elif board.is_full() and not board.is_win_for(player.checker):\r\n print(\"It's a tie!\")\r\n return True\r\n else:\r\n return False", "def move_piece(self, move: Move, ignore_check=False):\n # no need to check the values of Position as they are guaranteed to be sane.\n\n # sanity: cant move from a position onto itself\n if move.from_pos.to_str == move.to_pos.to_str: # TODO compare by value, not str\n raise InvalidMoveException('cant move a piece onto itself (%s)' % move.from_pos.to_str())\n\n piece = self.rubric(move.from_pos)\n if piece is None:\n raise Exception(\"assert failed: found empty rubric at: %s\" % move.from_pos.to_str())\n\n # sanity: there must be a piece in the start position:\n if piece.piece_type == PieceType.PLACEHOLDER:\n raise InvalidMoveException('cant move from empty rubric (%s)' % (move.from_pos.to_str()))\n\n # sanity: ensure the move is valid for this turn's color\n if piece.color != self._current_side_color:\n raise InvalidMoveException(\"cant move a piece of this color at this turn\")\n\n # sanity: if capturing, pieces must have different colors\n captured_piece = self.rubric(move.to_pos)\n if captured_piece.piece_type != PieceType.PLACEHOLDER:\n if captured_piece.color == piece.color:\n raise InvalidMoveException('cant capture a piece of the same color (start: %s, end: %s)' %\n (move.from_pos.to_str(), move.to_pos.to_str()))\n\n # handle movement in Check\n king_attackers = self.get_king_attackers()\n if not ignore_check and len(king_attackers) > 0:\n # if the king is under attack (== Check), the only valid moves are those that\n # resolve the situation:\n # a) moving the king to an unattacked position\n # b) capturing the attacker by the king, provided its position is NOT attacked by another piece\n # c) capturing the attacker by another piece\n # d) blocking the check by a rook, queen or bishop: placing them between\n # the king and the attacker - NOTE I will not implement this\n\n # if there are no valid moves, the game is over (Checkmate) - this is handled elsewhere\n\n # determine if the move will resolve the Check:\n # create a copy of the board, run the proposed move (ignore the check)\n # and then determine if we're still in check afterwards\n board_copy = self.get_board_copy()\n board_copy.move_piece(move, ignore_check=True)\n if len(board_copy.get_king_attackers()) > 0:\n # the move did not resolve the check, so it wasnt valid:\n raise InvalidMoveException(\"move failed to resolve Check\")\n\n if piece.is_valid_move(move, self._rubrics):\n # handle capture scenario\n if captured_piece.piece_type != PieceType.PLACEHOLDER:\n # remove the captured piece\n self.remove_piece(captured_piece)\n\n # move the piece to its destination\n 
self.set_rubric(piece, move.to_pos)\n\n # set empty placeholder in the origin rubric\n self.set_rubric(PlaceHolder(Position), move.from_pos)\n\n return True", "def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):\n if len(source_chunk.paths) <= 1:\n return False\n\n move_time = source_chunk.paths[path_index].time\n\n new_source_badness = self._badness(source_chunk.time - move_time)\n new_target_badness = self._badness(target_chunk.time + move_time)\n\n delta_badness = ((new_source_badness + new_target_badness) -\n (source_chunk.badness + target_chunk.badness))\n if delta_badness < 0:\n move_func()\n return True\n\n return False", "def choose_move(self):\n return 0", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n count = 0 # Count will track how many pieces are between start and end_pos\r\n\r\n if start_row != end_row and start_col != end_col: # Moving diagonally\r\n return False\r\n\r\n # If cannon moves to an empty position\r\n # if end_piece_player_id is None:\r\n\r\n if start_row == end_row: # Moving horizontally\r\n col_difference = end_col - start_col\r\n\r\n if col_difference > 0: # Moving to the right of the board\r\n for col in range(start_col + 1, end_col): # Checks if there is a piece between start_col and end_col\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if col_difference < 0: # Moving to the left of the board\r\n for col in range(start_col - 1, end_col, -1): # Checks to the left of the board\r\n # If there is a piece to block movement to the end_pos, return False\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if start_col == end_col: # Moving vertically\r\n row_difference = end_row - start_row\r\n\r\n if row_difference > 0: # Moving down the board\r\n for row in range(start_row + 1, end_row):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n\r\n if row_difference < 0: # Moving up the board\r\n for row in range(start_row -1, end_row, -1):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n # 1 piece between start_pos and end_pos and end_pos contains a chess piece\r\n if count == 1 and end_piece_player_id is not None:\r\n return True\r\n # end_pos has no piece and there are no pieces to impede path\r\n elif end_piece_player_id is None and count == 0:\r\n return True\r\n # Returns False for all other scenarios\r\n else:\r\n return False", "def validate_move(board: list, character: dict, direction: str) -> bool:\n if direction.strip().upper() == \"N\":\n return (character[\"Position\"][0] - 1, character[\"Position\"][1]) in board\n elif direction.strip().upper() == \"S\":\n return (character[\"Position\"][0] + 1, character[\"Position\"][1]) in board\n elif direction.strip().upper() == \"W\":\n return (character[\"Position\"][0], character[\"Position\"][1] - 1) in board\n elif direction.strip().upper() == \"E\":\n return (character[\"Position\"][0], character[\"Position\"][1] + 1) in board\n else:\n print(\"Please enter only directions shown above\")\n return False", "def make_move(self, board: Board) -> int:\n return random.choice(board.get_valid_moves())", "def is_valid_move(self, position: Point) -> bool:\n\t\tif 
self.tiles[position.x][position.y] == 0:\n\t\t\treturn True\n\t\treturn False", "def move(self,x_pos,y_pos,x_dir,y_dir,user):\r\n\r\n if self.finished == True:\r\n return Move.finish\r\n\r\n size = len(self)\r\n if not self.__on_the_board(x_pos+x_dir, y_pos+y_dir):\r\n return Move.out_of_bounds\r\n\r\n moving_piece = self.board[y_pos][x_pos]\r\n\r\n if moving_piece == 0 or user * moving_piece < 0:\r\n return Move.no_piece_found\r\n\r\n if not self.__valid_direction(x_dir,y_dir):\r\n return Move.invalid_destination\r\n\r\n if self.board[y_pos+y_dir][x_pos+x_dir] != 0:\r\n return Move.path_blocked\r\n \r\n # List of moves the user can make if captures are possible.\r\n user_capture_options = [forced_move for forced_move in self.capture_options if forced_move.user == moving_piece]\r\n\r\n if user_capture_options != []:\r\n if not self.__is_capture_move(moving_piece,x_pos,y_pos,x_dir,y_dir):\r\n return Move.capture_ignored\r\n \r\n if abs(x_dir) == 1:\r\n return self.__move_single_tile(moving_piece,x_pos,y_pos,x_dir,y_dir,user)\r\n\r\n if abs(x_dir) == 2:\r\n return self.__move_double_tile(moving_piece,x_pos,y_pos,x_dir,y_dir,user)\r\n\r\n # This code should not be reached, as abs(x_dir) is forced to be either 1 or 2 before looking at the if-clauses.\r\n # However, may this filter ever need to be altered, here's a handy error to let you know that you did something wrong.\r\n raise NotImplementedError(\"This code should not be reached!\")", "def test_perform_move(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertFalse(p.perform_move(\"taco\"))\n self.assertTrue(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,0],[7,8,6]])\n self.assertFalse(p.perform_move('right'))\n p = hw.create_tile_puzzle(2, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('up'))\n self.assertFalse(p.perform_move('up'))\n self.assertEqual(p.get_board(), [[1,2,0,4],[5,6,3,7]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertTrue(p.perform_move('left'))\n self.assertFalse(p.perform_move('down'))\n self.assertFalse(p.perform_move('left'))\n self.assertEqual(p.get_board(), [[0,1,2,3]])", "def is_pawn_move_valid(self, from_row, from_col, to_row, to_col):\n # Setup variables used\n piece = self.board.squares[from_row][from_col]\n piece_color = self.piece_color(piece)\n to_piece = self.board.squares[to_row][to_col]\n row_diff = abs(from_row - to_row)\n col_diff = abs(from_col - to_col)\n dc = 0\n\n # Set flag for first move of pawn\n first_move = True if from_row == 6 or from_row == 1 else False\n\n # If direction is not correct for white, exit\n if to_row - from_row > 0:\n dr = 1\n if self.piece_color(piece) == \"white\":\n return False\n\n # If direction is not correct for black, exit\n if to_row - from_row < 0:\n dr = -1\n if self.piece_color(piece) == \"black\":\n return False\n\n # If moving straight\n if from_col == to_col:\n # if not legal straight move, exit\n if not (row_diff == 1 or (first_move and row_diff == 2)):\n return False\n\n # make sure to move has no pieces on straight path\n dm = row_diff + 1\n\n # return value\n retVal = self._any_piece_in_way(from_row, from_col, dr, dc, dm)\n\n# if retVal and not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n\n return 
retVal\n\n # WHITE en passant\n # move from moveHistory => (\"piece\", fromRow, fromCol, toRow, toCol)\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♟\" and self.moveHistory[-1][1] == 1 and\\\n self.moveHistory[-1][3] == 3 and piece_color == \"white\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # BLACK en passant\n if (self.moveHistory[-1][2] == self.moveHistory[-1][4] == (to_col)) and \\\n self.moveHistory[-1][0] == \"♙\" and self.moveHistory[-1][1] == 6 and\\\n self.moveHistory[-1][3] == 4 and piece_color == \"black\":\n if col_diff == 1 and row_diff == 1 and to_piece == None:\n if not self.testing:\n self.board.overwrite_board_square(self.moveHistory[-1][3], self.moveHistory[-1][4])\n self.board.squares[self.moveHistory[-1][3]][self.moveHistory[-1][4]] = None\n return True\n\n # else move must be taking piece directly move\n # if legal taking piece move and (opponent-already check for own piece) piece at to-square\n if col_diff == 1 and row_diff == 1 and to_piece != None:\n\n# if not self.testing:\n# # self.pawn_promotion(to_row, to_col, piece_color)\n# self.board.overwrite_board_square(to_row, to_col)\n# if piece_color == \"black\":\n# self.board.put_piece(self.B_QUEEN, to_row, to_col)\n# else:\n# self.board.put_piece(self.W_QUEEN, to_row, to_col)\n return True\n\n return False", "def get_next_move(self):\n return int(input('Enter your move: '))", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and 
corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def askMove(self,posibleMoves):\n print(\"Where will you move?\")\n while True:\n pos = raw_input(\"Type Colum and Row 'CR' Ex:a1 for first column/row: \")\n if len(pos) == 2:\n c = ord(pos[0])-97\n r = int(pos[1])-1\n move = c+r*8\n if move in posibleMoves:\n return move\n print(\"Invalid move, try again\")\n return", "def parse_move(move):\n if not (len(move) == 2):\n return None, None\n try:\n row = ord(move[0].upper()) - 65\n col = int(move[1])\n except:\n return None, None\n return row, col", "def move(self, row, column, symbol):\n game_state = self.determine_game_state()\n if game_state not in (GameState.GAME_NOT_STARTED, GameState.GAME_IN_PROGRESS):\n return MoveResults.MOVE_INVALID\n\n # check for initial move\n if self.board == BLANK_BOARD and symbol == O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # check for invalid row and column\n if row < 0 or row > 2 or column < 0 or column > 2:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece is valid\n if symbol != X_SYMBOL and symbol != O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece isn't moving out of turn\n x_moves = self.board.count(X_SYMBOL)\n o_moves = self.board.count(O_SYMBOL)\n if symbol == X_SYMBOL and x_moves > o_moves:\n return MoveResults.MOVE_INVALID\n elif symbol == O_SYMBOL and o_moves >= x_moves:\n # note that x always goes first.\n return MoveResults.MOVE_INVALID \n\n # figure out position.\n position = (3 * row) + column\n\n # make sure there's not already a piece there.\n if self.board[position] != EMPTY_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n self.board = self.board[:position] + symbol + self.board[position+1:] \n return MoveResults.MOVE_VALID", "def test_human_move_char(self):\n self.ri.return_value = 'c'\n assert False == self.T.human_move()", "def is_valid_move(self, move: Any) -> bool:\n return move in self.get_possible_moves()" ]
[ "0.70150155", "0.676564", "0.6584836", "0.6568262", "0.6568117", "0.6563307", "0.6558569", "0.64923847", "0.64853036", "0.6477489", "0.645601", "0.6451311", "0.6451188", "0.64166987", "0.6414095", "0.63909614", "0.63762623", "0.636292", "0.6332474", "0.63271296", "0.63165474", "0.6294423", "0.6260845", "0.625517", "0.6251586", "0.62417924", "0.6229471", "0.6223107", "0.6220301", "0.6190982", "0.617644", "0.61361843", "0.61217", "0.6101234", "0.60865843", "0.6076552", "0.6073566", "0.6059842", "0.6054433", "0.605089", "0.6050613", "0.6046398", "0.6046322", "0.6045086", "0.6041365", "0.60356534", "0.60348153", "0.60320616", "0.60260147", "0.60234237", "0.60175395", "0.6001694", "0.5991489", "0.5986339", "0.59817743", "0.5975904", "0.5975037", "0.59716666", "0.5969712", "0.5967952", "0.59671646", "0.5965469", "0.59599257", "0.59503084", "0.5941529", "0.59398395", "0.5938152", "0.5937943", "0.5937562", "0.5934122", "0.5929441", "0.59280646", "0.59250766", "0.592403", "0.59234107", "0.59057945", "0.590375", "0.58987814", "0.5897164", "0.589089", "0.5887902", "0.5864931", "0.5863642", "0.58553916", "0.58510023", "0.5849331", "0.5841226", "0.58408874", "0.58407456", "0.5830376", "0.5822612", "0.5821459", "0.58180714", "0.5808107", "0.58081067", "0.58067983", "0.58012354", "0.57960457", "0.5795296", "0.5791194" ]
0.6508602
7
Returns the winning piece if the game is over. If the game is a draw it returns the empty piece, and if the game is not over returns false.
def isOver(self): isFull = Piece.BLANK for a,b,c in [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6], [1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]: if (self.board[a] is self.board[b] is self.board[c] and self.board[a] is not Piece.BLANK): return self.board[a] if (self.board[a] is Piece.BLANK or self.board[b] is Piece.BLANK or self.board[c] is Piece.BLANK): isFull = False return isFull
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gameOver():\n if len(p1)==0 and len(p1winnings)==0:\n return True\n elif len(p2)==0 and len(p2winnings)==0:\n return True\n return False", "def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False", "def is_game_over(self):\n\n # This checks whether or not the board is full...\n if len(self.board.values()) == 100 and \\\n 0 not in self.board.values():\n p1 = self._longest_chain(1)\n p2 = self._longest_chain(2)\n if len(p1) > len(p2):\n return 1\n elif len(p2) > len(p1):\n return 2\n else:\n return 0\n\n # If it's not full. We check for boxes\n else:\n for x in range(self.width-1):\n for y in range(self.height-1):\n slice = self._slice((x,y), (2,2))\n if 0 not in slice[0] and 0 not in slice[1]:\n # is this slice a box?\n if slice[0][0] == slice[0][1] and \\\n slice[0][1] == slice[1][0] and \\\n slice[1][0] == slice[1][1]:\n return slice[0][0] # winner\n\n return -1 # game is not over", "def is_game_over(self):\n bk = False\n wk = False\n\n # Find the kings\n for row in range(8):\n for col in range(8):\n if self.board.squares[row][col] == ChessPiece.B_KING: # Black king symbol\n bk = True\n break\n if self.board.squares[row][col] == ChessPiece.W_KING: # Black king symbol\n wk = True\n break\n\n # If a king is missing, end the game. This fixes a bug we were having\n if bk == False:\n return 1\n if wk == False:\n return 2\n\n if self.white_wins():\n return 1\n elif self.black_wins():\n return 2\n elif self.tie():\n return 3\n else:\n return 0", "def is_over(self):\n alive_players = [1 if p.status == \"alive\" else 0 for p in self.players]\n # If only one player is alive, the game is over.\n if sum(alive_players) == 1:\n return True\n\n # If all rounds are finshed\n if self.round_counter >= 2:\n return True\n return False", "def is_over(self):\n winner = TictactoeMatch.get_winner(self.inputs_)\n if winner:\n self.result_ = winner\n if Config.USER['debug']['enabled']:\n print \"It is over! Player \"+str(self.result_)+\" (\"+str(self.player_label_[self.result_])+\") wins!\"\n return True\n for value in self.inputs_:\n if value == TictactoeMatch.EMPTY:\n if Config.USER['debug']['enabled']:\n print \"Go!\"\n return False\n self.result_ = TictactoeMatch.DRAW\n if Config.USER['debug']['enabled']:\n print \"It is over! 
Draw!\"\n return True", "def check_game_over(self):\n red, blue = self.board.count_piece()\n if blue == 0:\n self.ui.show_result(\"RED WIN!\")\n self.turn = RED\n elif red == 0:\n self.ui.show_result(\"BLUE WIN!\")\n self.turn = BLUE\n elif red == blue == 1:\n self.ui.show_result(\"DRAW!\")", "def game_over(self):\n return bool(self.last_round and self.last_player == self.current_player)", "def check_over(self):\n if self.board.has_winner() == 1:\n return 1\n elif self.board.has_winner() == 2:\n return 2\n elif self.board.check_cats_game():\n return 0\n else:\n return -1", "def verify_winner(self):\r\n return self.count_pegs() == 1", "def _check_game_over(self):\n return self.game_board.check_game_over()", "def game_over(self):\n # TODO: Define the game over condition for Adventure.\n # use self.over to determine if the game if over\n return self.over", "def game_over(self):\n red_minion = 0\n blue_minion = 0\n red_master = 0\n blue_master = 0\n only_masters = True\n for row in self.board:\n for piece in row:\n if piece != 0:\n if not piece.master:\n if piece.player:\n blue_minion += 1\n else:\n red_minion += 1\n only_masters = False\n else:\n if piece.player:\n blue_master += 1\n else:\n red_master += 1\n if blue_minion + blue_master == 0:\n self.winner = \"Red\"\n self.red_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif red_minion + red_master == 0:\n self.winner = \"Blue\"\n self.blue_victories += 1\n self.number_of_games +=1\n self.game_over_screen()\n return True\n elif only_masters:\n if red_master > blue_master:\n self.winner = \"Red\"\n self.red_victories += 1\n elif blue_master > red_master:\n self.winner = \"Blue\"\n self.blue_victories += 1\n else:\n self.winner = \"Nobody\"\n self.number_of_games +=1\n self.game_over_screen()\n return True\n \n return False", "def game_over(players):\n active_players = players_with_decks(players)\n if not active_players or len(active_players) == 1:\n return True\n return False", "def is_game_over(self):\n board = list(self.board)\n for wins in self.WINNING:\n # Create a tuple\n w = (board[wins[0]], board[wins[1]], board[wins[2]])\n if w == ('X', 'X', 'X'):\n return 'X'\n if w == ('O', 'O', 'O'):\n return 'O'\n # Check for stalemate\n if ' ' in board:\n return None\n return ' '", "def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)", "def check_win(self):\r\n wins = [self.check_rows(), self.check_cols(), self.check_diag()]\r\n for case, pos in wins:\r\n if case != -1:\r\n print('Game over!')\r\n if self.grid[case][-1] == self.computer:\r\n print('The computer won!')\r\n return (True, pos)\r\n print('The player won!')\r\n return (True, pos)\r\n\r\n return (self.check_draw(), None)", "def check_if_game_over():\n check_for_winner()\n check_for_tie()", "def game_over(state):\n return wins(state, HUMAN) or wins(state, COMP)", "def game_over(state):\r\n return wins(state, HUMAN) or wins(state, COMP)", "def check_if_game_over():\n # Calling check for winners.\n check_for_winner()\n # Calling check it's tie or not.\n check_if_tie()", "def gameOver(self):\n\t\treturn self.lives == 0", "def __game_is_over(self, x, y):\n\t\tif np.count_nonzero(self.board) >= 42:\n\t\t\treturn True\n\n\t\tlines = self.__extract_lines(x, y)\n\n\t\tfor line in lines:\n\t\t\tif self.__winner_in_line(line) != 0:\n\t\t\t\treturn True\n\n\t\treturn False", "def _checkRoundOver(self):\n\n if not any(player.isAlive() for player in self.teams[0].players):\n 
self.endGame()", "def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return", "def is_over(self):\n return self.game.is_over()", "def game_over(self) -> bool:\n return self.rstate.game_over()", "def is_game_over(self):\n\n if len(self.next_pieces) == 0:\n return True", "def is_over(self):\n for el1, el2, el3 in self.WINNING_POSITIONS:\n if self.board[el1] == self.board[el2] == self.board[el3]:\n if self.board[el1] == 0:\n continue\n\n self.winner = self.board[el1]\n return True\n\n if self.__class__.EMPTY_POSITION_COUNTER not in self.board:\n return True\n\n return False", "def is_game_over(board):\n winner = check_winner(board)\n draw = check_draw(winner, board)\n return True if winner or draw else False", "def is_round_over(whose_turn,players):\n if ((len(players[whose_turn].hand.cards) == 0) and (players[whose_turn].has_discarded == True)):\n round_over = True\n else:\n round_over = False\n return round_over", "def game_over(self):\n return self.lives() < 0", "def check_for_game_won(self):\n all_moscuvites_captured = True\n king_captured = True\n king_escaped = True\n for piece in self.game_pieces:\n if piece.player == 2:\n all_moscuvites_captured = False\n elif piece.player == 3:\n king_captured = False\n king_coords = (piece.x,piece.y)\n escape_coords = [(0, 0), (0, 8),\n (8, 0), (8, 8)]\n if king_coords not in escape_coords:\n king_escaped = False\n if king_captured:\n return 2\n elif king_escaped or all_moscuvites_captured:\n return 1\n else:\n return 0", "def is_over(self):\n winner = self.get_winner()\n status = bool(winner or not self.available_moves)\n return status, winner", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def is_game_over(cls):\n cls.record_winner()\n cls.record_tie()", "def is_game_over(self):\n return self.state.all_avatars_placed() and self.state.is_game_over()", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def is_game_over(self):\n if max([max(row) for row in self.grid]) == 2 ** (self.grid_size ** 2):\n raise GameException('Congrats, You won !')\n\n # If there is a zero then the game is not over\n for row in self.grid:\n if 0 in row:\n return False\n\n # Check if two consecutive number (vertically or horizontally) are\n # equal. 
In this case the game is not over.\n for i in range(self.grid_size):\n for j in range(self.grid_size):\n # horizontal check\n if (i < self.grid_size - 1 and\n self.grid[i][j] == self.grid[i + 1][j]):\n return False\n # vertical check\n if (j < self.grid_size - 1 and\n self.grid[i][j] == self.grid[i][j + 1]):\n return False\n\n return True", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "def is_game_win(self):\n return not self.deck and not self.hand", "def _checkRoundOver(self):\n\n # if we already ended it doesn't matter\n if self.hasEnded():\n return\n\n if not any(player.isAlive() for player in self.teams[0].players):\n # allow continuing after wave 1\n if self._wave > 1:\n self.continueOrEndGame()\n else:\n self.endGame()", "def game_won(self):\n return all((foundation.is_full() for foundation in self.foundations.values()))", "def check_if_over(self):\n if self.remainingBalls == 0:\n self.check_if_won()\n self.game_over = True", "def is_game_over(self):\n if (self.check_win(HexBoard.RED) or self.check_win(HexBoard.BLUE) or \n len(self.get_move_list())==0):\n self.game_over = True\n return self.game_over", "def gameWon(self):\n \n wins = [ threeInARow( self.squares[0], self.squares[1], self.squares[2] ),\n threeInARow( self.squares[3], self.squares[4], self.squares[5] ),\n threeInARow( self.squares[6], self.squares[7], self.squares[8] ),\n threeInARow( self.squares[0], self.squares[3], self.squares[6] ),\n threeInARow( self.squares[1], self.squares[4], self.squares[7] ),\n threeInARow( self.squares[2], self.squares[5], self.squares[8] ),\n threeInARow( self.squares[0], self.squares[4], self.squares[8] ),\n threeInARow( self.squares[2], self.squares[4], self.squares[6] ) ]\n \n return any(wins)", "def check_player_reached():\n global round_start_timer, round_over\n\n if player1.alive and player1.rect.top < (platform_width // 2):\n add_time_points()\n reset_players()\n player1.wins += 1\n return True\n\n elif player2.alive and (player2.rect.top + player2.image.get_height()) > \\\n (SCREEN_HEIGHT - platform_width):\n player2.wins += 1\n round_over = True\n add_time_points()\n reset_players()\n return True", "def isGameOver(self):\n pass", "def game_tie(self):\n\n shape = self.board.shape\n if np.count_nonzero(self.board) == (shape[0] * shape[1]):\n # The board is full\n player = 0\n return True\n else:\n return False", "def winning_game_player(players):\n\n # in order for there to be a winner, the game must\n # be over\n if not game_over(players):\n return None\n\n # if the game is over, it could be that there is no\n # winner\n active_players = players_with_decks(players)\n if not active_players:\n return False\n\n # if the game is over than find the winner\n return players_with_decks(players)[0]", "def is_won(self):\n combinations = [*[(i, i + 3, i + 6) for i in range(3)],\n *[(i*3, i*3 + 1, i*3 + 2) for i in range(3)],\n (0, 4, 8), (2, 4, 6)]\n\n win = [*filter(lambda x: self[x[0]] == self[x[1]] == self[x[2]] and\n self[x[0]] != self.CELL_EMPTY, combinations)]\n return self[win[0][0]] if len(win) > 0 else self.CELL_EMPTY", "def is_game_over(self) -> bool:\n return self._is_game_over", "def won(self):\n for y in range(self.size):\n winning = []\n for x in range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n for x in range(self.size):\n winning = []\n for y in 
range(self.size):\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n winning = []\n for y in range(self.size):\n x = self.size-1-y\n if self.fields[x, y] == self.opponent:\n winning.append((x, y))\n if len(winning) == self.size:\n return winning\n return None", "def is_game_won(self):\n if self.game_is_tied():\n return False\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n if my_available_steps == 0 or opp_available_steps == 0:\n return True\n else:\n return False", "def game_over(self):\n\n if self._number_of_moves == 9:\n return True\n\n return self._number_of_moves == 9 or self.winner_found()", "def checkWin(self):\n winstates = [(0, 1, 2),\n (3, 4, 5),\n (6, 7, 8),\n (0, 3, 6),\n (1, 4, 7),\n (2, 5, 8),\n (0, 4, 8),\n (2, 4, 6)]\n win = False\n for state in winstates:\n if (self.gameState[state[0]] + self.gameState[state[1]] + self.gameState[state[2]]) == 3:\n self.handleWin(1)\n win = True\n elif (self.gameState[state[0]] + self.gameState[state[1]] + self.gameState[state[2]]) == -3:\n self.handleWin(-1)\n win = True\n\n if len([i for i in range(9) if self.gameState[i] == 0]) == 0 and not win:\n print(\"Draw yo\")\n self.handleDraw()\n return None", "def isGameOver(self):\n for row in range(0, self.rows):\n for col in range(0, self.cols):\n if self.isMine(row, col) and self.isClicked(row, col):\n return True\n return False", "def gameover(self):\n if self._gameover:\n return True\n \n if self.terminal():\n self._gameover = True\n return True\n \n return False", "def is_game_won(self):\n return True", "def __check_winner(self):\n for i in range(0, 3):\n col = self.__get_col(i)\n if col.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if col.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n row = self.__get_row(i)\n if row.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if row.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n for i in range(0, 2):\n diag = self.__get_diag(i)\n if diag.get(self.player_char) == 3:\n print('\\nYou win!')\n self.game_ended = True\n return\n if diag.get(self.opponent_char) == 3:\n print('\\nYou lose.')\n self.game_ended = True\n return\n if self.state.count(' ') == 0:\n print('\\nDraw!')\n self.game_ended = True", "def game_is_over(self) -> models.Conclusion:\n raise NotImplementedError", "async def check_game_over(self, game_id):\n game = await self.get_game(game_id)\n player1_stand = await self.check_player_standing(game[1])\n player2_stand = await self.check_player_standing(game[2])\n if player1_stand and player2_stand:\n return True\n else:\n return False", "def game_over(self) -> bool:\n for row in range(9):\n for col in range(9):\n if self._grid_sol[row][col] != self.get_cell(row, col):\n return False\n return True", "def won_game(self):\n for player in self.players:\n if len(player.cards) == 0:\n\n return True\n return False", "def check_game_status(self):\n for player in (\"1\", \"2\"):\n row_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 1, self.board\n ).any()\n col_win = np.apply_along_axis(\n lambda x: set(x) == {player}, 0, self.board\n ).any()\n d1_win = set(self.data[[0, 4, 8]]) 
== {player}\n d2_win = set(self.data[[2, 4, 6]]) == {player}\n if any([row_win, col_win, d1_win, d2_win]):\n return (\"win\", player)\n\n if self.counter[\"_\"] == 0:\n return (\"tie\", None)\n else:\n return (\"turn\", \"1\" if self.counter[\"1\"] == self.counter[\"2\"] else \"2\")", "def is_game_won(self) -> int:\n\n b = self.board\n for c1, c2, c3, c4 in _WINDOWS:\n if b[c1] and (b[c1] == b[c2] == b[c3] == b[c4]):\n print(\"win\", c1, c2, c3, c4)\n return b[c1]", "def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0", "def hasWin(self) :\n comparison = self.compareNumberUser()\n if (comparison == 'equal') :\n return True\n else :\n return False", "def terminal_test(self, state):\r\n # Anyone won already?\r\n ended, winner = self.anyone_won(state)\r\n\r\n # There is no draw stage in this game, WTF.\r\n # It's always on !!\r\n if ended:\r\n return ended, winner\r\n else:\r\n # Checking if still game play is left\r\n return False, None", "def game_on(self):\n doc = self.documentation\n return (self.draw.accepted or doc[len(doc)-1].accepted) and (self.board.stones_set < self.board.max_nr_stones) and (self.board.score[opponent(self.draw.player)] > 0)", "def win(self, player):\n if player == 1:\n a = self.player_one.moves\n else:\n a = self.player_two.moves\n winning_moves = []\n for i in range(1, 9, 3):\n winning_moves.append(range(i, i + 3))\n for i in range(1, 4):\n winning_moves.append(range(i, i + 7, 3))\n winning_moves.append([1, 5, 9])\n winning_moves.append([3, 5, 7])\n for move in winning_moves:\n flg = True\n for index in move:\n if index not in a:\n flg = False\n break\n if flg:\n return True, player\n if len(self.player_one.moves) + len(self.player_two.moves) == 9:\n self.print_space()\n self.display_board()\n self.print_space()\n print \" Games is drawn\"\n self.logging.debug(\"Game is draw, nobody won\")\n self.logging.debug(\"Enjoy the game again :)\")\n sys.exit(100)\n return False, player", "def is_winning(game: List[int]) -> bool:\n # performs the Vertical XOR by reducing as list of bool (lst) with xor lambda\n reduce_xor = (lambda lst: reduce(__xor__, lst, False))\n\n # converts game into binary and the converts/permutes the row and col\n game_bin_row_col = row_to_col(game_to_bin(game))\n\n # performs Vertical XOR on every column\n res_vert_xor = list(map(reduce_xor, game_bin_row_col))\n\n return reduce(__or__, res_vert_xor, False)", "def is_game_complete(game):\n game_round = min(len(game.creator_scores), len(game.invitee_scores))\n creator_score = sum(game.creator_scores[:game_round])\n invitee_score = sum(game.invitee_scores[:game_round])\n return creator_score >= GAME_SCORE_TO_WIN or invitee_score >= GAME_SCORE_TO_WIN", "def check_winner(self):\n\t\tif self.check_diagonals() or self.check_rows() or self.check_columns():\n\t\t\treturn True\n\t\telif self.board_is_full():\n\t\t\tprint(\"There was a draw, everyone lost\")\n\t\t\treturn None\n\t\treturn False", "def has_won(board, player):\r\n return False", "def is_end_game(self):\n win = self.is_game_won()\n tie = self.game_is_tied()\n return win or tie", "def check_win(self):\n win = None\n for pos in self.winning_pos:\n win = self.is_match(set(self.get_cells(pos)))\n if win:\n return win\n if not self.open_tiles():\n return \"Draw\"\n return win", "def has_winner(self):\r\n\r\n\t\t\"Check for horizonal win\"\r\n\r\n\t\tfor x in range(0, 3):\r\n\r\n\t\t\tif self.game_board[x][0] == self.game_board[x][1] and self.game_board[x][1] == self.game_board[x][2]:\r\n\r\n\t\t\t\treturn 
self.game_board[x][0]\r\n\r\n\t\t\"Check for vertical win\"\r\n\r\n\t\tfor y in range(0, 3):\r\n\r\n\t\t\tif self.game_board[0][y] == self.game_board[1][y] and self.game_board[1][y] == self.game_board[2][y]:\r\n\r\n\t\t\t\treturn self.game_board[0][y]\r\n\r\n\t\t\"Check for diagonal from left to right\"\r\n\t\r\n\t\tif self.game_board[0][0] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][2]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.game_board[0][2] == self.game_board[1][1] and self.game_board[1][1] == self.game_board[2][0]:\r\n\t\t\treturn self.game_board[1][1]\t\r\n\r\n\t\tif self.count == 8:\r\n\r\n\t\t\treturn \"Tie\"\r\n\r\n\t\telse:\r\n\r\n\t\t\treturn \"0\"\r\n\r\n\r\n\t\tpass", "def check_win_lose(self):\n if self.b.get_player_i() == 7: # player got to the bank\n return 1 # win\n if self.b.get_chaser_i() == self.b.get_player_i(): # chaser catch the player\n return 2 # lose\n return 0 # nothing", "def has_won(board, player):\n return False", "def check_game_over(self, row, col):\n player_symbol = self.board[row][col]\n\n # Top Right: Row -1 Col 1\n # Bottom Left: Row 1 Col -1\n self.check_four_in_a_row(player_symbol, row, col, -1, 1, 1, -1)\n\n # Top Left: Row -1 Col -1\n # Bottom Right Row 1 Col 1\n self.check_four_in_a_row(player_symbol, row, col, -1, -1, 1, 1)\n\n # Horizontal: Row 0 Col 1, Row 0 Col -1\n self.check_four_in_a_row(player_symbol, row, col, 0, 1, 0, -1)\n\n # Vertical: Row 1 Col 0, Row -1 Col 0\n self.check_four_in_a_row(player_symbol, row, col, 1, 0, -1, 0)\n\n if self.turns >= self.num_playable_rows * self.num_playable_columns:\n self.game_over = True\n self.board_full = True", "def has_winner(self):\n\n if self.num_black_pieces == 0 or len(self.get_all_valid_moves(Player.black)) == 0:\n return Player.white\n elif self.num_white_pieces == 0 or len(self.get_all_valid_moves(Player.white)) == 0:\n return Player.black\n elif self.repetition_happened() or self.passive_game():\n return \"Tie\"\n else:\n return None", "def check_win(self):\n\n # Sets rings initially to False\n black_ring = False\n white_ring = False\n\n # Gets board\n board = self._game_board.get_board_area()\n\n # Iterate through stone-holding points\n for i in range(1, 19):\n for j in range(1, 19):\n # Get the surroundings of each point\n surround = {board[i+1][j], board[i-1][j], board[i+1][j+1], board[i+1][j-1], board[i-1][j],\n board[i-1][j-1], board[i-1][j+1], board[i][j-1]}\n\n # If the point is empty and surrounded by \"B\", there's a black ring\n if board[i][j] == \" \" and surround == {\"B\"}:\n black_ring = True\n\n # If the point is empty and surrounded by \"W\", there's a white ring\n if board[i][j] == \" \" and surround == {\"W\"}:\n white_ring = True\n\n if not black_ring:\n return \"WHITE_WON\"\n\n if not white_ring:\n return \"BLACK_WON\"\n\n return \"UNFINISHED\"", "def check_for_winner(self):\r\n\r\n # Iterate through the rows\r\n for row in range(self.height):\r\n if self.board[row][0] == self.board[row][1] == self.board[row][2] and self.board[row][0] != None:\r\n return Board.WIN if self.board[row][0] else Board.LOSS\r\n\r\n # Iterate through the columns\r\n for col in range(self.width):\r\n if self.board[0][col] == self.board[1][col] == self.board[2][col] and self.board[0][col] != None:\r\n return Board.WIN if self.board[0][col] else Board.LOSS\r\n\r\n # Diagonals\r\n if self.board[0][0] == self.board[1][1] == self.board[2][2] and self.board[0][0] != None:\r\n return Board.WIN if self.board[0][0] else Board.LOSS\r\n if self.board[0][2] == 
self.board[1][1] == self.board[2][0] and self.board[0][2] != None:\r\n return Board.WIN if self.board[0][2] else Board.LOSS\r\n\r\n # No winner yet\r\n return 0", "def checkWinAll(self, model, previousWin):\r\n previous = self.__render\r\n self.__render = Render.NOTHING # avoid rendering anything during execution of the check games\r\n\r\n win = 0\r\n lose = 0\r\n \r\n cellsRanking = {}\r\n sumForProb = 0\r\n for cell in self.allowableCells:\r\n if self.play(model, cell)[0] == Status.WIN:\r\n win += 1\r\n else:\r\n lose += 1\r\n\r\n self.__render = previous # restore previous rendering setting\r\n\r\n logging.info(\"won: {} | lost: {} | win rate: {:.5f}\".format(win, lose, win / (win + lose)))\r\n\r\n result = True if lose == 0 else False\r\n \r\n if lose == 0:\r\n previousWin += 1\r\n else:\r\n previousWin = 0 \r\n\r\n return result, win / (win + lose), previousWin", "def play(self):\n if self.stats['round'] == 0:\n if self.data['personalities'] and self.data['events']:\n self.choose_opponent()\n self.resolve_conflict()\n else:\n self.stats['round'] += 1\n elif self.stats['round'] == 1:\n if self.data['locations']:\n self.choose_location()\n self.resolve_conflict()\n else:\n self.stats['round'] += 1\n else:\n print(\"You've won\")\n self.game_over = True\n return self.stats", "def is_not_game_ended(data_map):\n\n continue_game = True\n loser = None\n winner = None\n\n # If a player has not any units, the other player win.\n for i in range(2):\n if not len(data_map['player' + str(i + 1)]) and continue_game:\n loser = data_map['player' + str(i + 1)]\n winner = data_map['player' + str(3 - (i + 1))]\n continue_game = False\n\n # If there's 20 turn without any attack, player1 loose and player2 win.\n if float(data_map['attack_turn']) / 2 > 19:\n loser = data_map['player1']\n winner = data_map['player2']\n continue_game = False\n\n return continue_game, loser, winner", "def determineEndGame(self):\n\n print(\"noWinners: \" + str(self.noWinners) + \", noTotKids: \" + str(self.noTotKids))\n\n # TODO scegliere come determinare la fine del gioco\n # if self.noWinners == self.noTotKids - 1: # end-game test\n if self.noWinners == self.noTotKids:\n print(\"ho determinato la fine del gioco\")\n return True\n else:\n print(\"colore toccato ma la partita non e' finita\")\n return False", "def is_unhappy(self):\n #checked!#\n ###your code here###\n same=0\n for i in self.home.neighbors:\n if i.occupant!=None:\n if i.occupant.group==self.group:\n same+=1\n happniess=float(same)/len(self.home.neighbors)\n if happniess<self.happiness_threshold:\n return True\n else:\n return False", "def is_over(self):\n return (self.possible_moves() == []) or self.loss_condition()", "def is_over(self, board):\n if _winner(board) != 0:\n return True\n return False", "def check_game_over(board: Board, whites_turn: bool) -> bool:\n if is_in_check(board, whites_turn) and can_move(board, whites_turn):\n turn = 'White' if whites_turn else 'Black'\n print()\n print(f'{turn} is in check')\n return False\n elif is_in_check(board, whites_turn) and can_move(board, whites_turn) == False:\n print()\n print('Checkmate')\n return True\n elif is_stalemate(board, whites_turn):\n print()\n print('Stalemate')\n return True\n else:\n return False", "def play_game(self):\n while self.over is False:\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p1.get_move(self.board)\n self.board.print_board()\n winner = self.check_over()\n if winner != -1:\n return winner\n self.p2.get_move(self.board)", "def 
check_win(players: List[Player]) -> Tuple[bool, Optional[Player]]:\n total_players = len(players)\n for player in players:\n if player.influence == 0:\n total_players -= 1\n if total_players == 1:\n for player in players:\n if player.influence >0:\n return True, player\n return False, None", "def game_over(_user_id):\n _board = boards[_user_id]\n return _board.is_game_over()", "def gameOver(self):\n i = 0 # accumulator for the number of None objects\n for x in range(ALIEN_ROWS):\n for y in range(ALIENS_IN_ROW):\n if self._aliens[x][y] ==None:\n i +=1\n if i == ALIEN_ROWS * ALIENS_IN_ROW:\n self._gameOver = True\n\n for x in range(ALIEN_ROWS):\n for y in range(ALIENS_IN_ROW):\n if self._aliens[x][y] !=None:\n positiony = self._aliens[x][y].getAY() - ALIEN_HEIGHT/2\n if posy<= self._dline:\n self._gameOver = False", "def victory_check(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get the possible ways to win\n possible_wins = board.get_wins(affinity)\n \n # if we can win, pick a good win \n if len(possible_wins) == 1: return possible_wins[0]\n elif len(possible_wins) > 1:\n best_win = None\n wins_by_x = {}\n wins_by_y = {}\n for win in possible_wins:\n if win[0] not in wins_by_x.keys():\n wins_by_x[win[0]] = []\n if win[1] not in wins_by_y.keys():\n wins_by_y[win[1]] = []\n wins_by_x[win[0]].append(win)\n wins_by_y[win[1]].append(win)\n for y in wins_by_y:\n if len(wins_by_y[y]) > 1: \n for win in wins_by_y[y]:\n if best_win is None or win[0] < best_win[0]:\n best_win = win \n return best_win\n\n else: return None", "def is_game_lost(self):\n values = [self.hand[i]._lvalue + self.hand[i]._rvalue for i in range(len(self.hand))]\n return not sum_in_list_dyn(values, self.number_point)", "def isGameOver(board, *args, **kwargs):\n black_win = consecutive_score(\"black\", board, board.size)\n white_win = consecutive_score(\"white\", board, board.size)\n if black_win >= 100 and white_win >= 100:\n return \"tie\"\n elif black_win >= 100 and white_win <= 100:\n return \"black\"\n elif black_win <= 100 and white_win >= 100:\n return \"white\"\n else:\n return -1", "def winning_event(self, player):\n # vertical check\n for col in range(GameData.columns):\n if self.board[0][col] == player and self.board[1][col] == player and self.board[2][col] == player:\n self.draw_vertical_winning_line(col, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # horizontal check\n for row in range(GameData.rows):\n if self.board[row][0] == player and self.board[row][1] == player and self.board[row][2] == player:\n self.draw_horizontal_winning_line(row, player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # ascending diagonal heck\n if self.board[2][0] == player and self.board[1][1] == player and self.board[0][2] == player:\n self.draw_asc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n # descending diagonal win chek\n if self.board[0][0] == player and self.board[1][1] == player and self.board[2][2] == player:\n self.draw_desc_diagonal(player)\n print(\"Player {} has won the game!\".format(player))\n self.game_over = True\n return True\n\n return False" ]
[ "0.75529164", "0.74890757", "0.73833823", "0.73335296", "0.7333373", "0.72050595", "0.70841414", "0.705282", "0.7051845", "0.70401967", "0.6998241", "0.69875365", "0.6977598", "0.6947065", "0.69441944", "0.6942049", "0.69409615", "0.69100434", "0.6907182", "0.6895013", "0.689254", "0.6864129", "0.6858663", "0.6856255", "0.6853972", "0.6834625", "0.67975646", "0.67975074", "0.6787823", "0.67834437", "0.6777791", "0.6768507", "0.67675555", "0.67673045", "0.67617464", "0.6742236", "0.6735091", "0.67267764", "0.6723736", "0.67235", "0.6714662", "0.66961753", "0.6663297", "0.6650618", "0.66456383", "0.66229564", "0.66226405", "0.6611483", "0.6606639", "0.6596068", "0.6590693", "0.6589285", "0.65796876", "0.6564864", "0.65572286", "0.6554924", "0.65484995", "0.65458715", "0.6490743", "0.6484579", "0.64818376", "0.64560556", "0.6442077", "0.6441256", "0.6434982", "0.64334285", "0.6427821", "0.64125234", "0.640933", "0.64040637", "0.6393729", "0.6390652", "0.6376509", "0.6368323", "0.63620615", "0.6357347", "0.6357237", "0.6354516", "0.6341487", "0.63261455", "0.6320126", "0.63093585", "0.63076043", "0.6301174", "0.6297625", "0.6296961", "0.62967575", "0.6280389", "0.6269796", "0.62618935", "0.62520576", "0.62463385", "0.6243144", "0.62399", "0.6239806", "0.62264806", "0.6218997", "0.62182856", "0.62125236", "0.6212372" ]
0.691553
17
Returns a string interpretation of the board.
def __str__(self): boardString = "\n{0}|{1}|{2}\n-----\n{3}|{4}|{5}\n-----\n{6}|{7}|{8}\n" return boardString.format(self.board[0], self.board[1], self.board[2], self.board[3], self.board[4], self.board[5], self.board[6], self.board[7], self.board[8])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def board_string(self):\n s = \"\"\n for i, v in enumerate(self.board):\n # if i % 81 == 0:\n # s += \"\\n\"\n if v is None:\n s += \"0\"\n else:\n if v.color == StoneColor.black:\n s += \"1\"\n else:\n s += \"2\"\n return s", "def __str__(self):\n board = ''\n for row in range(self.height):\n if row > 0:\n board += '\\n'\n for col in range(self.width):\n if self.board[row][col] == '':\n board += '| '\n else:\n board += ('|' + self.board[row][col])\n board += '|'\n board += ('\\n' + '-' * 2 * self.width + '\\n')\n for i in range(self.width):\n board += (' ' + str(i))\n return board", "def __repr__(self):\n W = self.width\n H = self.height\n\n s = '' # the string to return\n for row in range(0, H):\n s += '|'\n for col in range(0, W):\n s += self.data[row][col] + '|'\n s += '\\n'\n\n s += (2 * W + 1) * '-' # bottom of the board\n s += '\\n'\n\n x = -1\n for i in range(W):\n if x == 9:\n x = 0\n s += \" \" + str(x)\n else:\n x += 1\n s += \" \" + str(x)\n\n return s # the board is complete, return it", "def __repr__(self):\n t = ''\n for x in range(len(self.board)):\n for y in range(len(self.board[0])):\n t += str(self.board[x][y]) + ' '\n t += '\\n'\n return t", "def uninterpret_board(self):\n\t\tboard_string = ''\n\t\tfor i in range(len(self.board)):\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tboard_string+=self.board[i][j]\n\n\t\treturn board_string", "def __str__(self):\n board = \"\"\" 0 1 2 3 4 5\\n\"\"\"\n\n for y in range(Board.board_size):\n board += str(y) + \" \"\n for x in range(Board.board_size):\n piece = self.board[x][y] if self.board[x][y] is not None else \".\"\n\n piece = str(piece).lower() if piece in self.player_1_pieces else str(piece)\n\n board += piece\n board += \" \"\n board += \"\\n\"\n return board", "def StringFromBoard(board):\n\trows = []\n\tfor row in board:\n\t\trows.append('|'.join([' '+square+' ' for square in row]))\n\treturn '\\n-----------\\n'.join(rows)", "def __str__(self):\n rep = \"\"\n for row in range(self._dim):\n for col in range(self._dim):\n rep += STRMAP[self._board[row][col]]\n if col == self._dim - 1:\n rep += \"\\n\"\n else:\n rep += \" | \"\n if row != self._dim - 1:\n rep += \"-\" * (4 * self._dim - 3)\n rep += \"\\n\"\n return rep", "def __repr__(self) -> str:\n\t\t\n\t\trepr = \"\"\n\t\tfor row in self.board:\n\t\t\tfor element in row:\n\t\t\t\tif element:\n\t\t\t\t\trepr = repr + \"o \"\n\t\t\t\telse:\n\t\t\t\t\trepr = repr + \"@ \"\n\t\t\trepr = repr + \"\\n\"\n\t\treturn repr", "def boardToString(self, margins={}):\n b = self.board\n rg = range(b.size())\n left = ' '*margins.get('left', 0)\n s = '\\n'.join(\n [left + ' '.join([self.getCellStr(x, y) for x in rg]) for y in rg])\n return s", "def board(self) -> str:\n divider = \"+\" + \"-\" * 23 + \"+\"\n b = [divider]\n for i in range(9):\n r = []\n for j in range(3):\n s = tool.index_counter(i, j * 3)\n r.append(' '.join(str(i) if i > 0 else ' '\n for i in self.grid[s:s+3]))\n b.append(f\"| {r[0]} | {r[1]} | {r[2]} |\")\n if (i + 1) % 3 == 0:\n b.append(divider)\n return \"\\n\".join(b)", "def __str__(self):\n\t\tstring = \"\"\n\t\tfor i in self.board:\n\t\t\tfor j in i:\n\t\t\t\tstring += str(j)\n\t\t\tstring += \"\\n\"\n\t\treturn string", "def __str__(self):\r\n return str(self.board)", "def __str__(self):\r\n \r\n #return \"The 2048 board is \" + str(self._cells)\r\n string = \"\"\r\n for row in range(self._grid_height):\r\n for column in range(self._grid_width):\r\n if column == self._grid_width -1:\r\n string += str(self._cells[row][column]) + \"\\n\"\r\n else:\r\n 
string += str(self._cells[row][column]) +\", \"\r\n return \"The 2048 board is \"+ str(self._grid_height) + \"x\" + str(self._grid_width) + \" and contains: \" + \"\\n\" + string", "def __str__(self):\n result = \"\"\n for i in range(3):\n for j in range(3):\n if self.board[i][j] == 5:\n result += \" x\"\n elif self.board[i][j] == 7:\n result += \" о\"\n else:\n result += \" #\"\n result += \"\\n\"\n return result", "def __str__(self):\n s = \" 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\\n\"\n board = initial_board()\n count = 1\n for i in self.occupied:\n board[i[0]][i[1]] = self.occupied[i]\n space = ''\n for i in range(0, 16):\n space += ' '\n start = '---'.join(space)\n s += start+'\\n|'\n for row in range(1,16):\n for col in range(1,16):\n if use_color and (row, col) == self.action:\n s += '\\033[91m'\n if board[row][col] == 0:\n s += ' |'\n elif board[row][col] == 1:\n s += ' O |'\n else:\n s += ' X |'\n if use_color and (row, col) == self.action:\n s += '\\033[0m'\n s += '\\033[0m'\n s+=str(count)+'\\n'+start+'\\n|'\n count += 1\n\n s = s[:len(s)-1]\n s += \"\\n*****************************************************************************\"\n return s[:len(s)-1]", "def __str__(self):\n \n # top row\n result = ' '\n result = '\\n ' + '-' * (self.DIM*2+5) + '\\n'\n \n # board rows\n for row in range(self.DIM):\n if row is 3 or row is 6:\n result += '|' + '-' * (self.DIM*2+5) + '|' + '\\n'\n # result += '|-------+-------+-------|\\n'\n result += '| '\n for col in range(self.DIM):\n if col is 3 or col is 6:\n result += '| '\n if self.board[row][col] == SudokuConfig.EMPTY:\n result += '.'\n else:\n result += str(str(self.board[row][col]))\n if col != self.DIM-1:\n result += ' '\n result += ' |' + '\\n'\n \n # bottom row\n result += ' ' + '-' * (self.DIM*2+5) + '\\n'\n result += ' '\n result += '\\n'\n \n return result", "def __str__(self):\n if self._active_player:\n def piece_to_index(piece):\n return (piece & 0xF)\n else:\n def piece_to_index(piece):\n return (piece & 0xE) | (0 if piece & 1 else 1)\n\n return '\\n'.join(map(\n lambda posY, row: ''.join(map(\n lambda posX, piece: self.EMOJI[\n piece_to_index(piece)\n if piece else\n 14 + ((posY + posX) % 2)],\n count(), row)),\n count(),\n self.board if self._active_player else reversed(\n [reversed(row) for row in self.board])))", "def __str__(self):\n board_lists = [['_']*self.__width for rows in range(self.__height)]\n for car in self.__cars:\n car_coords = car.car_coordinates()\n for item in car_coords:\n if item == (3,7):\n pass\n else:\n board_lists[item[0]][item[1]] = car.get_name()\n board_str = '\\n'.join(' '.join(sub) for sub in board_lists)\n return board_str", "def __str__(self):\r\n result = \"\"\r\n for line in self.board:\r\n for i in line:\r\n if i is None:\r\n result += \" \"\r\n else:\r\n result += i + \" \"\r\n result += \"\\n\"\r\n\r\n return result", "def _to_string(board: Tuple[Tuple[Optional[int]]], width: int) -> str:\n display = \"\\n\"\n for i in range(width):\n for j in range(width):\n line = board[j][i * width:i * width + width]\n start = j * width ** 2 + i * width\n for k, space in enumerate(line):\n if space == 0:\n space = start + k\n else:\n space = (\"X\" if space == 1\n else \"O\" if space == -1\n else \"-\")\n display += \"{0:>4}\".format(space)\n display += \" \" * width\n display += \"\\n\"\n return display", "def __repr__(self):\n r = \"\"\n for row in self.board:\n for cell in row:\n if cell == \"\":\n cell = color_magenta(\"_\")\n r += cell + \" \" # for all the empty strings, we will replace it with an 
'_'.\n r += \"\\n\"\n return r", "def __str__(self):\n return '\\n'.join(str(self._board[j]) for j in range(self._n))", "def __str__(self):\n # replace with your code\n board = \"\"\n for index in range(self.grid_height):\n board += \"[\"\n for inner_index in range(self.grid_width):\n board += str(self.board[index][inner_index]) + \" \"\n else:\n board += \"]\\n\"\n return board", "def __str__(self):\n b = ''\n for i in range(7): # 7 steps in the board\n if i == self.chaser_i: # chaser position\n b += '|' + str(i) + '| chaser |\\n'\n elif i == self.player_i: # player position\n b += '|' + str(i) + '| player |\\n'\n else:\n b += '|' + str(i) + '| |\\n'\n b += '|7| bank |\\n' # bank position\n return b", "def convertBoard(self):\n \n board = \"\"\n \n for m in self.squares:\n board += str(convertMarker(m)) + \" \"\n \n return board", "def print_board(self):\n board = \"\"\n for i in range(3):#need to change this in the future\n for j in range(3):#need to change this in the future\n board += self.board[i][j]\n if j != 2:#need to change this in the future\n board += \" | \"\n board += \"\\n\"\n return board", "def __repr__(self):\n s = dashes = \"\".join([ ' -' for i in range(self.BoardSize) ])\n for row in range( self.BoardSize ):\n sRow = '|'\n for col in range( self.BoardSize ):\n sRow += str(self.CurrentGameboard[row][col]) + '|'\n s += '\\n' + sRow + '\\n' + dashes\n return s", "def __str__(self):\n #formatting board correctly\n formatted_board = \"\"\n for i in range(self.size):\n formatted_board += str(self.board[i]) + \"\\n\"\n return \"Board size: \" + str(self.size) + \"\\n\" + \"Number of Queens placed: \" + str(self.num_queens_placed) + \"\\n\" + str(formatted_board)", "def get_board_as_string(game):\n str_board = \"\\n\" # every board starts with a blank line\n row = 0 # used to print the board\n\n # creates a board of 5 lines. 
3 rows, 2 dashed.\n for line in range(1, 6):\n\n # every odd line\n if line % 2 != 0:\n\n # add a row to the string str_board\n str_board += \"{} | {} | {}\".format(game['board'][row][0], game['board'][row][1], game['board'][row][2])\n\n # increment the row\n row += 1\n\n # every even line\n else:\n str_board += \"--------------\"\n\n str_board += \"\\n\" # add line break at the end of every line\n\n return str_board", "def macro_str(self):\n str = '-' * (2 * self.SIZE + 1) + '\\n'\n for row in self.boards:\n str += ' '\n for board in row:\n str += board.state.value + ' '\n str += '\\n' + '-' * (2 * self.SIZE + 1) + '\\n'\n return str", "def get_board_state_pretty(self):\n\n board_state = ''\n for i in range(0, 3):\n board_state += ' | '.join([self.board['{}{}'.format(i, j)] for j in range(0, 3)])\n board_state += '\\n'\n return board_state", "def __str__(self) -> str:\n return self.board", "def __repr__(self):\r\n numLetters = self.numLetters\r\n S = ''\r\n S += 3*'\\n'\r\n S += ' '\r\n for i in range(numLetters):\r\n S += self.currentBoard[i] + ' '\r\n\r\n return S", "def __repr__(self):\n return f'Board({ self.board !r})'", "def displayBoard(self):\n res = ''\n for i in range(0, self.size):\n res += '|'\n for j in range(0, self.size):\n res += ' ' + str(self.board[i][j])\n res += '\\n'\n res += '+'\n for i in range(0, self.size * 2):\n res += '-'\n res += '\\n '\n for i in range(1, (self.size + 1)):\n res += (' ' + str(i))\n return res", "def __str__(self):\n def align_column(grid):\n board = \"\"\n for i in range(self.n):\n board += str(grid[i]) + \"\\n\"\n return board.strip()\n return (\"===Current Stage===\\n\"\n \"{}\\n\"\n \"====Goal Board=====\\n\"\n \"{}\".format(align_column(self.from_grid),\n align_column(self.to_grid)))", "def board_to_string(board):\n ordered_vals = []\n for r in ROW:\n for c in COL:\n ordered_vals.append(str(board[r + c]))\n return ''.join(ordered_vals)", "def board_to_string(board):\n ordered_vals = []\n for r in ROW:\n for c in COL:\n ordered_vals.append(str(board[r + c]))\n return ''.join(ordered_vals)", "def __str__(self):\n str = '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n for row in self.boards:\n for i in range(self.SIZE):\n str += '|'\n for board in row:\n for square in board.export_grid()[i]:\n str += square.value\n str += '|'\n str += '\\n'\n str += '-' * (self.SIZE ** 2 + self.SIZE + 1) + '\\n'\n return str", "def board2str(board, end='\\n'):\n s = ''\n for x in range(board.shape[0]):\n for y in range(board.shape[1]):\n s += str(board[x][y]) + '\\t'\n s += end\n return s[:-len(end)]", "def __str__(self):\n board = ''\n board_2 = ''\n\n for row in self.from_grid:\n for space in row:\n board += ' ' + space\n board += '\\n'\n\n for row in self.to_grid:\n for space in row:\n board_2 += ' ' + space\n board_2 += '\\n'\n\n return 'Current State:\\n' + board + 'Target State:\\n' + board_2", "def __repr__(self):\n\n # Creates a deep copy of _board and changes all None values to underscores.\n temp_board = copy.deepcopy(self.get_board())\n\n for row_index, row in enumerate(temp_board):\n\n for column_index, column in enumerate(row):\n\n if column is None:\n\n row[column_index] = '____' + self.reverse_position(column_index, row_index) + '_____'\n\n # str() with position is mapped onto each list within _board list,\n # converted to string, and then joined by a nextline character.\n return '\\n\\n'.join(map(str, temp_board))", "def __str__(self):\n line = ''\n line += self.board_state.__str__()\n line += self.move.__str__()\n line += '\\n'\n return 
line", "def __getAsciiString(self):\n lines = []\n horizontalLine = ('-' * (26))\n lines.append(horizontalLine)\n for row in self.board:\n rowLine = '|'\n for col in row:\n if col == -1:\n col = 'O'\n if col == 0:\n col = '-'\n if col == 1:\n col = 'X'\n rowLine = rowLine + ' ' + col.__str__() + ' |'\n lines.append(rowLine)\n lines.append(horizontalLine)\n return '\\n'.join(lines)", "def __str__(self) -> str:\r\n output: str = \"\"\r\n\r\n for row_i in range(Board._NUM_ROWS):\r\n for col_i in range(Board._NUM_COLS):\r\n pos: Pos2D = Pos2D(col_i, row_i)\r\n output += (\"{} \".format(self.squares[pos].get_representation()))\r\n # Finished row, add new line.\r\n output += \"\\n\"\r\n\r\n return output", "def __str__(self):\r\n # replace with your code\r\n return str(self._board[0]) + \"\\n\" + str(self._board[1]) + \"\\n\" + str(self._board[2]) + \"\\n\" + str(self._board[3]) + \"\\n\\n\"", "def __str__(self) -> str:\n not_actual = self.current_board\n representation = self.current_board\n\n for index in range(len(not_actual)):\n if not_actual[index: index + 2] in ['31', '32', '33', '34', '36',\n '37', '38']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n if not_actual[index: index + 2] in ['41', '42', '43', '44', '45',\n '46', '47', '48']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n if not_actual[index: index + 2] in ['51', '52', '53', '54', '55',\n '56', '57', '58']:\n representation = representation.replace(\n not_actual[index: index + 2], '@')\n return representation", "def get_board_state(self):\n\n board_state = ''\n for i in range(0, 3):\n board_state += ''.join([self.board['{}{}'.format(i, j)] for j in range(0, 3)])\n return board_state", "def __str__(self):\n string = \"\"\n for row in self.layout:\n for tile in row:\n string+= str(tile) + \" \"\n string+= \"\\n\"\n return string", "def print_board(board):\n rep = ''\n for row in xrange(8, 0, -1):\n begin, end = 10*row + 1, 10*row + 9\n rep += '%d %s\\n' % (row-1, ' '.join(board[begin:end]))\n rep += ' %s\\n' % ' '.join(map(str, range(8)))\n return rep", "def __str__(self):\n\t\t\n\t\tdef mapping(x):\n\t\t\tif x == 1:\n\t\t\t\t# WHITE\n\t\t\t\treturn 'O'\n\t\t\telif x == -1:\n\t\t\t\t# BLACK\n\t\t\t\treturn 'X'\n\t\t\telse:\n\t\t\t\t# Empty\n\t\t\t\treturn '-'\n\t\t\n\t\ts = 'BLACK - X\\n'\n\t\ts += 'WHITE - O\\n\\n'\n\t\tfor j in self.rows:\n\t\t\ts += j\n\t\t\ts += ' '\n\t\t\ts += ''.join(mapping(self[i+j]) for i in self.columns)\n\t\t\ts += '\\n'\n\t\treturn s + '\\n ' + self.columns + '\\n'", "def showBoard(self):\n \n brd = \"\\n | | \\n\" + \\\n \" \" + self.squares[0] + \" | \" + self.squares[1] + \" | \" + self.squares[2] + \" \\n\" + \\\n \"___|___|___\\n\" + \\\n \" | | \\n\" + \\\n \" \" + self.squares[3] + \" | \" + self.squares[4] + \" | \" + self.squares[5] + \" \\n\" + \\\n \"___|___|___\\n\" + \\\n \" | | \\n\" + \\\n \" \" + self.squares[6] + \" | \" + self.squares[7] + \" | \" + self.squares[8] + \" \\n\" + \\\n \" | | \\n\"\n\n return brd", "def __str__(self):\n ans = \"\"\n for row in range(self._grid_height):\n ans += str(self._cells[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\r\n\t\toutStr = \"\"\r\n\t\toutStr += \"Heuristic Level: \" + str(self.heuristic)\r\n\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\t\tfor row in self.board:\r\n\t\t\ttempStr = (\"\\n|\" + \" %2d |\" * self.n)\r\n\t\t\toutStr += tempStr % tuple(row)\r\n\t\t\toutStr += \"\\n-\" + \"-----\"*self.n\r\n\r\n\t\treturn outStr", "def __repr__(self):\r\n rep_tpl = 
(self.__board_size, self.__bombs_dict, self.__ships)\r\n return str(rep_tpl)", "def __str__(self):\n puzzle_string = '—' * 13 + '\\n'\n for i in range(self.PUZZLE_NUM_ROWS):\n for j in range(self.PUZZLE_NUM_COLUMNS):\n puzzle_string += '│{0: >2}'.format(str(self.position[i][j]))\n if j == self.PUZZLE_NUM_COLUMNS - 1:\n puzzle_string += '│\\n'\n\n puzzle_string += '—' * 13 + '\\n'\n return puzzle_string", "def __str__(self):\n string = ''\n for row in self.board:\n for item in row:\n if item == None:\n string += \"_ \"\n else:\n string += f\"{item.name:<2}\"\n string += '\\n'\n \n return string", "def __repr__(self):\r\n # Initialize the return string\r\n s = ''\r\n\r\n for row in range(self.height):\r\n # Print the index of the row\r\n s = s + str(row % 10) + ' |'\r\n\r\n for col in range(self.width):\r\n s += self.grid[row][col]\r\n s += '|'\r\n\r\n s += '\\n'\r\n s += '--' * (self.width + 1)\r\n s += '-'\r\n s += '\\n'\r\n \r\n s += ' '\r\n for i in range(self.width):\r\n s += ' ' + str(i % 10) \r\n \r\n return s", "def __repr__(self):\r\n answer = '\\n'\r\n for i in range(len(self)):\r\n for j in range(len(self)):\r\n if (i+j)%2 == 0:\r\n answer += '{}'.format(self.quantum_board[i][j])\r\n else:\r\n answer += '-'\r\n answer += '\\t'\r\n answer += '\\n'\r\n return answer", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __str__(self):\n ans = \"\"\n for row in range(self._height):\n ans += str(self._grid[row])\n ans += \"\\n\"\n return ans", "def __repr__(self):\r\n answer = '\\n'\r\n for i in range(len(self)):\r\n for j in range(len(self)):\r\n if (i+j)%2 == 0:\r\n answer += '{}'.format(self.board[i][j])\r\n else:\r\n answer += '-'\r\n answer += '\\t'\r\n answer += '\\n'\r\n return answer", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __str__(self):\r\n ans = \"\"\r\n for row in range(self._height):\r\n ans += str(self._grid[row])\r\n ans += \"\\n\"\r\n return ans", "def __str__(self):\n string = ''\n\n for row in range(0, self.height):\n for col in range(0, self.width):\n string += self.cells[(row, col)]\n string += '\\n'\n\n return string", "def board_string(self, players):\n if len(self.user_guess) == 1:\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1]}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += \"-------------------\\n\"\n\n board = \"\\n-------------------\\n\"\n board += f\"Player {players[0].get_name()}: {self.user_guess[0]}, {self.applied_guess[0]}\\n\"\n board += f\"Player {players[1].get_name()}: {self.user_guess[1]}, {self.applied_guess[1]}\\n\"\n board += \"-------------------\\n\"\n\n return board", "def print_board(self):\n print('Board:')\n print('\\n'.join([''.join(['{:4}'.format(item) for item in row]) for row in self.board]))", "def __repr__(self):\n 
representantion = ''\n\n for i in range(3):\n for j in range(3):\n representantion += str(self.state[3 * i + j])\n\n if j == 2 and i != 2:\n representantion += '\\n'\n else:\n representantion += ' '\n\n return representantion", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r)\n #if p != None and p.header.frame_id == \"chess_board\":\n # print \"Warning, frame is chess_board:\", c+str(r)", "def state_to_string(board_state):\n return str(board_state)", "def transferBoardToString(self,board):\n #int(False) will be 0\n outputString = str()\n for i in board:\n outputString = outputString + str(int(i))\n return outputString", "def getCellStr(self, x, y): # TODO: refactor regarding issue #11\n c = self.board.getCell(x, y)\n\n if c == 0:\n return '.' if self.__azmode else ' .'\n\n elif self.__azmode:\n az = {}\n for i in range(1, int(math.log(self.board.goal(), 2))):\n az[2 ** i] = chr(i + 96)\n\n if c not in az:\n return '?'\n s = az[c]\n elif c == 1024:\n s = ' 1k'\n elif c == 2048:\n s = ' 2k'\n else:\n s = '%3d' % c\n\n return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL", "def __str__(self):\n return \"Board: \" + str(self._board) + \" Workers: \" + str(self._workers.items())", "def draw_board(self):\n board = \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[1], self.board_values[2], self.board_values[3])\n board += \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[4], self.board_values[5], self.board_values[6])\n board += \"-------------------\\n\"\n board += \"| %s | %s | %s |\\n\" % (self.board_values[7], self.board_values[8], self.board_values[9])\n board += \"-------------------\\n\"\n return board", "def __str__(self):\n s = ''\n for i in range(5):\n for j in range(5):\n ani = False\n if self[i][j] == 0:\n s += ' 0 '\n elif self[i][j].species == 'Elephant':\n s += ' E'\n ani = True\n elif self[i][j].species == 'Rhinoceros':\n s += ' R'\n ani = True\n else:\n s += ' B '\n if ani:\n if self[i][j].direction[0] == 0 and self[i][j].direction[1] == 1:\n d = '> '\n elif self[i][j].direction[0] == -1 and self[i][j].direction[1] == 0:\n d = '∧ '\n elif self[i][j].direction[0] == 0 and self[i][j].direction[1] == -1:\n d = '< '\n else:\n d = '∨ '\n s += d\n s += '\\n \\n'\n return s", "def printBoard(self):\n if self.side == self.WHITE or self.side == None:\n for r in [8,7,6,5,4,3,2,1]:\n for c in 'abcdefgh':\n p = self.getPiece(c,r) # print a8 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"\n else:\n for r in [1,2,3,4,5,6,7,8]:\n for c in 'hgfedcba':\n p = self.getPiece(c,r) # print h1 first\n if p == None:\n print \" \",\n else:\n print self.getPieceName(p.type),\n print \"\"", "def __str__(self):\n result = \"\"\n for row in self._cells:\n result += \" \".join(map(str, row))\n result += \"\\n\"\n return result", "def __repr__(self):\n s = \"\"\n for y in range(0,HEIGHT):\n temp=\"\"\n for x in range(0,WIDTH):\n temp = temp+ str(self.gameState[x,y])\n s += temp+\"\\n\"\n return s", "def __str__(self) -> 
str:\n output = self.columns * \" __\" + \"\\n\"\n for i in range(self.rows):\n for j in range(self.columns):\n filling = \"__\"\n if len(self.cells[i][j].agents) == 1:\n filling = \"_§\"\n elif len(self.cells[i][j].agents) == 2:\n filling = \"§§\"\n elif len(self.cells[i][j].agents) > 2:\n filling = \"++\"\n output += \"|\" + filling\n if j == self.columns - 1:\n output += \"|\"\n output += \"\\n\"\n return output", "def __str__(self):\n result = ''\n for row in range(self.getHeight()):\n for col in range(self.getWidth()):\n result += str(self.data[row][col]) + ' '\n result += '\\n'\n return result", "def __str__(self):\n grid_str = \"\"\n for i in range(len(self.grid)):\n for j in range(len(self.grid[i])):\n grid_str = grid_str + self.grid[i][j] + '\\t'\n grid_str = grid_str.strip('\\t')\n grid_str = grid_str + '\\n'\n return grid_str", "def __str__(self):\n s=\"\"\n for y in range(0,HEIGHT):\n for x in range(0,WIDTH):\n s+=str(self.gameState[x,y])\n return s", "def __repr__(self) -> str:\n return \"P1's Turn: {} - Board: {}\".format(self.p1_turn,\n self.board)", "def __repr__(self) -> str:\n return \"P1's Turn: {} - Board: {}\".format(self.p1_turn,\n self.current_board)", "def __str__(self):\r\n # replace with your code\r\n result = ''\r\n for row in range(0, self._grid_height):\r\n result += str(self._grid_tile[row]) + '\\n'\r\n return result", "def __str__(self):\r\n grid_text = \"\\n-------------------\\n|\"\r\n for i in range(len(self.grid)):\r\n grid_text = grid_text + ' %s '%(self.grid[i][-1])\r\n\r\n if i%3 == 2:\r\n grid_text = grid_text + '|\\n-------------------\\n|'\r\n else:\r\n grid_text = grid_text + '|'\r\n return grid_text[0:len(grid_text)-1]", "def __str__(self):\r\n # The full representative string\r\n str_matrix = \"\"\r\n\r\n if self.matrix is not None:\r\n # Save the lenght into a variable\r\n # to send this number to the tiles method\r\n # and calculate the number of spaces\r\n spaces = len(self.matrix)\r\n for i in range(0, spaces):\r\n nums = list(filter(lambda x: x != \"_\", self.matrix[i]))\r\n str_matrix += self.tiles(nums, (i+1), (spaces - i))\r\n\r\n return str_matrix", "def __str__(self):\n grid_string = ''\n for row in range(self.grid_height):\n grid_string += str(self.grid[row]) + '\\n'\n return grid_string", "def get_cards_as_string(self):\n return '' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n' \\\n ' {}\\n'.format(*self.get_cards_high_to_low())", "def print_board(self):\n \n # How to show empty/p1/p2\n VALS = \".XO\"\n\n print(\"\\n a b c d e f g\")\n print(\" /--+-+-+-+-+-+--\\\\\")\n for r in range(_HEIGHT - 1, -1, -1):\n s = \"%s |\" % r\n for c in range(_WIDTH):\n # Print mark next to most recent move\n mark = \">\" if self.last_play_rc == (r, c) else \" \"\n s += mark + VALS[self.board[r * 7 + c]]\n print(s + \" |\")\n print(\" \\\\--+-+-+-+-+-+--/\")\n print(\" a b c d e f g\\n\")", "def __str__(self) -> str:\n return self._color + str(self._value) + SudokuTile.C_END", "def __str__(self):\n grid_str = \"\"\n for row in range(self.grid_height):\n grid_str += str(self.grid[row])+'\\n'\n return grid_str", "def print_board(self):\n to_join = [\"-\" * self.DIMENSIONS[0]]\n for row in self.grid:\n to_join.append(\"\".join([ch.letter if ch is not None else \" \" for ch in row]))\n\n print(\"\\n\".join(to_join))", "def __str__(self):\n ans = \"\"\n for i in range(self.row):\n for j in range(self.col):\n ans+=str(self.array[i][j])+\" \"\n ans+=\"\\n\"\n return ans", "def printBoard(self):\n\t\tkey = [' ', 'X', 'O']\n\t\tprint(' | 
|')\n\t\tprint(' ' + key[self.state[0][0]] + ' | ' + key[self.state[0][1]] + ' | ' + key[self.state[0][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[1][0]] + ' | ' + key[self.state[1][1]] + ' | ' + key[self.state[1][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[2][0]] + ' | ' + key[self.state[2][1]] + ' | ' + key[self.state[2][2]])\n\t\tprint(' | |')" ]
[ "0.84243935", "0.8055579", "0.8017218", "0.78294134", "0.7797174", "0.77931315", "0.7776164", "0.77571356", "0.77145636", "0.7699537", "0.7682327", "0.7632191", "0.76316303", "0.76192135", "0.7602081", "0.7599799", "0.758553", "0.7570357", "0.75595516", "0.7550093", "0.7537935", "0.7514287", "0.7499289", "0.7492063", "0.7480225", "0.74771684", "0.7468804", "0.744572", "0.743624", "0.7418388", "0.7418208", "0.7397805", "0.73942703", "0.73898166", "0.7384276", "0.7383382", "0.73743814", "0.7365193", "0.7365193", "0.7363094", "0.7350477", "0.7309778", "0.72961354", "0.7294264", "0.72914183", "0.7285849", "0.7250098", "0.7218796", "0.7198089", "0.71431416", "0.7140117", "0.71355754", "0.71270055", "0.71012276", "0.7091507", "0.70888555", "0.7082249", "0.7058949", "0.70419264", "0.7034083", "0.7031354", "0.702456", "0.702456", "0.702456", "0.702456", "0.701469", "0.69672674", "0.69672674", "0.69672674", "0.6948384", "0.69478524", "0.6892123", "0.6885317", "0.6873642", "0.68593305", "0.6854289", "0.6853239", "0.6849875", "0.6848315", "0.6842803", "0.68405086", "0.68397176", "0.68177366", "0.6811835", "0.6811544", "0.6802742", "0.6785202", "0.67664397", "0.6761365", "0.67566323", "0.67458403", "0.67448026", "0.67379135", "0.6737263", "0.6735821", "0.67301553", "0.6727139", "0.6724389", "0.6718744", "0.6715472" ]
0.79764
3
The main method for running the game.
def main(): print("Welcome to TicTacToe") board = Board() while (not board.isOver()): print("It is {0}'s turn".format(board.current) + board.__str__()) move = input('Where would you like to go? : ').strip() if (move == 'q'): break elif (board.makeMove(move) == 1): board.switchPlayer() else: print("I didn't understand your input, these are the valid inputs:\nentering 'q' will quit out of the game.\n") print("entering a number will place the peice in that box, the numbers are as follows:\n \n1|2|3\n-----\n4|5|6\n-----\n7|8|9\n") print(board.__str__() + "\nGame Over") if (board.isOver() is Piece.EX or board.isOver() is Piece.OH): print("Player {0} wins!".format(board.isOver())) else: print("It was a draw")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n g = Game(800, 600)\n g.start()", "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\n game = RiichiMahjongApp()\n game.run()", "def main():\n g = DemoGame(800, 600)\n g.start()", "def main():\n game = Game(TIMES, HARDNESS)\n game.start()\n game.print_score()", "def main():\n\n name, game = select_game(vgc.KNOWN_GAMES)\n print('---- Launching: %s -----'%name)\n game.game.main()\n sys.exit(0)", "def main():\n boba_blast_game.main()", "def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()", "def main():\n pygame.init()\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.display.set_caption('8-Puzzle game')\n screen = pygame.display.set_mode((800, 500))\n fpsclock = pygame.time.Clock()\n program = SlidePuzzle((3, 3), 160, 5, difficulty=10) # program is also the gym environment\n\n choice = program.selectPlayerMenu(fpsclock, screen)\n if choice == \"AI\":\n pygame.display.quit()\n trainAI(program)\n elif choice == \"human\":\n launchWithGUI(program, fpsclock, screen)\n del program", "def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()", "def main():\n display, clock = game.init_pygame()\n highscores = HighScores(display, clock)\n highscores.run()", "def main():\n game_of_life(10, 20)", "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "def main():\n\n # Create logging file, rotate if filesize exceeds 1MB\n logger.add(\"logs/{time}.log\", rotation=\"1 MB\")\n\n GameContainer()\n logger.info(\"Started the game launcher. Make sure to support pygame!\")", "def main():\n args = get_parser().parse_args()\n players = prepare_game(\n decks_count=args.decks,\n auto_mode=args.auto_mode,\n player_one_name=args.name_player,\n players_count=args.players,\n )\n game(players=players)", "def main() -> None:\r\n game = advanced_game(MAP_FILE)\r\n\r\n root = tk.Tk()\r\n root.title('EndOfDayz')\r\n if TASK == 1:\r\n gui = BasicGraphicalInterface\r\n elif TASK == 2:\r\n gui = ImageGraphicalInterface\r\n # else:\r\n # gui = MastersGraphicalInterface\r\n app = gui(root, game.get_grid().get_size())\r\n app.play(game)\r\n root.mainloop()", "def main(argv):\n config_options = parse_config(CONFIG_FILE_NAME)\n arguments_options = parse_args(argv, **config_options)\n playgame.main(arguments_options)", "def main():\n game = Blackjack()\n game.play()", "def main():\r\n\r\n #set the display, caption, and timer\r\n pygame.init()\r\n mainClock = pygame.time.Clock()\r\n windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)\r\n pygame.display.set_caption(\"Cat's Big Adventure\")\r\n\r\n #Display a menu, choose a level and instantiate a game\r\n display_menu(windowSurface)\r\n\r\n #initialize the game\r\n stats = [6]\r\n game = Game(stats)\r\n \r\n # run the game loop until the user quits\r\n while True:\r\n # Process events (keystrokes, mouse clicks, etc)\r\n game.process_events(windowSurface)\r\n\r\n # Update object positions, check for collisions\r\n game.run_logic()\r\n \r\n # Draw the current frame\r\n game.display_frame(windowSurface)\r\n\r\n #draw background image\r\n background_image = pygame.image.load(\"sky.png\").convert()\r\n windowSurface.blit(background_image, [0, 0])\r\n \r\n mainClock.tick(FRAMERATE)", "def main():\n play_game(progression)", "def main():\n \n # load_and_initialize_func()\n\n loop_and_update_forever()\n\n pygame.quit()", "def main():\r\n # create the EdenLudo 
sample\r\n EdenEvolves = EdenLudo()\r\n # ru-n the scene\r\n run()", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def main():\n even_game()", "def main():\n initialize()\n inputs = InputsTemp()\n\n ui_font = pygame.font.SysFont(\"Comic Sans MS\", 50)\n\n assets_library = AssetsLibrary((Path(__file__).parent / \"Assets\"))\n\n # todo: create display class to wrap display from pygame\n window = setup_display(inputs.width_height)\n\n background_img = assets_library.assets.bg_black\n\n run = True\n FPS = 60\n lives = 5\n level = 1\n clock = pygame.time.Clock()\n\n ui_margin = {\n \"left\": 10,\n \"right\": 10,\n \"top\": 10,\n \"bottom\": 10,\n }\n\n def redraw_window():\n window.blit(background_img.get_image(inputs.width_height), (0, 0))\n\n lives_label = ui_font.render(f\"lives: {lives}\", 1, (255, 255, 255))\n level_label = ui_font.render(f\"level: {level}\", 1, (255, 255, 255))\n\n window.blit(lives_label, (ui_margin[\"left\"], ui_margin[\"top\"]))\n window.blit(level_label, (inputs.width_height[0] - level_label.get_width() - ui_margin[\"right\"], ui_margin[\"top\"]))\n pygame.display.update()\n\n while run:\n clock.tick(FPS)\n\n redraw_window()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n print(\"Game ended\")", "def main():\n pygame.init()\n\n try:\n filename = sys.argv[1]\n except IndexError:\n usage()\n\n game = Game.from_file(filename)\n grid = game.get_grid()\n width, height = grid.get_width(), grid.get_height()\n win = pygame.display.set_mode((width*CELL_SIZE, height*CELL_SIZE))\n selected = 0 # default selected player\n select_player(selected)\n render(win, grid)\n\n while not (game.winning() or game.losing()):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP:\n selected = handle_click_event(grid, selected)\n\n elif event.type == pygame.KEYUP and event.key in KEY_DIRECTIONS:\n game.next_step(selected, KEY_DIRECTIONS[event.key])\n render(win, grid)\n\n # QUIT\n elif event.type == pygame.QUIT:\n pygame.quit()\n\n if game.winning():\n text = \"You win!\"\n game_status = \"win\"\n elif game.losing():\n text = \"You lose!\"\n game_status = \"lose\"\n\n display_end_screen(win, text, game_status)", "def main():\n run_it = scene.Control()\n state_dict = {\"TITLE\" : title.Title(),\n \"INTRO\" : cutscene.Cutscene0(),\n \"GAMEPLAY\" : gameplay.gamePlay(),\n \"ENDING\" : cutscene.Cutscene1()\n }\n run_it.setup_states(state_dict, \"TITLE\")\n run_it.main()", "def main():\n app = RunSnakeRunApp(0)\n app.MainLoop()", "def main():\n print(\"is Running!\")", "def main(cls):\n parser = optparse.OptionParser()\n parser.add_option('-c', '--columns', type=\"int\", default=16)\n parser.add_option('-r', '--rows', type=\"int\", default=16)\n parser.add_option('-m', '--mines-density', type=\"float\", default=0.2,\n help=\"percent of mines: 0.15 is trivial, 0.2 good [default], 0.25 hard\")\n (options, args) = parser.parse_args()\n if args:\n parser.error(\"unexpected arguments: \" + \" \".join(args))\n \n game = cls(options.columns, options.rows, options.mines_density)\n game.window.mainloop()", "def main():\n # Initialize the event manager.\n event_manager = events.EventManager()\n AppState.get_state().set_event_manager(event_manager)\n\n # Initialize and register the application heartbeat.\n heart_beat = HeartBeat()\n event_manager.register_listener(heart_beat)\n\n # Initialize and register the world.\n basic_experiment = 
experiment.basic.BasicExperiment()\n world = basic_experiment.get_world()\n event_manager.register_listener(world)\n AppState.get_state().set_world(world)\n\n # Initialize pygame.\n surface = init()\n\n # Initialize and register the view.\n main_view = view.View(surface)\n event_manager.register_listener(main_view)\n\n # Initialize and register the controller.\n main_controller = controller.Controller()\n event_manager.register_listener(main_controller)\n\n # Start the heartbeat.\n heart_beat.run()", "def main():\n Main()", "def run():\n\tif len(sys.argv) > 1 and sys.argv[1] in {'-V', '--version'}:\n\t\tprint(\"pokesim - Pokémon Battle Simulator - Version %s\" % __version__)\n\t\texit()\n\n\trandom.seed()\n\ttry:\n\t\tmain()\n\texcept (KeyboardInterrupt, EOFError):\n\t\texit(0)", "def main():\n arcade.open_window(WINDOW_WIDTH, WINDOW_HEIGHT, \"Snake.exe\")\n # Set the window background colour\n arcade.set_background_color(light_green)\n\n # Calls the on_draw method every 1/3(20 seconds) of a second\n arcade.schedule(on_draw, 1/3)\n # Keeps the window open until closed by the user\n arcade.run()", "def run():\n main()", "def main():\n\n # Fix crackling audio\n util.set_environment('PULSE_LATENCY_MSEC', '60')\n\n # Replace launcher with game exe in proton arguments\n util.replace_command('FF9_Launcher.exe', 'x64/FF9.exe')", "def start_game(self):\n\n\t\tpass", "def main(self) -> None:\n pass", "def run(self, GameState):\n pass", "def main():\n pass", "def main():\n run_program()", "def main() -> int:\n setup(\"main\", WIDTH, HEIGHT)\n init()\n for _ in range(randint(10, 10)):\n add_rand_circ()\n for _ in range(randint(10, 10)):\n add_rand_rect()\n mainloop()\n update_leaderboard()\n return 0", "def main():\n return", "def run_gui_game():\n # Set up game\n view = GuiView()\n game = GameEngine(view)", "def main():\r\n\r\n pygame.init()\r\n pygame.display.init()\r\n\r\n # Set the pygame clock\r\n clock = pygame.time.Clock()\r\n\r\n pygame.display.set_caption(\"Blackbox game\")\r\n current_game = BlackBoxGame()\r\n clock = pygame.time.Clock()\r\n\r\n while True:\r\n current_game.check_events()\r\n clock.tick(60)\r\n current_game.update_screen()\r\n\r\n pygame.quit()", "def run_game():\r\n pygame.init()\r\n ai_settings = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_settings.screen_width, ai_settings.screen_height))\r\n pygame.display.set_caption(\"Stars\")\r\n\r\n # Make a group of stars.\r\n stars = Group()\r\n\r\n # Create a star system\r\n gf.create_star_system(ai_settings, screen, stars)\r\n \r\n # Main game loop.\r\n while True:\r\n \r\n # Let's player quit the game.\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n gf.update_screen(ai_settings, screen, stars)", "def main(**kwargs):\n print('Start')\n agent = initAgent(**kwargs)\n kwargs['agent'] = agent\n result = []\n\n def mainsub(*args):\n game = Game(**kwargs)\n game.display(kwargs['noshow'])\n while True:\n # get_input = getch(\"Enter direction (w/a/s/d): \")\n get_input = game.action()\n if get_input in keypad:\n game.move(keypad.index(get_input))\n game.update()\n # elif get_input == \"q\":\n # break\n # else:\n # print(\"\\nInvalid choice.\")\n # continue\n if game.end:\n game.savegame()\n game.display(kwargs['noshow'])\n print(\"Result:\", game.nturn, game.score)\n break\n game.display(kwargs['noshow'])\n result.append((game.score, game.nturn))\n game.agent.replay()\n if kwargs['train']:\n game.agent.save()\n game.reset()\n if kwargs['train']:\n 
np.save('result.%s' % game.agent.algo, np.array(result))\n\n map(mainsub, range(kwargs['n']))\n print(\"Thanks for playing.\")", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def main():\n game = Hangman()\n game.play_hangman()", "def main():\n\n # This is for text mode.\n\n if len(sys.argv) == 2 and sys.argv[1] == '-t':\n model.main()\n sys.exit(0)\n\n # Do initialization.\n\n pygame.init()\n screen = pygame.display.set_mode(DISPLAY_MODE)\n pygame.display.set_caption(TITLE)\n clock = pygame.time.Clock()\n background = pygame.Surface(screen.get_size()).convert()\n background.fill(BACKGROUND)\n pygame.display.flip()\n\n game_model = model.Game()\n board_view = view.Board(game_model)\n score_board = view.ScoreBoard(game_model)\n rendering_groups = [board_view, score_board]\n\n while True:\n\n clock.tick(FRAMES_PER_SEC)\n scheduler.tick()\n\n # Handle user input.\n\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key in (K_ESCAPE, K_q) or event.type == QUIT:\n sys.exit(0)\n elif event.key == K_h:\n url = \"file://\" + os.path.abspath(data.find(\"help.html\"))\n webbrowser.open(url, new=True)\n elif event.key == K_r:\n game_model.reset()\n elif event.type == MOUSEBUTTONDOWN:\n for square_view in board_view:\n if square_view.rect.collidepoint(*pygame.mouse.get_pos()):\n xyz = square_view.square_model.xyz\n try:\n game_model.move(xyz)\n except ValueError:\n pass\n break\n\n # Provide the simulation and render it.\n\n for i in rendering_groups:\n i.update()\n i.clear(screen, background)\n pygame.display.update(i.draw(screen))", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main(args):\n global numBirds, numToads\n if len(args) > 2:\n numBirds = int(args[1])\n numToads = int(args[2])\n gameLoop(GameManager(MapSize, numBirds, numToads))", "def main():\r\n app = application.Application()\r\n app.initializeLayer(menu.Menu())\r\n app.run()", "def play_game():\n pass", "def run(self):\n print(\"WELCOME TO MINESWEEPER!\")\n\n\n while True:\n\n self.get_input()\n start_game(self.rows, self.cols, self.mines)", "def main():\n secret_word = get_word()\n play_game(secret_word)", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def Gameloop():", "def main():\n print(\"It works!!! 
;-)\")\n ###TODO### do something with the various methods/functions of this file", "def start() -> None:\n\n # PREPARE\n clone_game_files()\n\n # SIMULATE\n turns = run.simulation()\n\n # LOG\n logs = read.combine_logs(turns)\n\n # CALCULATE\n results = calculate.results(logs)\n\n # DISPLAY\n visualize.charts(results)\n\n # CLEAN\n remove_cloned_files()", "def main():\r\n # Initialize words from specific file\r\n words_list = hangman_helper.load_words()\r\n # Run single game with given word list to choose from\r\n run_single_game(words_list)\r\n # Ask the user if he would like to play again\r\n request = hangman_helper.get_input()\r\n if request[INPUT_TYPE] == hangman_helper.PLAY_AGAIN:\r\n if request[INPUT_VALUE]:\r\n run_single_game(words_list)", "def main(self):\r\n pass", "def main():\n greetings()\n run_jarvis()", "def main():\n dealCards().mainloop()", "def main():\n #Initialize pygame\n pygame.init()\n\n #Set up the display and draw it to screen\n display = DisplayUpdater()\n display.generate_display()\n\n #Set up the audio player\n sound = AudioPlayer()\n\n sound.play_menu_music()\n #Set up the controls\n controls = PlayerInput()\n\n #Start off in the main menu, can go to credits, leaderboard, or game\n display.show_main_menu()\n user_input = controls.get_menu_input(sound)\n #While the user hasn't quit from the main menu\n while user_input != INPUT.ESCAPE:\n #If the player hits ENTER, launch the game\n if user_input == INPUT.ENTER:\n play_demon_music = game(display, sound, controls)\n sound.play_menu_music(play_demon_music)\n controls.clear_menu_input()\n user_input = INPUT.SPACE\n\n #If the player hits SPACE, go to the leaderboard\n if user_input == INPUT.SPACE:\n display.show_leaderboard()\n user_input = controls.get_menu_input(sound)\n #If the player hits C, go to the credits\n if user_input == INPUT.C:\n display.show_credits()\n user_input = controls.get_menu_input(sound)\n\n #If the player hits ESC, return to the main menu.\n #must be in own if statement so we don't quit\n if user_input in (INPUT.ESCAPE, INPUT.SPACE, INPUT.C):\n display.show_main_menu()\n user_input = controls.get_menu_input(sound)", "def run_game_logic(self):\n pass", "def main():\n global TURRET\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pg.init()\n pg.display.set_caption(CAPTION)\n pg.display.set_mode(SCREEN_SIZE)\n TURRET = pg.image.load(\"turret.png\").convert()\n TURRET.set_colorkey(COLOR_KEY)\n Control().main_loop()\n pg.quit()\n sys.exit()", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=True) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=100) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():\n global levels\n difficulty 
= select_difficulty()\n start_game(difficulty)", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "def main():\r\n if check_argv():\r\n if len(sys.argv) == 3:\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), True, ip=None)\r\n gui.create_board()\r\n gui.root.title(\"Server\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()\r\n elif len(sys.argv) == 4:\r\n ip = socket.gethostbyname(socket.gethostname())\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), False, ip)\r\n gui.create_board()\r\n gui.root.title(\"Client\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()", "def main():\n\n\tif len(sys.argv) > 1 and sys.argv[1]:\n\t\t_, _, hash = read_file(sys.argv[1])\n\t\toffset_x = 0\n\t\toffset_y = 0\n\telse:\n\t\toffset_x, offset_y, hash = screenshot()\n\n\tprint(hash)\n\tgame = eliza_logic.Game(0)\n\tgame.exact_setup(hash)\n\tprint(game)\n\tresult = game.global_solve(-1)\n\tprint(result)\n\n\t# If it was a screen grab, we can actually do this -- just type n/q/c to quit or anything else to continue\n\tif result is not None and offset_x and offset_y:\n\t\tx = six.moves.input(\"Ready for automated solution? \")\n\t\tif x.lower() in [\"n\", \"q\", \"c\"]:\n\t\t\treturn\n\n\t\texecute_solution(offset_x, offset_y, result)", "def main():\n doctest.testmod()\n game()", "def run(self):\n pygame.init()\n pygame.display.set_caption(\"Genetic Game\")\n self.screen = pygame.display.set_mode((self.SCREEN_W, self.SCREEN_H), 0, 32)\n\n self.ominus_sprites = [OminusSprite(self.screen, o, PLAYERS_COLORS[o.id]) for o in self.model.get_players()]\n for o in self.ominus_sprites:\n self.agent_group.add(o)\n\n self.wall_sprites = [WallSprite(self.screen, w) for w in self.model.get_walls()]\n for w in self.wall_sprites:\n self.terrain_group.add(w)", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline= True ) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.5) # reduce update_delay to speed up simulation\n sim.run(n_trials=100) # press Esc or close pygame window to quit", "def main() -> None:\n return", "def main():\n global lp\n global games, selected_game\n \n # Create a Launchpad instance and start it up\n lp = Launchpad()\n lp.open()\n lp.reset()\n \n populate_games()\n draw_main_menu()\n \n playing = True\n\n # Timekeeping\n last_time = 0\n anim_timer = 0\n\n while playing:\n # Get delta_time this loop\n cur_time = perf_counter()\n delta_time = cur_time - last_time\n last_time = cur_time\n\n # Get next input\n button = lp.button_state_xy()\n \n # Animate border\n anim_timer += delta_time\n if anim_timer > BORDER_ANIM_DELAY:\n anim_timer = 0\n draw_border()\n \n # Handle input (if any)\n if button:\n x = button[0]\n y = button[1]\n \n # If button pressed\n if button[2]:\n # Write red to pressed button if circle button\n if x == 8 or y == 0:\n 
lp.led_ctrl_xy(x, y, 3, 0)\n \n if (x, y) == BUT_LEFT:\n selected_game = (selected_game + 1) % len(games)\n draw_cover()\n \n if (x, y) == BUT_RIGHT:\n selected_game = (selected_game - 1) % len(games)\n draw_cover()\n \n # If button released\n if not button[2]:\n # Write menu color to released button if circle button\n if y == 0:\n lp.led_ctrl_xy(x, y, *menu_leds[x]) # Top row\n if x == 8:\n lp.led_ctrl_xy(x, y, *menu_leds[7 + y]) # Right column\n \n if (x, y) == BUT_START:\n lp.clear_input()\n lp.reset()\n games[selected_game].play()\n draw_main_menu()\n \n if (x, y) == BUT_QUIT:\n playing = False\n \n lp.draw()\n \n lp.reset()\n lp.close()" ]
[ "0.85276115", "0.8359565", "0.8280766", "0.8256087", "0.8189385", "0.79771304", "0.78113306", "0.7789521", "0.7754325", "0.77015805", "0.7665701", "0.7611436", "0.76085716", "0.7576111", "0.7546943", "0.7546619", "0.7520529", "0.7456335", "0.74490446", "0.74308777", "0.7388939", "0.7355476", "0.73381764", "0.73325497", "0.7302963", "0.7289043", "0.7269477", "0.723349", "0.7210045", "0.7197681", "0.7184768", "0.7162537", "0.7155853", "0.7155467", "0.71550477", "0.7152539", "0.7121594", "0.71179414", "0.7111066", "0.7096553", "0.7062233", "0.704228", "0.70236814", "0.70173675", "0.7007432", "0.7003049", "0.70021015", "0.69968724", "0.6994511", "0.6961736", "0.69601816", "0.69601816", "0.69601816", "0.6957019", "0.6950015", "0.6931413", "0.6925387", "0.6921405", "0.6908866", "0.6895347", "0.6893785", "0.6888733", "0.6878322", "0.68602824", "0.6843059", "0.6841527", "0.68347126", "0.68150145", "0.68089354", "0.68072563", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.68067425", "0.6797973", "0.67977375", "0.67907566", "0.6789781", "0.67890084", "0.67847353", "0.6783034", "0.6774448", "0.6771813" ]
0.0
-1
Remove temporary partition files from disk. The removed files' names are deleted from the _temporary_files set. The intended use is to delete individual files as part of the garbage collection process and to delete all files when Python exits. This is quite brutal and may break partitions if used unwisely. It is not recommended for use as a general tidy-up function.
def _remove_temporary_files(filename=None):
    if filename is not None:
        if filename in _temporary_files:
            # If this condition is not met then probably
            # _remove_temporary_files() has already been run at
            # exit
            dirname, _lock_file, _other_lock_files = _temporary_files[filename]
            try:
                remove(_lock_file)
            except OSError:
                pass

            # Only remove the temporary file if it is not being
            # used by any other ranks
            if not _lock_files_present(_other_lock_files):
                # Remove the given temporary file
                try:
                    remove(filename)
                    rmdir(dirname)
                except OSError:
                    pass

                del _temporary_files[filename]
        # --- End: if

        return

    # Still here? Then remove all temporary files and lock files
    for filename in _temporary_files:
        try:
            remove(filename)
        except OSError:
            pass

        dirname, _lock_file, _other_lock_files = _temporary_files[filename]
        try:
            remove(_lock_file)
        except OSError:
            pass

        for lock_file in _other_lock_files:
            try:
                remove(lock_file)
            except OSError:
                pass
        # --- End: for

        try:
            rmdir(dirname)
        except OSError:
            pass
    # --- End: for

    _temporary_files.clear()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_temporary_files():\n try:\n xml_file_path, bin_file_path = get_ida_exported_files()\n if os.path.isfile(xml_file_path):\n os.remove(xml_file_path)\n\n if os.path.isfile(bin_file_path):\n os.remove(bin_file_path)\n\n except Exception:\n print(\"GhIDA:: [!] Unexpected error while removing temporary files.\")", "def _remove_tmpfiles():\n for f in tmpfiles:\n try:\n os.remove(f)\n except OSError:\n pass", "def del_tmp() -> None:\n for elem in os.listdir('./tmp'):\n path = f\"./tmp/{elem}\"\n if os.path.isfile(path):\n os.remove(path)\n else:\n shutil.rmtree(path)", "def clean_up_temp_dir():\n files = glob.glob(f'{CONFIG_DIR}/tmp/*')\n for f in files:\n try:\n os.remove(f)\n except Exception:\n pass", "def clear_tempfiles(self, remove=True):\n while self._tempfiles:\n self.pop(remove)\n self.push()", "def remover_files():\n directory = os.getcwd()\n for file_name in glob.glob((\"{}/tmp/*\").format(directory)):\n remove(file_name)", "def clear_temp(remove_all=True):\n tf_list = []\n\n if remove_all:\n temp_dir = _get_temp_dir(False)\n temp_dir += (\n os.path.sep if os.path.sep not in temp_dir[len(temp_dir) - 1] else \"\"\n )\n tf_list = glob.glob(\"{0}TESS_*\".format(temp_dir))\n else:\n global _tempfiles\n\n tf_list = list(_tempfiles)\n _tempfiles.clear()\n\n for tf in tf_list:\n if os.path.isfile(tf):\n _remove_file(tf)", "def delete_temporary_files(request, tmp_path_factory):\r\n _tmp_path_factory = tmp_path_factory\r\n\r\n def cleanup():\r\n tmp_path = _tmp_path_factory.getbasetemp()\r\n if pathlib.Path(tmp_path).exists() and pathlib.Path(tmp_path).is_dir():\r\n shutil.rmtree(tmp_path)\r\n\r\n request.addfinalizer(cleanup)", "def cleanup_tmpdir(tmpdir):\n try:\n for filename in glob.glob(os.path.join(tmpdir, \"*\")):\n os.remove(filename)\n os.rmdir(tmpdir)\n except OSError, inst:\n sys.stderr.write('WARNING: could not remove temp files'\n ' in ' + tmpdir + '\\n' + str(inst) + '\\n')", "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "def remove_tmp_sources(source_filename):\n logging.info('Removing temporary files ...')\n source_dir = os.path.dirname(source_filename)\n if os.path.exists(source_filename):\n os.remove(source_filename)\n for f in os.listdir(source_dir):\n if f.startswith('tmp_'):\n os.remove(os.path.join(source_dir, f))", "def _clean_up_temporary_files(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n tf.gfile.Remove(filepath)\n\n tmp_dir = os.path.join(dataset_dir, 'cifar-100-python')\n tf.gfile.DeleteRecursively(tmp_dir)", "def clean_temp_storage_dir(self, filenames):\n for fn in filenames:\n try:\n pathlib.Path(pathlib.PurePath(self.temp_storage_dir, fn)).unlink()\n except FileNotFoundError:\n pass", "def _clean_up_temporary_files(dataset_dir):\n return", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def clear_tmp_folder(self):\r\n for file in os.listdir(self.temp_dir):\r\n if file.endswith('.png') or file.endswith('.jpg'):\r\n path = os.path.join(self.temp_dir, file)\r\n print ('Cleaned up {}'.format(path))\r\n os.remove(path)", "def 
tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def remove_temp_folders(self) -> None:\n if self.args.deletefolders:\n time.sleep(2)\n for f in self.created_folders:\n shutil.rmtree(path.join(self.args.output, f))\n print(f\"{self.args.output}/{f} was deleted\")", "def remove_intermediate_files(dir_):\n file_list = glob.glob(f'{dir_}/*temp*')\n [os.remove(f) for f in file_list]", "def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)", "def clean_up_temp_files():\n global __tmp_model_dir\n\n if __tmp_model_dir is not None:\n FileUtils.deleteDirectory(__tmp_model_dir)\n __tmp_model_dir = None", "def _clean_up_optimization():\n for (root, dirs, files) in walk(TEMP_MODULES_DIR_PATH, topdown=False):\n for file in files:\n if file.startswith(\"__temp_\"):\n remove(f\"{root}/{file}\")\n try:\n rmdir(root)\n except OSError:\n G.warn_(f\"Unidentified file found in temporary directory: {root}\")", "def tearDown(self):\n for root, dirs, files in os.walk(TEMPDIR, topdown=False):\n for name in files:\n os.remove(os.path.join(root, name))\n for name in dirs:\n os.rmdir(os.path.join(root, name))\n os.rmdir(root)", "def cleanTempDirs(olderThanDays=None):\n from armi import runLog\n from armi.utils.pathTools import cleanPath\n\n disconnectAllHdfDBs()\n printMsg = runLog.getVerbosity() <= DEBUG\n if _FAST_PATH_IS_TEMPORARY and os.path.exists(_FAST_PATH):\n if printMsg:\n print(\n \"Cleaning up temporary files in: {}\".format(_FAST_PATH),\n file=sys.stdout,\n )\n try:\n cleanPath(_FAST_PATH, mpiRank=MPI_RANK)\n except Exception as error:\n for outputStream in (sys.stderr, sys.stdout):\n if printMsg:\n print(\n \"Failed to delete temporary files in: {}\\n\"\n \" error: {}\".format(_FAST_PATH, error),\n file=outputStream,\n )\n\n if olderThanDays is not None:\n cleanAllArmiTempDirs(olderThanDays)", "def _remove_temp_path():\n if os.path.exists(_temp_path):\n if os.path.isdir(_temp_path):\n def onerror(function, path, excinfo):\n persist.printf(\"{}: Unable to delete '{}' while cleaning up temporary directory\"\n .format(p_name, path))\n import traceback\n traceback.print_exc(*excinfo)\n import shutil\n shutil.rmtree(_temp_path, onerror=onerror)\n else:\n persist.printf(\"{}: For some reason, '{}' is a file. 
Removing...\"\n .format(p_name, _temp_path))\n os.remove(_temp_path)", "def task_clean_tmp_files():\n client = google.cloud.storage.Client()\n blobs = client.list_blobs(settings.PODCAST_STORAGE_BUCKET,\n prefix=settings.PODCAST_TMP_STORAGE_DIRECTORY)\n for blob in blobs:\n if blob.time_created.replace(tzinfo=None) + datetime.timedelta(1) <= datetime.datetime.now():\n blob.delete()\n\n return OK_RESPONSE", "def clean_tmp (tmp_path, keep_tmp):\n\n # check if folder exists\n if isdir(tmp_path):\n\n # delete [tmp_path] folder if [set_bb.keep_tmp] not True\n if not keep_tmp:\n shutil.rmtree(tmp_path)\n log.info ('removing temporary folder: {}'.format(tmp_path))\n\n else:\n # otherwise fpack its fits images\n #list_2pack = glob.glob('{}/*.fits'.format(tmp_path))\n list_2pack = list_files(tmp_path, end_str='.fits')\n\n for filename in list_2pack:\n fpack (filename)\n\n else:\n log.warning ('tmp folder {} does not exist'.format(tmp_path))\n\n\n return", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def clearTemp():\n Installer.tempDir.rmtree(safety='Temp')", "def remove_frames(tmpdir, files):\n for fname in files: os.remove(os.path.join(tmpdir, fname))\n if not(tmpdir == None): os.rmdir(tmpdir)", "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def purge(self, maxtemp=300, maxlock=600):\n # get list of intermediate directories\n dirs = []\n self.__get_list_of_interm_dirs(dirs)\n # remove all but old temporary or locked elements\n oldtemp = maxtemp != 0 and time.time() - maxtemp or 0\n oldlock = maxlock != 0 and time.time() - maxlock or 0\n if oldtemp or oldlock:\n for _dir in dirs:\n path = '%s/%s' % (self.path, _dir)\n tmp_lock_elems = [x for x in os.listdir(path)\n if re.search('(%s|%s)$' %\n (TEMPORARY_SUFFIX,\n LOCKED_SUFFIX), x)]\n for old in tmp_lock_elems:\n try:\n stat = os.stat('%s/%s' % (path, old))\n except OSError:\n error = sys.exc_info()[1]\n if error.errno == errno.ENOENT:\n continue\n raise error\n if (old.endswith(TEMPORARY_SUFFIX) and\n stat.st_mtime >= oldtemp):\n continue\n if old.endswith(LOCKED_SUFFIX) and \\\n stat.st_mtime >= oldlock:\n continue\n _warn(\"WARNING: removing too old volatile file: %s/%s\" %\n (self.path, old))\n try:\n os.unlink('%s/%s' % (path, old))\n except OSError:\n error = sys.exc_info()[1]\n if error.errno != errno.ENOENT:\n raise error\n # try to purge all but the last intermediate directory\n if len(dirs) > 1:\n dirs.sort()\n dirs.pop()\n for _dir in dirs:\n path = '%s/%s' % (self.path, _dir)\n if len(os.listdir(path)) == 0:\n _special_rmdir(path)", "def clean(recursive=0):\n temp_files = set([\n \".blg\", \".bbl\", \".aux\", \".log\", \".brf\", \".nlo\", \".out\", \".dvi\", \".ps\",\n \".lof\", \".toc\", \".fls\", \".fdb_latexmk\", \".pdfsync\", \".synctex.gz\",\n \".ind\", \".ilg\", \".idx\"\n ])\n\n try:\n from send2trash import send2trash\n rm = send2trash\n except ImportError:\n print \"Send2Trash is not installed, use os.remove instead\"\n rm = os.remove\n\n _clean(os.getcwd(), 0, recursive, temp_files, rm)", "def delete_b_files(intermediate_files: List[File]) -> None:\n for f in intermediate_files:\n f.remove()", "def RemoveTempDirContents():\n temp_dir = os.path.abspath(tempfile.gettempdir())\n 
print 'Removing contents of %s' % temp_dir\n\n print ' Inspecting args for files to skip'\n whitelist = set()\n for i in sys.argv:\n try:\n if '=' in i:\n i = i.split('=')[1]\n low = os.path.abspath(i.lower())\n if low.startswith(temp_dir.lower()):\n whitelist.add(low)\n except TypeError:\n # If the argument is too long, windows will freak out and pop a TypeError.\n pass\n if whitelist:\n print ' Whitelisting:'\n for w in whitelist:\n print ' %r' % w\n\n start_time = time.time()\n for root, dirs, files in os.walk(temp_dir):\n for f in files:\n p = os.path.join(root, f)\n if p.lower() not in whitelist:\n try:\n os.remove(p)\n except OSError:\n pass\n else:\n print ' Keeping file %r (whitelisted)' % p\n for d in dirs[:]:\n p = os.path.join(root, d)\n if p.lower() not in whitelist:\n try:\n # TODO(iannucci): Make this deal with whitelisted items which are\n # inside of |d|\n\n # chromium_utils.RemoveDirectory gives access denied error when called\n # in this loop.\n shutil.rmtree(p, ignore_errors=True)\n # Remove it so that os.walk() doesn't try to recurse into\n # a non-existing directory.\n dirs.remove(d)\n except OSError:\n pass\n else:\n print ' Keeping dir %r (whitelisted)' % p\n print ' Removing temp contents took %.1f s' % (time.time() - start_time)", "def remove_local():\n\n try:\n # if str(Settings.SKIP_DELETE) == \"True\":\n # Settings.maybe_print(\"skipping local remove\")\n # return\n # Settings.print('Deleting Local File(s)')\n # delete /tmp\n tmp = File.get_tmp()\n if os.path.exists(tmp):\n shutil.rmtree(tmp)\n Settings.print('Local File(s) Removed')\n else:\n Settings.print('Local Files Not Found')\n except Exception as e:\n Settings.dev_print(e)", "def cleanup(self):\r\n if self.tempDirectory != None:\r\n shutil.rmtree(self.tempDirectory, True)\r\n self.tempDirectory = None", "def delete_temp_folder():\n\n tempFolder = os.path.join(os.getenv(\"APPDATA\"), \"GARI\\Temp\")\n\n if os.path.exists(tempFolder):\n for file in os.listdir(tempFolder):\n arcpy.Delete_management(os.path.join(tempFolder, file))", "def _cleanup(self):\n try:\n tmpdir = self.tmpdir\n except AttributeError:\n # Don't need to do anything if the temp dir isn't set\n return\n shutil.rmtree(tmpdir)", "def clearTempDir(self):\n shutil.rmtree(tempDir.as_posix(), ignore_errors=True)", "def clean():\n folders = ['utils_dfn/temp', 'utils_dfn/img', 'utils_dfn/mask', 'utils_dfn/output']\n for folder in folders:\n for item in os.listdir(folder):\n item_path = os.path.join(folder, item)\n if os.path.isdir(item_path):\n shutil.rmtree(item_path)\n elif os.path.isfile(item_path):\n os.remove(item_path)", "def RemoveChromeTemporaryFiles():\n # NOTE: print out what is cleaned up so the bots don't timeout if\n # there is a lot to cleanup and also se we see the leaks in the\n # build logs.\n # At some point a leading dot got added, support with and without it.\n kLogRegex = r'^\\.?(com\\.google\\.Chrome|org\\.chromium)\\.'\n if chromium_utils.IsWindows():\n RemoveTempDirContents()\n RemoveChromeDesktopFiles()\n RemoveJumpListFiles()\n elif chromium_utils.IsLinux():\n LogAndRemoveFiles(tempfile.gettempdir(), kLogRegex)\n LogAndRemoveFiles('/dev/shm', kLogRegex)\n elif chromium_utils.IsMac():\n nstempdir_path = '/usr/local/libexec/nstempdir'\n if os.path.exists(nstempdir_path):\n ns_temp_dir = subprocess.check_output([nstempdir_path]).strip()\n if ns_temp_dir:\n LogAndRemoveFiles(ns_temp_dir, kLogRegex)\n for i in ('Chromium', 'Google Chrome'):\n # Remove dumps.\n crash_path = '%s/Library/Application Support/%s/Crash Reports' % 
(\n os.environ['HOME'], i)\n LogAndRemoveFiles(crash_path, r'^.+\\.dmp$')\n else:\n raise NotImplementedError(\n 'Platform \"%s\" is not currently supported.' % sys.platform)", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def do_clean(VERBOSE=0):\r\n thisdir = os.getcwd()\r\n tempdir = \"TEMP\"\r\n\r\n if os.path.isdir(os.path.join(thisdir, \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"TEMP\")\r\n elif os.path.isdir(os.path.join(thisdir, \"..\", \"TEMP\")):\r\n tempdir = os.path.join(thisdir, \"..\", \"TEMP\")\r\n\r\n if os.path.isdir(tempdir):\r\n os.chdir(tempdir)\r\n if VERBOSE > 0:\r\n print \" cleaning temporary directory '%s'\" % tempdir\r\n filestogo = glob.glob(\"*.*\")\r\n gonecount = 0\r\n for fg in filestogo:\r\n try:\r\n os.remove(fg)\r\n gonecount = gonecount +1\r\n except:\r\n if VERBOSE > 0:\r\n print \" !!! COULDN@T DELETE FILE '%s'\" % fg\r\n else:\r\n pass\r\n if VERBOSE > 0:\r\n print \" Deleted %s files\" % gonecount\r\n print\r\n os.chdir(thisdir)", "def deleteIntermediateFiles(self):\n uniq_files = set(self.files_to_delete)\n print (\"Deleting %d intermediate files\" % len(uniq_files))\n for fn in uniq_files:\n # don't delete log files\n if not fn.endswith(\".log\"):\n os.remove(fn)", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def _remove_files(self):\n if hasattr(self, 'files'):\n for file in self.files:\n if os.path.exists(file):\n os.remove(file)\n\n self._remove_changes()\n self._remove_temporary_files()", "def delete_tempdirs(self):\n\n if self._noWebDir:\n shutil.rmtree(self.webTopDir)", "def _remove_all_ww_tmp_dirs():\n\t \n\t\t\n\t\tfor tmp_dir in glob.glob(f\"{TEST_TMP_DIR}/ww_*\"):\n\t\t\tprint(f\"removing tmp_dir = {tmp_dir}\")\n\t\t\tTest_Base._remove_ww_tmp_dir(tmp_dir)\n\t\t\t\n\t\treturn", "def cleanUp(self):\n\n tapeList = sorted(glob.glob('TAPE?'))\n tapeList = ['TAPE%d' % num for num in [1, 2, 5, 6, 7, 10]]\n for tape in tapeList:\n if os.path.isfile(tape): os.remove(tape)\n # end TAPE loop", "def deleteOutFiles(self, onlytmp=True):\n self.linkNodes()\n for node in self.sort():\n file = node.outputpath\n if (not onlytmp or file[0:4]=='tmp.'):\n logger.info(\"Deleting output file '%s'\" % file)\n dfs.delete(file)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def tearDown(self):\n for d in os.listdir(tmp_dir_path):\n d_path = os.path.join(tmp_dir_path,d)\n try:\n os.remove(d_path)\n except:\n for f in os.listdir(d_path):\n f_path = os.path.join(d_path,f)\n os.remove(f_path)\n os.rmdir(d_path)\n assert os.listdir(tmp_dir_path) == []", "def remove_temp_folder(context):\n\n app = context.unrestrictedTraverse(\"/\")\n broken_id = \"temp_folder\"\n if broken_id in app.objectIds():\n temp_folder = app.unrestrictedTraverse(broken_id, None)\n if not isinstance(temp_folder, Broken):\n logger.info(\"%s is not broken, so we keep it.\", broken_id)\n return\n app._delObject(broken_id)\n logger.info(\"Removed broken %s from Zope root.\", broken_id)\n\n # The root Zope object has a dictionary '_mount_points.\n # >>> app._mount_points\n # {'temp_folder': MountedObject(id='temp_folder')}\n if not hasattr(app, \"_mount_points\"):\n return\n if broken_id in 
app._mount_points:\n del app._mount_points[broken_id]\n app._p_changed = True\n logger.info(\"Removed %s from Zope root _mount_points.\", broken_id)", "def removeTmpDirs():\n p = Path(\".\")\n eggDirs = [x for x in p.glob(\"*.egg-info\") if x.is_dir()]\n eggDirs.append(Path(\"build\"))\n\n for d in eggDirs:\n if d.is_dir():\n shutil.rmtree(d)", "def tmp_dir():\n tmpdir = tempfile.mkdtemp()\n yield tmpdir\n shutil.rmtree(tmpdir)", "def tearDown(self):\n for fn in self.tempImages:\n os.remove(os.path.join(self.root, fn))\n os.rmdir(self.root)", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def a_temp_file():\n filename = None\n try:\n tmpfile = tempfile.NamedTemporaryFile(delete=False)\n filename = tmpfile.name\n yield tmpfile\n finally:\n if filename and os.path.exists(filename):\n os.remove(filename)", "def cleanup(e):\n for f in e.files:\n try:\n if os.path.isfile(f):\n os.remove(f)\n except OSError:\n continue\n\n return", "def CleanUp(self, path):\n try:\n if os.path.exists(path):\n os.remove(path)\n except (OSError, IOError) as e:\n logging.info(\"Failed to remove temporary file %s. Err: %s\", path, e)", "def clean_up(self):\n directory = os.path.join(os.getcwd(), self.TMP_FOLDER)\n if os.path.exists(directory) and os.path.isdir(directory):\n shutil.rmtree(directory)", "def clear_base_files(self):\r\n compilelock.get_lock()\r\n try:\r\n for base_dir in ('cuda_ndarray', 'cutils_ext', 'lazylinker_ext',\r\n 'scan_perform'):\r\n to_delete = os.path.join(self.dirname, base_dir + '.delete.me')\r\n if os.path.isdir(to_delete):\r\n try:\r\n shutil.rmtree(to_delete)\r\n _logger.debug('Deleted: %s', to_delete)\r\n except Exception:\r\n _logger.warning('Could not delete %s', to_delete)\r\n continue\r\n to_rename = os.path.join(self.dirname, base_dir)\r\n if os.path.isdir(to_rename):\r\n try:\r\n shutil.move(to_rename, to_delete)\r\n except Exception:\r\n _logger.warning('Could not move %s to %s',\r\n to_rename, to_delete)\r\n finally:\r\n compilelock.release_lock()", "def tearDown(self) -> None:\n filtered = [f for f in glob.glob('steps/tests/test_output/*') if not re.match(r'\\.keep', f)]\n for file in filtered:\n try:\n if Path(file).is_dir():\n shutil.rmtree(file)\n else:\n os.remove(file)\n except PermissionError as pe:\n # We don't necessarily care that much\n continue", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def create_temp_files(containers):\n for name in containers:\n run_cmd(f\"rm -rf /tmp/{name}.img\", True)\n for name in containers:\n run_cmd(f\"truncate -s 1G /tmp/{name}.img\", True)", "def purge():\n all_hashes = read_all()\n used_hashes = read_used()\n\n for kind, hashes in used_hashes.items():\n to_remove = all_hashes[kind].difference(hashes)\n if kind == 'evs':\n delete_from_directory_by_hashes(EV_DIRECTORY, to_remove)\n elif kind == 'cache':\n delete_from_directory_by_hashes(CACHE_DIRECTORY, to_remove)\n elif kind == 'seeds':\n delete_from_directory_by_hashes(SEED_DIRECTORY, to_remove)\n\n reset_used()", "def mktemp(self):\n try:\n fd, fn = tempfile.mkstemp(dir=self.tempdir)\n yield fn\n finally:\n try:\n os.close(fd)\n os.unlink(fn)\n except (OSError, IOError) as e:\n print(\"could not remove temporary file: %s\" % e,\n file=sys.stderr)", "def cleanup(tempdir):\n try:\n 
shutil.rmtree(tempdir)\n except OSError:\n pass", "def __del__(self) -> None:\n try:\n shutil.rmtree(self.temp_path)\n except FileNotFoundError:\n pass", "def temporary_folder():\r\n tempdir = mkdtemp()\r\n try:\r\n yield tempdir\r\n finally:\r\n rmtree(tempdir)", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def remove_unused_files(self):\n\n response_list = self.client.api_call(\n f'files.list?'\n f'count=1000&'\n )\n assert response_list['ok']\n\n for file in [\n f for f in response_list['files']\n if not f['channels'] and not f['groups'] and not f['ims']\n ]:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']", "def delete_previous_files():\n def delete(root: Path):\n shutil.rmtree(root / 'output', ignore_errors=True)\n for p in root.iterdir():\n if str(p).endswith(('.log', 'jobs.csv', 'csv.lock', '.yaml')):\n p.unlink()\n\n delete(wt_registration_dir)\n delete(mut_registration_dir)", "def clean():\n clean_files()", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "def temporary_directory(request):\n path = tempfile.mkdtemp()\n\n def cleanup():\n \"\"\"Remove temporary directory.\"\"\"\n shutil.rmtree(path)\n\n request.addfinalizer(cleanup)\n\n return path", "def tearDown(self):\r\n remove_files(self.files_to_remove)\r\n\r\n # Remove directories last, so we don't get errors trying to remove\r\n # files which may be in the directories.\r\n for d in self.dirs_to_remove:\r\n if exists(d):\r\n rmtree(d)", "def cleanup_temp_dir(context):\n\n try:\n os.chdir(context.cwd)\n except:\n print(\"Current working file record does not exist\")\n\n try:\n context.tempdir.cleanup()\n except:\n print(\"Temporary directory cannot be cleaned up - does it exist?\")", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()", "def delete_temp_dir(app_name):\n sudo('rm -rf /tmp/.fab-deploy-{}'.format(app_name))", "def tearDown(self):\n print(\n \"\\nDeleting temporary files...\\n\")\n try:\n shutil.rmtree(TEST_DIR)\n except OSError:\n pass", "def delete_temp_file(filename):\n try:\n os.remove(filename)\n except OSError as e:\n if e.errno != errno.ENOENT:\n raise e", "def clean(session):\n clean_dirs = (\n get_path(\".cache\"),\n get_path(\".coverage\"),\n get_path(\".pytest_cache\"),\n get_path(\"__pycache__\"),\n get_path(\"build\"),\n get_path(\"dist\"),\n get_path(\"docs\", \"__pycache__\"),\n get_path(\"docs\", \"build\"),\n get_path(\"scripts\", \"macos\", \"__pycache__\"),\n get_path(\"src\", \"python\", \"bezier.egg-info\"),\n get_path(\"src\", \"python\", \"bezier\", \"__pycache__\"),\n get_path(\"tests\", 
\"__pycache__\"),\n get_path(\"tests\", \"functional\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"__pycache__\"),\n get_path(\"tests\", \"unit\", \"hazmat\", \"__pycache__\"),\n get_path(\"wheelhouse\"),\n )\n clean_globs = (\n get_path(\".coverage\"),\n get_path(\"*.mod\"),\n get_path(\"*.pyc\"),\n get_path(\"docs\", \"abi\", \"example\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyc\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.pyd\"),\n get_path(\"src\", \"python\", \"bezier\", \"*.so\"),\n get_path(\"src\", \"fortran\", \"*.o\"),\n get_path(\"tests\", \"*.pyc\"),\n get_path(\"tests\", \"functional\", \"*.pyc\"),\n get_path(\"tests\", \"unit\", \"*.pyc\"),\n )\n for dir_path in clean_dirs:\n session.run(shutil.rmtree, dir_path, ignore_errors=True)\n for glob_path in clean_globs:\n for filename in glob.glob(glob_path):\n session.run(os.remove, filename)", "def clean_files_for(file):\n for f in [file, f\"{file}.json\", f\"{file}.lock\"]:\n if os.path.isfile(f):\n os.remove(f)", "def destroyer(): # ;-)\n\n def find_files_to_remove(pyfile):\n for filename in (\"%sc\" % pyfile, \"%so\" % pyfile):\n if exists(filename):\n yield filename\n\n counter = 0\n try:\n while True:\n pyfile = (yield)\n for filename in find_files_to_remove(pyfile):\n try:\n log.debug('removing %s', filename)\n remove(filename)\n counter += 1\n except (IOError, OSError), e:\n log.error('cannot remove %s', filename)\n log.debug(e)\n except GeneratorExit:\n log.info(\"removed files: %s\", counter)", "def remove_persisted_files():\r\n persistIncarnations = get_persist_incarnation_dirs()\r\n for p in persistIncarnations:\r\n clear_dir(p)\r\n os.remove(p)\r\n clear_dir(get_persist_src_backup_dir())\r\n clear_dir(get_persist_src_dir())\r\n clear_dir(get_persist_root_dir()) \r\n\r\n #make sure the persist kb data structures aren't keeping any info \r\n global PERSISTED_LOAD_IDS\r\n AGENT_KB_MAP.clear()\r\n KB_WORKING_SET.clear()\r\n copy = PERSISTED_LOAD_IDS[:]\r\n for x in copy:\r\n PERSISTED_LOAD_IDS.remove(x)", "def delete_tempfolder(path):\n try:\n rmtree(path)\n except:\n pass", "def mp_tmpdir():\n # shutil.rmtree(TEMP_DIR, ignore_errors=True)\n os.makedirs(TEMP_DIR)\n yield TEMP_DIR\n shutil.rmtree(TEMP_DIR, ignore_errors=True)", "def TemporaryDirectory():\n name = tempfile.mkdtemp()\n try:\n yield name\n finally:\n shutil.rmtree(name)", "def cleanup(self):\n\n if self.do_nothing_bl is False:\n if os.path.exists(self.local_fileP_str) is True:\n if self.temp_dirP_obj is not None:\n self.temp_dirP_obj.cleanup()\n\n elif os.path.isdir(self.local_fileP_str) is True:\n log_obj.debug('Removing directory \"{:s}\"'.format(self.local_fileP_str))\n shutil.rmtree(self.local_fileP_str)\n\n else:\n log_obj.debug('Removing file \"{:s}\"'.format(self.local_fileP_str))\n os.remove(self.local_fileP_str)", "def clean_chunk_files(dirpath):\n workdir = os.getcwd()\n os.chdir(dirpath)\n for filename in glob.glob(\"[0-9]*_[0-9]*_[0-9]*.hdf5\"):\n os.remove(filename)\n os.chdir(workdir)", "def cleanup(self):\n try:\n self.wc = os.path.dirname(self.wc)\n rmtree2(self.wc)\n except IOError, err:\n self.log.exception(\"Can't remove working copy %s: %s\" % (self.wc, err))", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def teardown_upload(self, upload, 
filesystem_only=True):\n # This is like \"rm -rf path\"\n shutil.rmtree(upload.path, ignore_errors=True)\n if filesystem_only:\n return\n for input in upload.input_set.all():\n input.delete()\n upload.delete()", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def clean(args):\n log = 'removing tmp dir %s ' % (args.tmpdir)\n if args.tmpdir.endswith('STAR'):\n cmd = ['rm -rf %s' % (args.tmpdir)]\n run_subprocess(cmd,args,log)\n log = \"remove tmp files from output dir\"\n cmd = ['mv %s/crick_joinedLog.final.out %s/Crick_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_joinedLog.final.out %s/Watson_joinedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/crick_mergedLog.final.out %s/Crick_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['mv %s/watson_mergedLog.final.out %s/Watson_mergedLog.final.out' % (args.output_dir, args.output_dir)]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/crick_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/watson_*' % args.output_dir]\n run_subprocess(cmd, args, log)\n cmd = ['rm -rf %s/joined* header.sam' % args.output_dir]\n run_subprocess(cmd, args, log)" ]
[ "0.7633441", "0.74177027", "0.72147197", "0.7178067", "0.6853436", "0.6829257", "0.67934287", "0.6738931", "0.67164594", "0.66771114", "0.66554505", "0.66493994", "0.6645599", "0.66091245", "0.65514976", "0.6517317", "0.6492228", "0.64918196", "0.6469804", "0.6463625", "0.6384307", "0.6337415", "0.63244194", "0.630464", "0.6293487", "0.6289055", "0.6287042", "0.6251614", "0.6228589", "0.6225558", "0.6214598", "0.62001336", "0.61762774", "0.6172705", "0.6162901", "0.6132595", "0.6106373", "0.6087703", "0.6058779", "0.6049737", "0.60460365", "0.6030746", "0.6006572", "0.60036427", "0.598809", "0.597805", "0.59555465", "0.5937812", "0.59225976", "0.5917463", "0.59167314", "0.590231", "0.5875408", "0.5851276", "0.5846214", "0.58213806", "0.58002186", "0.57968944", "0.5793422", "0.578492", "0.5784089", "0.5783603", "0.57478046", "0.5740641", "0.57396", "0.57326055", "0.5731126", "0.5731013", "0.5730098", "0.5728579", "0.5713782", "0.568548", "0.5678711", "0.56634337", "0.56577975", "0.5654923", "0.5641932", "0.56248987", "0.56214225", "0.56198853", "0.5611203", "0.5601234", "0.5590344", "0.5584606", "0.5580987", "0.5569908", "0.5568132", "0.55641216", "0.5557665", "0.5545068", "0.55425787", "0.5535302", "0.55308425", "0.55295897", "0.5525493", "0.5525173", "0.5517086", "0.55144644", "0.5514001", "0.5510968" ]
0.7262391
2
Used if copy.deepcopy is called on the variable.
def __deepcopy__(self, memo): return self.copy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def varcopy(self, vars):", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):", "def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")", "def __copy__(self):\n return type(self)(self.value)", "def v_full_copy(self, val):\n val = bool(val)\n self._full_copy = val", "def _prepare_cache(self, value):\n\n return deepcopy(value)", "def __copy__(self):\n return self.copy()", "def _copy_(self):\n return copy.copy(self)", "def __copy__(self):\n raise NotImplementedError", "def copy(self):\n return type(self)(self._val, lsd=self._lsd)", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def deepcopy(self):\n return self.copy()", "def __getstate__(self):\n copy = self.__dict__.copy()\n copy['_workaround'] = None\n return copy", "def copy(self):\n return super().copy()", "def copy(self):\r\n raise Exception, \"not implemented\"", "def test_context_immutable_deepcopy():\n context: Context = Context()\n assert context is deepcopy(context)", "def __deepcopy__(self, memo):\r\n new_inst = super().__deepcopy__(memo)\r\n new_inst.road_width = self.road_width\r\n new_inst.road_length = self.road_length\r\n new_inst.surface = self.surface\r\n \r\n return new_inst", "def test_deepcopy(self):\n t = Precision()\n t.transform([2])\n copy.deepcopy(t)", "def __originate__(self):\n self.pos_to_num = deepcopy(self.o_pos_to_num)\n self.num_to_pos = deepcopy(self.o_num_to_pos)", "def __deepcopy__(self, memodict=None):\n return self.copy()", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy", "def test_deepcopy(self):\n t = Reverse(Quantize())\n t.transform([2])\n copy.deepcopy(t)", "def test_deepcopy_removes_cached_values(self):\n foreign_object = Membership._meta.get_field(\"person\")\n # Trigger storage of cached_property into ForeignObject's __dict__.\n foreign_object.path_infos\n foreign_object.reverse_path_infos\n # The ForeignObjectRel doesn't have reverse_path_infos.\n foreign_object.remote_field.path_infos\n self.assertIn(\"path_infos\", foreign_object.__dict__)\n self.assertIn(\"reverse_path_infos\", foreign_object.__dict__)\n self.assertIn(\"path_infos\", foreign_object.remote_field.__dict__)\n # Cached value is removed via __getstate__() on ForeignObjectRel\n # because no __deepcopy__() method exists, so __reduce_ex__() is used.\n remote_field_copy = copy.deepcopy(foreign_object.remote_field)\n self.assertNotIn(\"path_infos\", remote_field_copy.__dict__)\n # Field.__deepcopy__() internally uses __copy__() on both the\n # ForeignObject and ForeignObjectRel, so all cached values are removed.\n foreign_object_copy = copy.deepcopy(foreign_object)\n self.assertNotIn(\"path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"reverse_path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"path_infos\", 
foreign_object_copy.remote_field.__dict__)", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def copy(self, deep=False):\n return _(copy.deepcopy(self._) if deep else copy.copy(self._))", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def test_deepcopy(self):\n t = Enumerate([2, \"asfa\", \"ipsi\"])\n # Copy won't fail if vectorized function is not called at least once.\n t.transform([2])\n copy.deepcopy(t)", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def clone(self):\n return None", "def copy(self):\r\n return self.replace()", "def _get_reuse_value(self):\n return self.__reuse_value", "def copy(self):\r\n return copy.copy(self)", "def __deepcopy__(self, memo):\n copy = self.__class__()\n copy.wvalues = self.wvalues\n return copy", "def __deepcopy__(self, memo):\n from copy import deepcopy\n return self.__class__(deepcopy(self.items(), memo), self.strict)", "def __deepcopy__(self, memo):\n return Quantity(copy.deepcopy(self._value, memo), self.unit)", "def deepcopy(self):\n return copy.deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def v_full_copy(self):\n return self._full_copy", "def deepcopy(self):\n return copymod.deepcopy(self)", "def copy(self):\n from copy import deepcopy\n return deepcopy(self)", "def copy(self):\n return self.__class__(self.value, self.is_cloud)", "def debug_copies(self):\n self.b_debug = self.B.clone().detach()\n self.Bc_debug = self.Bc.clone().detach()\n self.W_debug = self.W.clone().detach()\n self.Wc_debug = self.Wc.clone().detach()", "def __copy__(self):\n return type(self)(self.number)", "def copy(self):\n return self.mutate().simple_copy()", "def test_copy(self):\n data = [[0, 1], [1, 0]]\n b1 = Board(data)\n b2 = b1.copy()\n # test if proper copy\n self.assertListEqual(b1.data, b2.data)\n # teset if not just a shallow copy\n b1.data[0][0] = 1\n self.assertNotEqual(b1.data[0][0], b2.data[0][0])", "def __getstate__(self):\n result = super(Parameter, self).__getstate__()\n\n # If we don't need a full copy of the Parameter (because a single process needs\n # only access to a single point in the parameter space) we can delete the rest\n if not self._full_copy:\n result[\"_explored_range\"] = []\n\n return result", "def test_deepcopy(self):\n t = Identity()\n t.transform([2])\n copy.deepcopy(t)", "def copy(self) -> \"Param\":\n copied = super().copy()\n copied._stack = OrderedDiot(\n [(key, param.copy()) for key, param in self._stack.items()]\n )\n return copied", "def cdup (self):\r\n pass", "def __deepcopy__(self, memo):\n memo[id(self)] = self\n return self", "def __deepcopy__(self, 
memo):\n memo[id(self)] = self\n return self", "def _save_state_as_orig(self):\n self._orig = None\n self._orig = deepcopy(self)", "def deep_copy(value: TValue) -> TValue:\n def pattern_dispatcher(v, memo=None):\n return v # we don't need to copy a regex pattern object, it's read-only\n\n old_dispatcher = copy._deepcopy_dispatch.get(PatternType, None)\n copy._deepcopy_dispatch[PatternType] = pattern_dispatcher\n try:\n return copy.deepcopy(value)\n finally:\n if old_dispatcher is not None: # pragma: no cover\n copy._deepcopy_dispatch[PatternType] = old_dispatcher\n else:\n del copy._deepcopy_dispatch[PatternType]", "def test_deepcopy_RiakDisabledForTest(self):\n rdft = RiakDisabledForTest()\n self.assertEqual(rdft, deepcopy(rdft))", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def test_deepcopy(self):\n t = Quantize()\n t.transform([2])\n copy.deepcopy(t)", "def clone(self):", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def __deepcopy__(self, memo):\n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k in ('_iter', '_result_cache'):\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj", "def copy(self):\n \n return deepcopy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def test_copy_removes_direct_cached_values(self):\n foreign_object = Membership._meta.get_field(\"person\")\n # Trigger storage of cached_property into ForeignObject's __dict__.\n foreign_object.path_infos\n foreign_object.reverse_path_infos\n # The ForeignObjectRel doesn't have reverse_path_infos.\n foreign_object.remote_field.path_infos\n self.assertIn(\"path_infos\", foreign_object.__dict__)\n self.assertIn(\"reverse_path_infos\", foreign_object.__dict__)\n self.assertIn(\"path_infos\", foreign_object.remote_field.__dict__)\n # Cached value is removed via __getstate__() on ForeignObjectRel\n # because no __copy__() method exists, so __reduce_ex__() is used.\n remote_field_copy = copy.copy(foreign_object.remote_field)\n self.assertNotIn(\"path_infos\", remote_field_copy.__dict__)\n # Cached values are removed via __copy__() on ForeignObject for\n # consistency of behavior.\n foreign_object_copy = copy.copy(foreign_object)\n self.assertNotIn(\"path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"reverse_path_infos\", foreign_object_copy.__dict__)\n # ForeignObjectRel's remains because it's part of a shallow copy.\n self.assertIn(\"path_infos\", foreign_object_copy.remote_field.__dict__)", "def copy(self):\n copied = super().copy()\n copied.anonymize()\n return copied", "def copy (self):\n import copy\n return copy.copy(self)", "def copy(self):\r\n return copy.deepcopy(self)", "def __deepcopy__(self, memo):\n id_self = id(self)\n _copy = memo.get(id_self)\n if _copy is None:\n _copy = type(self)(\n deepcopy(self.value, memo))\n memo[id_self] = _copy\n return _copy", "def dup():\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n value = stack[-1]\n stack.append(value)\n return value", "def is_assign(self):\n return self.var.initializer is not None", "def __deepcopy__(self, memo):\n obj = self.__class__()\n for k, v in self.__dict__.items():\n if k == '_result_cache':\n obj.__dict__[k] = None\n else:\n obj.__dict__[k] = copy.deepcopy(v, memo)\n return obj", "def copy(self):\n # YOUR CODE HERE\n 
raise NotImplementedError()" ]
[ "0.6630489", "0.63350475", "0.63350475", "0.63350475", "0.6280567", "0.6222454", "0.6153182", "0.6143717", "0.61114347", "0.6110205", "0.6047252", "0.5978211", "0.5953629", "0.59274524", "0.59274524", "0.59274524", "0.58838916", "0.5878963", "0.5824744", "0.5819304", "0.5793884", "0.5777081", "0.5758991", "0.5758069", "0.572324", "0.5719103", "0.5719103", "0.5719103", "0.5719103", "0.56930023", "0.56777954", "0.5674818", "0.5668968", "0.5664992", "0.56465626", "0.56462395", "0.5645335", "0.56293344", "0.5628501", "0.5623695", "0.56125873", "0.56009096", "0.5600017", "0.55993164", "0.55986196", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.55928713", "0.5592555", "0.5588263", "0.55844194", "0.5568928", "0.55683833", "0.5560667", "0.55541945", "0.555172", "0.55359006", "0.55347115", "0.55344105", "0.5534381", "0.55002373", "0.5499811", "0.5499811", "0.54897", "0.54830164", "0.5482144", "0.547824", "0.547824", "0.547824", "0.547824", "0.5478107", "0.5470999", "0.54693365", "0.54693365", "0.5465802", "0.5452104", "0.544655", "0.544655", "0.544655", "0.54386574", "0.54320234", "0.54219204", "0.5413256", "0.5394411", "0.5393753", "0.539016", "0.5378328", "0.5375993" ]
0.60275984
11
Called when the partition's reference count reaches zero. If the partition contains a temporary file which is not referenced by any other partition then the temporary file is removed from disk. If the partition contains a non-temporary file which is not referenced by any other partition then the file is closed.
def __del__(self): # subarray = getattr(self, '_subarray', None) subarray = self._subarray # If the subarray is unique it will have 2 references to # it plus 1 within this method, making 3. If it has more # than 3 references to it then it is not unique. if getrefcount is not None: self._decrement_file_counter() if subarray is None or getrefcount(subarray) > 3: return else: # getrefcount has itself been deleted or is in the process # of being torn down return _partition_file = getattr(subarray, "_partition_file", None) if _partition_file is not None: # This partition contains a temporary file which is not # referenced by any other partition on this process, so if # there are no lock files present remove the file from # disk. _remove_temporary_files(_partition_file) else: try: if FileArray is not None and isinstance(subarray, FileArray): try: filename = subarray.get_filename() except Exception: filename = None if self.file_counter.get(filename, 999) <= 0: # This partition contains a non-temporary file # which is not referenced by any other # partitions, so close the file. subarray.close() except Exception: # If we're here then it is likely that FileArray has been # torn down, so just do nothing. pass # --- End: if
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))", "def _Close(self):\n self._fsfat_volume = None\n self._file_object = None", "def __del__(self):\n if (\n self._fpointer is not None and not self._fpointer.closed\n ): # pragma: no mutate\n self._fpointer.close()", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1", "def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)", "def __purge_old_files(self):\n\n chkpts = self.checkpointer.sorted_checkpoints()\n p_chkpts = []\n e_chkpts = []\n for c in chkpts:\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.PERIODIC_PREFIX):\n p_chkpts.append(c)\n\n if c.startswith(self.checkpointer.prefix + CheckpointingCallback.EPOCH_PREFIX):\n e_chkpts.append(c)\n\n # Delete periodic checkpoints\n if self.max_files is not None and len(p_chkpts) > self.max_files:\n for c in p_chkpts[self.max_files:]:\n log.debug(\"CheckpointingCallback deleting {}\".format(c))\n self.checkpointer.delete(c)\n\n # Delete older epochs\n if self.max_epochs is not None and len(e_chkpts) > self.max_epochs:\n for c in e_chkpts[self.max_epochs:]:\n log.debug(\"CheckpointingCallback deleting (epoch) {}\".format(c))\n self.checkpointer.delete(c)", "def __del__(self):\n if self.file is None:\n return\n try:\n self.file.close()\n del self.file\n self.file = None\n except:\n getLogger(__name__).warning('Error on file close', exc_info=True)", "def __del__(self):\n self.close_files()", "def __del__(self):\r\n self.filename.close()", "def flow_file_chunk_delete(sender, instance, **kwargs):\n instance.file.delete(False)", "def _recover_disk_space(self):\n while self.used_disk_space > self.cache_size:\n space_to_recover = self.used_disk_space - self.cache_size\n logger.info('Recovering disk space %s', space_to_recover)\n lru_file = self.touch_list.pop(0)\n file_path = self._path_to_file(lru_file)\n logger.info('Deleting %s', file_path)\n os.remove(file_path)\n del self.index[lru_file]", "def __del__(self):\n\t\tif self.temp_dir:\n\t\t\tself.temp_dir.cleanup()", "def tearDown(self):\n try:\n os.remove(self.junk_file)\n except OSError as doh:\n if doh.errno == 2:\n # No such File, ignore\n pass\n else:\n raise", "def __del__(self) -> None:\n try:\n shutil.rmtree(self.temp_path)\n except FileNotFoundError:\n pass", "def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None", "def dispose(self):\n rmtree(self._temp_path)", "def unlink(self,):\n self._wait()\n self.fd.close()\n self.fd = None\n os.unlink(self.fname)", "def __del__(self):\n self.file.close()", "def __del__(self):\r\n self.chunk = None", "def 
endWrite(self, withErrors):\r\n #if withErrors or self._file_obj.get_seek()>0:\r\n self.provider.cache_fs.remove(self.path)", "def release(self, path, fh, *args, **pargs):\n with(self.rwlock):\n # If we're closing a FLACCue file...\n if(path in self._open_subtracks):\n # Delete the file handle from the stored list.\n del self._open_subtracks[path]['Positions'][fh]\n # Close the OS reference to the file.\n return os.close(fh)", "def WriteAbort(self):\n if self._file_object:\n self._file_object.close()\n self._file_object = None\n\n if os.path.exists(self.name):\n os.remove(self.name)", "def __del__(self):\n for handle in self._filehandles:\n handle.close()", "def __del__(self):\n self.file_out.close()", "def __del__(self):\n self.f.close()", "def unique_files(self):\n self._tempfiles[-1].ctr = -1", "def test_ClearOldFile(self):\n q = Queue(self.path, chunksize=10)\n for i in range(15):\n q.put('var1')\n\n for i in range(11):\n q.get()\n\n q = Queue(self.path, chunksize=10)\n self.assertEqual(q.qsize(), 15)\n\n for i in range(11):\n q.get()\n q.task_done()\n self.assertEqual(q.qsize(), 4)", "def release(self):\n #关闭文件,删除文件\n if self.fd is not None:\n os.close(self.fd)\n os.unlink(self.lockfile)\n self.is_locked = False\n self.fd = None", "def releaseFile(self, fid):\n if fid in self.files:\n del self.files[fid]", "def __del__(self):\n for f in self._files:\n f.close()", "def test_removed(self):\n path = None\n with TemporaryDirectory() as tmp:\n path = tmp\n self.assertTrue(os.path.isdir(tmp))\n tmpfile = os.path.join(tmp, \"a_temp_file\")\n open(tmpfile, \"w\").write(\"data\")\n self.assertTrue(os.path.isfile(tmpfile))\n self.assertFalse(os.path.isdir(path))\n self.assertFalse(os.path.exists(path))", "def test_disconnect_file_watchers_removes_refs(self):\n session = _create_test_session()\n\n # Various listeners should have references to session file/pages/secrets changed\n # handlers.\n self.assertGreater(len(gc.get_referrers(session)), 0)\n\n session.disconnect_file_watchers()\n # Ensure that we don't count refs to session from an object that would have been\n # garbage collected along with it.\n gc.collect(2)\n\n self.assertEqual(len(gc.get_referrers(session)), 0)", "def Close(self):\n if not self._is_open:\n raise IOError('Storage file already closed.')\n\n if not self._read_only:\n self.Flush()\n\n if self._serializers_profiler:\n self._serializers_profiler.Write()\n\n # Make sure to flush the caches so that zipfile can be closed and freed.\n # Otherwise on Windows the ZIP file remains locked and cannot be renamed.\n\n self._offset_tables = {}\n self._offset_tables_lfu = []\n\n self._open_streams = {}\n self._streams_lfu = []\n\n self._event_timestamp_tables = {}\n self._event_timestamp_tables_lfu = []\n\n self._zipfile.close()\n self._zipfile = None\n self._is_open = False\n\n file_renamed = False\n if self._path != self._zipfile_path and os.path.exists(self._zipfile_path):\n # On Windows the file can sometimes be still in use and we have to wait.\n for attempt in range(1, self._MAXIMUM_NUMBER_OF_LOCKED_FILE_ATTEMPTS):\n try:\n os.rename(self._zipfile_path, self._path)\n file_renamed = True\n break\n\n except OSError:\n if attempt == self._MAXIMUM_NUMBER_OF_LOCKED_FILE_ATTEMPTS:\n raise\n time.sleep(self._LOCKED_FILE_SLEEP_TIME)\n\n self._path = None\n self._zipfile_path = None\n\n if self._path != self._zipfile_path and not file_renamed:\n raise IOError('Unable to close storage file.')", "def test_file_closed(self):\n try:\n with get_temp_file() as (fd, name):\n os.close(fd)\n except 
Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)", "def teardown(self):\n super(TestCisPickleInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def __del__(self):\n for component_name, file in self._file_list.items():\n file.close()", "def finalize(self):\n if self._file:\n toLog(\"Closing file `{0}`\".format(self._fname), True)\n self._file.close()\n self._file = None", "def purge(self):\n if self.is_open and self.direction == 'recv':\n try:\n self.fd.seek(0, os.SEEK_END)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clear_tempfiles(self, remove=True):\n while self._tempfiles:\n self.pop(remove)\n self.push()", "def attempt_file_reset(f):\r\n if hasattr(f, 'seek'):\r\n f.seek(0)", "def teardown(self):\n super(TestCisPickleOutput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def _try_cleanup_uploads(self):\n try:\n with GlobalLock(\"BLOB_CLEANUP\", lock_ttl=LOCK_TTL):\n self._cleanup_uploads()\n except LockNotAcquiredException:\n logger.debug(\"Could not acquire global lock for blob upload cleanup worker\")\n return", "def purge(self):\n self.remaining = 0", "def delete_io( hash ):\n res = 0\n record_used('cache', hash)\n for packet in get_filenames_for_hash(CACHE_DIRECTORY, hash):\n try:\n os.remove(packet)\n res = res + 1\n except:\n if not os.environ.get('CALIENDO_TEST_SUITE', None):\n logger.warning( \"Failed to remove file: \" + packet )\n return res", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def classCleanup(cls):\n cls.RemoveTempFile(\"child_send1.txt\")\n cls.RemoveTempFile(\"child_read1.txt\")\n cls.RemoveTempFile(\"child_send2.txt\")\n cls.RemoveTempFile(\"child_read2.txt\")", "def cleanup(self):\n files = self.nlst()\n latest = self.latest_filename\n for filename in files:\n if filename != latest:\n result = self.delete(filename)\n logger.info(f\"Deleted old export from FTP: {result}\")", "def task_clean_tmp_files():\n client = google.cloud.storage.Client()\n blobs = client.list_blobs(settings.PODCAST_STORAGE_BUCKET,\n prefix=settings.PODCAST_TMP_STORAGE_DIRECTORY)\n for blob in blobs:\n if blob.time_created.replace(tzinfo=None) + datetime.timedelta(1) <= datetime.datetime.now():\n blob.delete()\n\n return OK_RESPONSE", "def __del__(self):\n if hasattr(self, \"_uniquefile_created\"):\n self._unlock()\n else:\n # When instance attributes don't exist, we probably had an error\n # in the construction process (like an invalid argument to\n # __init__()). 
In that case, there's no chance we have a unique\n # file or a lock to clean up.\n pass", "def file_pointer(self):\n\n try:\n self.__file.seek(self.__file.tell() - 1)\n except Exception as e:\n raise e", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def cleanup(self):\n\n if self.do_nothing_bl is False:\n if os.path.exists(self.local_fileP_str) is True:\n if self.temp_dirP_obj is not None:\n self.temp_dirP_obj.cleanup()\n\n elif os.path.isdir(self.local_fileP_str) is True:\n log_obj.debug('Removing directory \"{:s}\"'.format(self.local_fileP_str))\n shutil.rmtree(self.local_fileP_str)\n\n else:\n log_obj.debug('Removing file \"{:s}\"'.format(self.local_fileP_str))\n os.remove(self.local_fileP_str)", "def _decrement_file_counter(self):\n self._add_to_file_counter(-1)", "def __del__(self):\n for filename in self.files:\n unlink(filename)", "def finalize(self):\n # 027 Not needed in the simple FilesAdaptor. \n pass", "def __del__(self):\n if not self.sigfile.closed:\n self.sigfile.close()", "def delete_partition(self, partition):\n raise NotImplementedError('delete_file')", "def teardown(self):\n self.file_comm.remove_file()\n super(TestCisAsciiFileOutput, self).teardown()", "def release(self):\n if self._ctx is None:\n return\n self.atomicfile.delete()\n try:\n self._ctx.__exit__(None, None, None)\n finally:\n self._ctx = None", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def wipe(self):", "def wipe(self):", "def _wipe(self):\n log_method_call(self, self.name, status=self.status)\n\n start = self.partedPartition.geometry.start\n part_len = self.partedPartition.geometry.end - start\n bs = self.partedPartition.geometry.device.sectorSize\n device = self.partedPartition.geometry.device.path\n\n # Erase 1MiB or to end of partition\n count = int(Size(\"1 MiB\") / bs)\n count = min(count, part_len)\n\n cmd = [\"dd\", \"if=/dev/zero\", \"of=%s\" % device, \"bs=%s\" % bs,\n \"seek=%s\" % start, \"count=%s\" % count]\n try:\n util.run_program(cmd)\n except OSError as e:\n log.error(str(e))\n finally:\n # If a udev device is created with the watch option, then\n # a change uevent is synthesized and we need to wait for\n # things to settle.\n udev.settle()", "def file(self):\n del self._file", "def cleanup(self):\n if self.log_fo:\n self.log_fo.close()", "def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False", "def release(self):\n self.filelock.set()\n self.locked = False\n self.exclusive = False", "def current_remove(self):\n storage.close()", "def test_deleting_local_file_using_file_io() -> None:\n with tempfile.TemporaryDirectory() as tmpdirname:\n # Write to the temporary file\n output_file_location = os.path.join(tmpdirname, \"foo.txt\")\n with open(output_file_location, \"wb\") as f:\n f.write(b\"foo\")\n\n # Instantiate the file-io\n file_io = PyArrowFileIO()\n\n # Confirm that the file initially exists\n assert os.path.exists(output_file_location)\n\n # Delete the file using the file-io implementations delete method\n file_io.delete(output_file_location)\n\n # Confirm that the file no longer exists\n assert not os.path.exists(output_file_location)", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def remove(self):\n for ref_node in self.node.find_references():\n ref_node.destroy()\n 
File.remove(self)", "def _cleanup(self):\n if self.pidfile:\n os.unlink(self.pidfile)", "def clean(self):\n\t\tself.archiver.closeFile()", "def prune_empty(self): # FileObj.prune_empty\n return False # can't prune a file", "def close_ref(self, filename):\n ctx = self.open_ref_contexts.pop(filename)\n ctx.close()\n ref = ctx.get_completed_ref()\n self.task_record.publish_ref(ref)\n return ref", "def cleanup(self):\n try:\n self.wc = os.path.dirname(self.wc)\n rmtree2(self.wc)\n except IOError, err:\n self.log.exception(\"Can't remove working copy %s: %s\" % (self.wc, err))", "def _remove_unique_file(self):\n if self._uniquefile_created:\n self._unlink(self.uniquefile)\n self._uniquefile_created = False\n self._p(\"Unique file deleted: %s\" % self.uniquefile)", "def __del__(self):\n\n if self._is_open:\n self.close()", "def preview_file_cleanup(sender, **kwargs):\n\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def _read_xid_from_fp_xid_temp_and_delete_file(self, fn_temp):\n\t\tif os.path.isfile(fn_temp): \n\t\t\tif os.stat(fn_temp).st_size > 0:\n\t\t\t\txid = at.Table.read(fn_temp, format='ascii.csv', comment='#')\n\t\t\t\tos.remove(fn_temp)\n\t\t\t\txid = self._xid_pick_only_closest(xid)\n\t\t\telse: \n\t\t\t\tprint(\"[hscObj] no object found - xid file empty\")\n\t\t\t\tos.remove(fn_temp)\n\t\t\t\txid = None\n\t\telse: \n\t\t\tprint(\"[hscObj] query failed\")\n\t\t\txid = None\n\t\treturn xid", "def _clean_up_temporary_files(dataset_dir):\n return", "def teardown(self):\n super(TestCisAsciiFileInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def track_ref_for_deletion(self, ref):\n if ref not in self.__refs_for_deletion:\n self.__refs_for_deletion.append(ref)", "def tearDown(self):\r\n remove_files(self.files_to_remove, False)\r\n if self.tmpdir:\r\n rmtree(self.tmpdir)\r\n\r\n # clean up the file from init_flowgram_file\r\n if (hasattr(self, \"tmp_filename\") and exists(self.tmp_filename)):\r\n remove(self.tmp_filename)", "def teardown(self):\n super(TestCisObjInput, self).teardown()\n if os.path.isfile(self.tempfile):\n os.remove(self.tempfile)", "def wipeFile(file_name):\r\n WipeFileThread(file_name)", "def tearDown(self):\n for f in os.listdir('/tmp'):\n if not f.startswith(self.FILE_PREFIX):\n continue\n\n os.remove(os.path.join('/tmp', f))", "def _delete_temp():\n global _TEMP_NAME\n\n try:\n database.delete_temp(_TEMP_NAME)\n outputtools.delete_temp(_TEMP_NAME)\n except:\n raise", "def tearDown(self):\n os.remove(self._file)", "def the_ending(self):\n storage.close()", "def flow_file_delete(sender, instance, **kwargs):\n if FLOWJS_REMOVE_FILES_ON_DELETE:\n try:\n default_storage.delete(instance.path)\n except NotImplementedError:\n pass", "def cleanup(self):\n if os.path.exists(self.tgzfile):\n os.remove(self.tgzfile)\n\n if os.path.exists(self.dirname):\n shutil.rmtree(self.dirname)", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.remove_files()\n return False", "def doRollover(self):\r\n if self.maxBytes <= 0:\r\n return\r\n\r\n if not (self.stream.tell() >= self.maxBytes):\r\n return\r\n\r\n self.stream.close()\r\n if self.backupCount > 0:\r\n for i in range(self.backupCount - 1, 0, -1):\r\n sfn = \"%s.%d\" % (self.baseFilename, i)\r\n dfn = \"%s.%d\" % (self.baseFilename, i + 1)\r\n if os.path.exists(sfn):\r\n self.removeAndRename(sfn, dfn)\r\n dfn = self.baseFilename + \".1\"\r\n self.removeAndRename(self.baseFilename, dfn)\r\n self.stream = 
open(self.baseFilename, 'w')", "def clean_temp_storage_dir(self, filenames):\n for fn in filenames:\n try:\n pathlib.Path(pathlib.PurePath(self.temp_storage_dir, fn)).unlink()\n except FileNotFoundError:\n pass", "def stopWatchingFileSystem(self) :\n\n self.continueWatchingFS = False", "def test_monitor_correctly_deletes_temporary_directory_in_the_case_of_any_error(\n self,\n ):\n # Arrange\n feed_pages = [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2]\n\n client = create_autospec(spec=ProQuestAPIClient)\n client.download_all_feed_pages = MagicMock(\n return_value=list(map(fixtures.serialize, feed_pages))\n )\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory, self._db, self._proquest_collection, ProQuestOPDS2Importer\n )\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n results = {\"temp_directory\": None, \"temp_files\": []}\n original_mkdtemp = tempfile.mkdtemp\n original_temp_file_constructor = tempfile.NamedTemporaryFile\n original_rmtree = shutil.rmtree\n\n def create_temp_directory():\n results[\"temp_directory\"] = original_mkdtemp()\n\n return results[\"temp_directory\"]\n\n def create_temp_file(**kwargs):\n temp_file = original_temp_file_constructor(**kwargs)\n results[\"temp_files\"].append(temp_file.name)\n\n return temp_file\n\n # Act\n with patch(\"tempfile.mkdtemp\") as mkdtemp_mock, patch(\n \"tempfile.NamedTemporaryFile\"\n ) as named_temporary_file_constructor_mock, patch(\n \"shutil.rmtree\"\n ) as rmtree_mock, patch(\n \"api.proquest.importer.parse_feed\"\n ) as parse_feed_mock:\n mkdtemp_mock.side_effect = create_temp_directory\n named_temporary_file_constructor_mock.side_effect = create_temp_file\n rmtree_mock.side_effect = original_rmtree\n parse_feed_mock.side_effect = core.opds2_import.parse_feed\n\n # An exception will be raised while trying to parse the feed page.\n parse_feed_mock.side_effect = Exception(\"\")\n\n monitor.run_once(False)\n\n # Assert\n # Ensure that the temp directory was successfully created.\n tempfile.mkdtemp.assert_called_once()\n\n # Ensure that only one temp file was created, after this an exception was raised and the process stopped.\n tempfile.NamedTemporaryFile.assert_has_calls(\n [call(mode=\"r+\", dir=results[\"temp_directory\"], delete=False)]\n )\n\n # Ensure that parse_feed method was called only once.\n parse_feed_mock.assert_has_calls([call(ANY, silent=False)])\n\n # Ensure that the temp directory was successfully removed.\n shutil.rmtree.assert_called_once_with(results[\"temp_directory\"])\n assert False == os.path.exists(results[\"temp_directory\"])" ]
[ "0.637165", "0.6141179", "0.6079966", "0.60165036", "0.5893113", "0.5771018", "0.5769767", "0.5752214", "0.5731687", "0.5730452", "0.57243013", "0.5715975", "0.5715163", "0.5711628", "0.5696097", "0.56944114", "0.56878215", "0.56815344", "0.5681272", "0.5633728", "0.5630855", "0.56236386", "0.5620949", "0.5599183", "0.5533983", "0.5530115", "0.552735", "0.5516672", "0.55129987", "0.5509583", "0.5503348", "0.5479684", "0.54776025", "0.54776025", "0.5476024", "0.5467237", "0.5454183", "0.5448012", "0.5440473", "0.54392135", "0.54392", "0.54382205", "0.5435678", "0.542985", "0.5427196", "0.54195726", "0.54117715", "0.53861576", "0.53838927", "0.5373599", "0.537312", "0.53651375", "0.53607184", "0.5358157", "0.5340452", "0.53342813", "0.5334151", "0.5323054", "0.53058237", "0.5295279", "0.5289411", "0.5282252", "0.5279168", "0.5266135", "0.5266135", "0.52655274", "0.5265467", "0.52640206", "0.52572554", "0.52572554", "0.52481645", "0.5247483", "0.524704", "0.52389145", "0.52380466", "0.5229377", "0.52276593", "0.5226616", "0.5217799", "0.5217318", "0.5216734", "0.5212733", "0.5212685", "0.52046865", "0.5198442", "0.51889175", "0.5185455", "0.5184727", "0.5181946", "0.51801217", "0.51741946", "0.5173718", "0.5170676", "0.5169946", "0.51690054", "0.51684195", "0.51642966", "0.5161535", "0.5157501", "0.51567405" ]
0.61570215
1
Add i to the count of subarrays referencing the file of this partition's subarray. Only do this if self._subarray is an instance of FileArray, but not a temporary FileArray.
def _add_to_file_counter(self, i): # subarray = getattr(self, '_subarray', None) subarray = self._subarray if subarray is None: return try: if isinstance(subarray, FileArray) and not isinstance( subarray, CachedArray ): try: filename = subarray.get_filename() except Exception: filename = None if filename is None: return file_counter = self.file_counter # count = file_counter.get(filename, 0) # file_counter[filename] = count + i # if file_counter[filename] <= 0: count = file_counter.get(filename, 0) + i if count <= 0: # Remove the file from the dictionary if its count has # dropped to zero file_counter.pop(filename, None) else: file_counter[filename] = count except Exception: # If we're here then it is likely that FileArray has been # torn down, so just do nothing. pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def write_sub_index(self):\n for sie in self.subIndex:\n self.db_file.write(sie.get_representation())", "def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)", "def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)", "def _increment_file_counter(self):\n self._add_to_file_counter(1)", "def add_index(self, idx, subproblem_shape):\n self.indices.append(int(idx))\n self.subproblem_shapes.append(subproblem_shape)", "def add_photo(self, new_photo, i):\r\n self.__photos[i] = new_photo", "def append(self, i):\n \n self.ret.append(i)", "def array(self):\n config = self.config\n\n unique_array = config[\"unique_subarray\"]\n\n p_axes = self.axes\n p_flip = self.flip\n p_part = self.part\n p_units = self.Units\n p_shape = self.shape\n p_location = self.location\n subarray = self._subarray\n\n len_p_axes = len(p_axes)\n\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is not in memory.\n #\n # It could be in a file on disk or implied by a FileArray\n # object, etc.\n # --------------------------------------------------------\n self._original = self.copy()\n\n unique_array = True\n update = True\n copy = False\n\n if not p_part:\n indices = Ellipsis\n else:\n indices = tuple(p_part)\n\n # Read from a file into a numpy array\n p_data = subarray[indices]\n\n # We've just copied p_data from disk, so in place changes\n # are not possible\n in_place_changes = False\n else:\n # --------------------------------------------------------\n # The subarray is in memory\n # --------------------------------------------------------\n update = config[\"update\"]\n\n if p_part:\n p_data = get_subspace(subarray, p_part)\n elif not unique_array:\n 
p_data = subarray.view()\n else:\n p_data = subarray\n\n copy = config[\"extra_memory\"]\n\n # In place changes to p_data might be possible if we're not\n # copying the data\n in_place_changes = not copy\n\n if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):\n # --------------------------------------------------------\n # p_data is a numpy number (like numpy.int64) which does\n # not support assignment, so convert it to a numpy array.\n # --------------------------------------------------------\n p_data = numpy_array(p_data)\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n\n masked = numpy_ma_isMA(p_data)\n if masked:\n # The p_data is a masked array\n if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(\n p_data\n ):\n # There are no missing data points so recast as an\n # unmasked numpy array\n p_data = p_data.data\n masked = False\n # --- End: if\n\n if masked:\n # Set the hardness of the mask\n if config[\"hardmask\"]:\n p_data.harden_mask()\n else:\n p_data.soften_mask()\n # --- End: if\n\n self.masked = masked\n\n # ------------------------------------------------------------\n # Make sure that the data array has the correct units. This\n # process will deep copy the data array if required (e.g. if\n # another partition is referencing this numpy array), even if\n # the units are already correct.\n # ------------------------------------------------------------\n func = config.get(\"func\")\n units = config[\"units\"]\n if func is None:\n if not p_units.equals(units) and bool(p_units) is bool(units):\n func = Units.conform\n\n if func is not None:\n inplace = not copy\n p_data = func(p_data, p_units, units, inplace)\n p_units = units\n\n if not inplace:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n flip = config.get(\"flip\", None)\n if flip or p_flip:\n flip_axes = set(p_flip).symmetric_difference(flip)\n else:\n flip_axes = None\n\n axes = config[\"axes\"]\n\n if p_data.size > 1:\n # --------------------------------------------------------\n # Flip axes\n # --------------------------------------------------------\n if flip_axes:\n indices = [\n (\n slice(None, None, -1)\n if axis in flip_axes\n else slice(None)\n )\n for axis in p_axes\n ]\n p_data = p_data[tuple(indices)]\n\n # --------------------------------------------------------\n # Transpose axes\n # --------------------------------------------------------\n if p_axes != axes:\n iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]\n\n if len_p_axes > len(iaxes):\n for i in range(len_p_axes):\n if i not in iaxes:\n # iaxes.append(i)\n iaxes.insert(i, i)\n # --- End: if\n\n p_data = numpy_transpose(p_data, iaxes)\n # --- End: if\n\n # ------------------------------------------------------------\n # Remove excessive/insert missing size 1 axes\n # ------------------------------------------------------------\n if p_shape != p_data.shape:\n # if len_p_axes != len(p_shape):\n p_data = p_data.reshape(p_shape)\n\n # ------------------------------------------------------------\n # Apply the auxiliary mask\n # ------------------------------------------------------------\n auxiliary_mask = config[\"auxiliary_mask\"]\n if auxiliary_mask:\n for mask in auxiliary_mask:\n if mask.any():\n if not masked:\n p_data = p_data.view(numpy_ma_MaskedArray)\n masked = True\n\n p_data.mask = (mask | p_data.mask).array\n # --- End: for\n\n self.masked = True\n\n # 
------------------------------------------------------------\n # Convert the array's data type\n # ------------------------------------------------------------\n p_dtype = p_data.dtype\n dtype = config.get(\"dtype\", None)\n if dtype is not None and dtype != p_dtype:\n try:\n p_data = p_data.astype(dtype) # Note: returns a copy\n except ValueError:\n raise ValueError(\n \"Can't recast partition array from {} to {}\".format(\n p_dtype.name, dtype.name\n )\n )\n else:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n # ------------------------------------------------------------\n # Copy the array\n # -----------------------------------------------------------\n if copy:\n if p_dtype.char != \"O\":\n if not masked or p_data.ndim > 0:\n p_data = p_data.copy()\n else:\n # This is because numpy.ma.copy doesn't work for\n # scalar arrays (at the moment, at least)\n p_data = numpy_ma_masked_all((), p_data.dtype)\n\n # We've just copied p_data, so in place changes are\n # not possible\n in_place_changes = False\n else:\n # whilst netCDF4.netcdftime.datetime is mucking bout,\n # don't copy!!!!\n # p_data = _copy(p_data)\n pass\n # --- End: if\n\n # ------------------------------------------------------------\n # Update the partition\n # ------------------------------------------------------------\n if update:\n self.subarray = p_data # ?? DCH CHECK\n self.Units = p_units\n self.part = []\n self.axes = axes\n self.flip = flip\n self.flatten = []\n self.shape = p_shape\n self.location = p_location\n\n self._in_place_changes = in_place_changes\n\n # ------------------------------------------------------------\n # Return the numpy array\n # ------------------------------------------------------------\n return p_data", "def __len__(self):\n return len(self.files[self.split])", "def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] 
= field_data", "def append_filepath(self, filepath):\n idx = len(self.t_sect['filepaths'])\n self.t_sect['filepaths'].append(filepath)\n return idx", "def write_sub_4(self):\n self.subIndex[constants.sub_4_genre_albums].offset = (\n self.db_file.tell())\n self.subIndex[constants.sub_4_genre_albums].size = 8\n self.subIndex[constants.sub_4_genre_albums].count = (\n len(self.genreIndex) - 1)\n\n entry_offset = 0\n for giEntry in self.genreIndex[1:]:\n self.db_file.write(\n struct.pack(\n \"<HHHH\",\n giEntry.number,\n entry_offset,\n giEntry.number_of_albums,\n 0x0000))\n entry_offset += giEntry.number_of_albums", "def load(self, i: int) -> np.ndarray:\n raise NotImplementedError(\"Do not call load from BaseLoader\")", "def write_all_sub_indices(self):\n\n # remember where we are.\n temp_offset_1 = self.db_file.tell()\n\n # Write a filler for the relative offset to the first table\n self.db_file.write(struct.pack(\"<I\", 0x00000000))\n\n # Write the sub index entries (blank at this stage)\n self.write_sub_index()\n\n # self.subIndex[constants.sub_0_genre_performers].offset = \\\n # self.db_file.tell()\n self.write_sub_0()\n\n # self.subIndex[constants.sub_1_genre_performer_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_1()\n\n # self.subIndex[constants.sub_2_genre_performer_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_2()\n\n # self.subIndex[constants.sub_3_genre_ordered_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_3()\n\n # self.subIndex[constants.sub_4_genre_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_4()\n\n # self.subIndex[constants.sub_5_genre_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_5()\n\n # self.subIndex[constants.sub_6_genre_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_6()\n\n # self.subIndex[constants.sub_7_performer_albums].offset = \\\n # self.db_file.tell()\n self.write_sub_7()\n\n # self.subIndex[constants.sub_8_performer_album_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_8()\n\n # self.subIndex[constants.sub_9_performer_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_9()\n\n # self.subIndex[constants.sub_10_genre_performers].offset = \\\n # self.db_file.tell()\n self.write_sub_10()\n\n # self.subIndex[constants.sub_11_genre_performer_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_11()\n\n # self.subIndex[constants.sub_12_genre_ordered_titles].offset = \\\n # self.db_file.tell()\n self.write_sub_12()\n\n # Remeber where we are\n temp_offset_2 = self.db_file.tell()\n\n # Go back to the start\n self.db_file.seek(temp_offset_1)\n\n # Write the offset to the first table\n self.db_file.write(\n struct.pack(\n \"<I\",\n self.subIndex[constants.sub_0_genre_performers].offset -\n temp_offset_1))\n\n # Write the real data now\n self.write_sub_index()\n\n # Go to the end\n self.db_file.seek(temp_offset_2)", "def extend(self, i):\n for x in i:\n self.add(x)", "def update_subvarga(self, subvarga):\n\t\tself.subvarga = subvarga\n\t\tself.subvargaNum += 1", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def add(perm, i):\n for j in self.increasing_children(i):\n add(perm, j)\n perm.append(i)", "def enqueue(self, i):\n if len(self) == self.capacity:\n self._resize(self.capacity*2)\n\n if self.tail == self.capacity:\n self.tail = 0\n\n self.lst[self.tail] = i\n self.tail += 1\n self.n += 1", "def 
_update_subfiles(self) -> None:\n\t\t# Clear list of subfiles\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tfor file in node.get_subfiles():\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tfor file in subnode.filenames:\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))", "def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr", "def add(perm, i):\n for j in self.decreasing_children(i):\n add(perm, j)\n perm.append(i)", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def _dfs(self, i):\n self.tracks[i] = self.cnt\n for j in self.edges[i]:\n if self.tracks[j] == -1:\n self._dfs(j)", "def get_sum(self, i):\n s = 0\n\n # index in BITree is 1 more than index in arr[]\n i += 1\n\n # Traverse to leaves of BITree[i]:\n while i > 0:\n s += self.BITree[i]\n\n # Move index to parent node (next set bit in binary representation)\n i -= i & (-i)\n\n return s", "def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return 
self.nbytes\n else:\n return self.nbytes", "def inc_size(self):\r\n self.__length += 1", "def i(self, i):\n\n self._i = i", "def __add__(self, i):\n self.n += i\n plt.subplot(self.nx, self.ny, self.n)\n return True", "def numberFiles(self):\n return self.n", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def append(self, file, idx):\n\n # print \"append %s %d\" % (file, idx)\n src = \"%s/%s\" % (self._dir, file)\n dst = \"%s/.%d.new\" % (self._tempdir, idx)\n copyfile(src, dst)\n result = self._run(\"%s --%d --block-size %d --bits %d --quiet --threads %d %s --mode %s --rehash %s %s\" %\n (self._ishakesumd, self._mode, self._block_size, self._output_bits, self._threads,\n self._profile, self._alg, self._hash, self._tempdir))\n os.remove(dst)\n return result", "def add(self, index):\n index_bytes = int(index).to_bytes(self._index_size,\n byteorder=\"little\", signed=False)\n self._fout.write(index_bytes)", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def write_sub_6(self):\n self.subIndex[constants.sub_6_genre_titles].offset = (\n self.offsets[constants.genre_title_offset])\n self.subIndex[constants.sub_6_genre_titles].size = (2)\n self.subIndex[constants.sub_6_genre_titles].count = (\n self.genre_title_table_length) # TODO: Could be len(mainIndex)", "def putarow(self,i_,subi_,vali_):\n nzi_ = None\n if nzi_ is None:\n nzi_ = len(subi_)\n elif nzi_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if nzi_ is None:\n nzi_ = len(vali_)\n elif nzi_ != len(vali_):\n raise IndexError(\"Inconsistent length of array vali\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if vali_ is None:\n raise ValueError(\"Argument vali cannot be None\")\n if vali_ is None:\n raise ValueError(\"Argument vali may not be None\")\n if isinstance(vali_, numpy.ndarray) and vali_.dtype is numpy.dtype(numpy.float64) and vali_.flags.contiguous:\n _vali_copyarray = False\n _vali_tmp = ctypes.cast(vali_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif vali_ is not None:\n _vali_copyarray = True\n _vali_np_tmp = numpy.zeros(len(vali_),numpy.dtype(numpy.float64))\n _vali_np_tmp[:] = vali_\n assert _vali_np_tmp.flags.contiguous\n _vali_tmp = ctypes.cast(_vali_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _vali_copyarray = False\n _vali_tmp = None\n \n res = __library__.MSK_XX_putarow(self.__nativep,i_,nzi_,_subi_tmp,_vali_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def assignMoreVectors(self, i):\n return", "def __getitem__(self, i):\n new_data = super().__getitem__(i)\n if isinstance(i, slice):\n new_data = self.__class__(new_data)\n new_data.global_settings = copy.copy(self.global_settings)\n return new_data", "def parse_subfiles(self) -> None:\n\t\t# Exit if 
type has no subfiles\n\n\t\tif not self.has_subfile:\n\t\t\treturn\n\t\t# Variables\n\t\tfile: str\n\t\tsubfiles: Set[str] = set()\n\n\t\t# Set has_subfile to false, allows parsing subfiles only as needed\n\t\tself.has_subfile = False\n\n\t\t# Iterate over RootNode children\n\t\tfor rootnode in self.root_nodes:\n\t\t\tfor file in rootnode.get_subfiles():\n\t\t\t\t# Check if rootnode already exists, skip if so.\n\t\t\t\tif self._valid_rootnode_file(file):\n\t\t\t\t\t# Add to subfiles list if not already present\n\t\t\t\t\tif file not in subfiles:\n\t\t\t\t\t\tsubfiles.add(file)\n\n\t\t# Add subfiles as RootNodes\n\t\tfor subfile in subfiles:\n\t\t\tself.add_rootnode(subfile)\n\t\t# Update\n\t\tself.update()\n\n\t\t# Rerun if new subfiles were added\n\t\tif self.has_subfile:\n\t\t\tself.parse_subfiles()", "def add_slice(self, value):\n if isinstance(value, str):\n self._add_slice_length(len(value))\n self._data += value.encode(\"utf-8\")\n elif isinstance(value, (bytes, bytearray)):\n self._add_slice_length(len(value))\n self._data += value\n else:\n length = 0\n for _ in value:\n length += 1\n self._add_slice_length(length)\n self.add_array(value)", "def refine(self, ijk):\n if self.cbc is None or not self.sub_block_count:\n raise ValueError(\n \"Cannot refine sub block model without specifying number \"\n \"of parent and sub blocks\"\n )\n try:\n inds = self.ijk_array_to_indices(ijk)\n except ValueError:\n inds = self.ijk_to_index(ijk)\n self.cbc.array[inds] = np.prod(self.sub_block_count) # pylint: disable=E1137", "def write_sub_3(self):\n self.subIndex[constants.sub_3_genre_ordered_titles].offset = (\n self.offsets[constants.genre_title_order_offset])\n self.subIndex[constants.sub_3_genre_ordered_titles].size = 2\n # TODO: Could be len(mainIndex)\n self.subIndex[constants.sub_3_genre_ordered_titles].count = (\n self.genre_title_order_table_length)", "def __len__(self) -> int:\n return len(self.files)", "def appendsize(self, numents):\n pass", "def putaijlist(self,subi_,subj_,valij_):\n num_ = None\n if num_ is None:\n num_ = len(subi_)\n elif num_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None:\n num_ = len(valij_)\n elif num_ != len(valij_):\n raise IndexError(\"Inconsistent length of array valij\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = 
numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if valij_ is None:\n raise ValueError(\"Argument valij cannot be None\")\n if valij_ is None:\n raise ValueError(\"Argument valij may not be None\")\n if isinstance(valij_, numpy.ndarray) and valij_.dtype is numpy.dtype(numpy.float64) and valij_.flags.contiguous:\n _valij_copyarray = False\n _valij_tmp = ctypes.cast(valij_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif valij_ is not None:\n _valij_copyarray = True\n _valij_np_tmp = numpy.zeros(len(valij_),numpy.dtype(numpy.float64))\n _valij_np_tmp[:] = valij_\n assert _valij_np_tmp.flags.contiguous\n _valij_tmp = ctypes.cast(_valij_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _valij_copyarray = False\n _valij_tmp = None\n \n res = __library__.MSK_XX_putaijlist64(self.__nativep,num_,_subi_tmp,_subj_tmp,_valij_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def load_chunk(self, idx):\n for f in self.filenames[idx:]:\n ...", "def add_item(self, i, k):\n if k == self.K:\n self.K += 1\n self.m_N_numerators[k, :] = self.prior.k_0*self.prior.m_0\n self.S_N_partials[k, :] = self.prior.S_0 + self.prior.k_0*self._cached_prior_square_m_0\n self.m_N_numerators[k, :] += self.X[i]\n self.S_N_partials[k, :] += self._cached_square[i]\n self.counts[k] += 1\n self._update_log_prod_vars_and_inv_vars(k)\n self.assignments[i] = k", "def _setup_n_ints_in_file(self):\n self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)", "def add(self, data):\n if data.shape != self.shape:\n self.shape = data.shape\n if isinstance(self.child, vmedian):\n self.child.add(data)\n if (self.child.index == 0):\n self.buffer[self.index, :] = self.child.get(reshape=False)\n self.index = self.index + 1\n else:\n self.buffer[self.index, :] = np.ravel(data)\n self.index = self.index + 1\n\n if self.index == 3:\n self.index = 0\n self.initialized = True", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def reduce(self, array, index):\n\n return 0", "def __setitem__(self, i, value):\n self._ar[i] = value", "def n_total_files(self):\n return len(self.fileinfo)", "def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)", "def append(self, item):\n\n # resize array to 2*capacity if max capacity reached\n if self.count == self.capacity:\n self._resize(2 * self.capacity)\n\n # Append the item at the end of array\n self.the_array[self.count] = item\n self.count += 1", "def add_overlap(self, other):\n assert isinstance(other, Chunk)\n 
overlap_slices = self._get_overlap_slices(other.slices)\n self.array[overlap_slices] += other.array[overlap_slices]", "def add_error(self, i: int, error: Exception):\n\t\twith self.get_locks(self.error_case_indices,\n\t\t self.errors,\n\t\t self.error_count):\n\t\t\tself.error_case_indices[self.error_count.value] = i\n\t\t\tself.errors[self.error_count.value] = ERROR_TO_ID[error.__class__]\n\t\t\tself.error_count.value += 1", "def set_chopped_reduced_files(self, run_number, slicer_key, gsas_file_list, append):\n # get tracker\n tracker = self.get_tracker(run_number, slicer_key)\n assert isinstance(tracker, DataReductionTracker), 'Must be a DataReductionTracker'\n\n # add files\n tracker.set_reduced_files(gsas_file_list, append)\n\n return", "def add_subROI(self, ROI_in):\n self.subROIs.append(ROI_in)", "def push_substructure_fraction(self):\n Total_sub_fraction = np.zeros((0,), dtype=np.float)\n ParType0_sub_fraction = np.zeros((0,), dtype=np.float)\n ParType1_sub_fraction = np.zeros((0,), dtype=np.float)\n ParType4_sub_fraction = np.zeros((0,), dtype=np.float)\n ParType5_sub_fraction = np.zeros((0,), dtype=np.float)\n\n for r in self.cluster.generate_apertures():\n part_sub_fraction_aperture = self.cluster.group_substructure_fraction(aperture_radius=r, \n out_allPartTypes=True)\n ParType0_sub_fraction = np.concatenate((ParType0_sub_fraction, [part_sub_fraction_aperture[0]]), axis=0)\n ParType1_sub_fraction = np.concatenate((ParType1_sub_fraction, [part_sub_fraction_aperture[1]]), axis=0)\n ParType4_sub_fraction = np.concatenate((ParType4_sub_fraction, [part_sub_fraction_aperture[2]]), axis=0)\n ParType5_sub_fraction = np.concatenate((ParType5_sub_fraction, [part_sub_fraction_aperture[3]]), axis=0)\n Total_sub_fraction_aperture = self.cluster.group_substructure_fraction(aperture_radius=r, \n out_allPartTypes=False)\n Total_sub_fraction = np.concatenate((Total_sub_fraction, [Total_sub_fraction_aperture]), axis=0)\n\n data = {'/Total_substructure_fraction' : np.array(Total_sub_fraction),\n '/ParType0_substructure_fraction': np.array(ParType0_sub_fraction),\n '/ParType1_substructure_fraction': np.array(ParType1_sub_fraction),\n '/ParType4_substructure_fraction': np.array(ParType4_sub_fraction),\n '/ParType5_substructure_fraction': np.array(ParType5_sub_fraction)}\n\n attributes = {'Description': \"\"\"Datasets with the fraction of mass bound to subhalos of the cluster, calculated \n from particles within a specific aperture radius from the Centre of Potential. 
Individual datasets contain \n substructure fraction information about each particle type separately, as well as one with combined \n total contribution.\n The substructure fraction is computed according to the equation:\n substructure_fraction = (total mass - fuzz mass) / total mass.\n\n Note: The substructure fraction can also be used as an indicator for a merging index.\n \"\"\",\n 'Units': '[None]'}\n\n out = FOFOutput(self.cluster, filename='substructure_fraction.hdf5', data=data, attrs=attributes)\n out.makefile()", "def undo_scan(self, sub_array_id: int):", "def write_sub_5(self):\n self.subIndex[constants.sub_5_genre_album_titles].offset = (\n self.db_file.tell())\n self.subIndex[constants.sub_5_genre_album_titles].size = 8\n\n entry_offset = len(self.genreIndex[0].titles)\n count = 0\n for giEntry in self.genreIndex[1:]:\n for album in giEntry.album_numbers:\n # print(\"Sub5 Album: {}\".format(album))\n number_of_titles = giEntry.number_of_titles_for_album(album)\n if number_of_titles > 0:\n self.db_file.write(\n struct.pack(\n \"<HHHH\",\n album,\n entry_offset,\n number_of_titles,\n 0x0000))\n entry_offset += number_of_titles\n count += 1\n\n self.subIndex[constants.sub_5_genre_album_titles].count = (count)", "def files(self) -> _SeqNumSlicer:\n if self._seq_num_slicer is None:\n self._seq_num_slicer = _SeqNumSlicer(self)\n return self._seq_num_slicer", "def __len__(self):\n return len(self.files)", "def append(self, element):\n if self.n == self.capacity:\n self._resize(2*self.capacity) # resizing by 2x if size is not enough\n\n self.original_array[self.n] = element\n self.n += 1", "def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array", "def compute_accumulation(self, i):\n\n #These are just pointer reassignments, not a deep-copy.\n dx = self.dx_arr\n phi = self.porosity\n area = self.res_area\n cf = self.compressibility\n Bw = self.form_volume_factor\n\n return area * dx[i] * phi[i] * cf / Bw", "def __len__(self):\n\n return len(self._file_list)", "def __setitem__(self, i: int, item: Any) -> None:\n if i < 0:\n i = self._length + i\n\n curr = self._first\n index_so_far = 0\n\n while curr is not None:\n if index_so_far == i:\n curr.item = item\n break\n index_so_far += 1\n curr = curr.next\n if curr is None:\n raise IndexError", "def propagateDirty(self, slot, subindex, roi):\n totalIndex = (self._subSlots.index(slot),) + subindex\n self.operator.propagateDirty(self, totalIndex, roi)", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def addAtTail(self, val: int) -> None:\n self.addAtIndex(self.size, val)", "def _average_data(self):\n # TODO: This function looks inefficient - can it be improved?\n output = np.zeros([self.nints_file, self.ngroups_file,\n self.rows, self.columns], dtype=self.data.dtype)\n count = np.zeros([self.nints_file, self.ngroups_file, 1, 1],\n dtype=self.data.dtype)\n\n for intg in range(0, self.nints):\n intg_file = intg // self.intavg\n for grp in range(0, self.ngroups):\n grp_file = grp // self.grpavg\n output[intg_file, grp_file, :, :] += self.data[intg, grp, :, :]\n count[intg_file, grp_file, 0, 0] += 1.0\n\n # Avoid divide by zero.\n iszero = np.where(count < 1.0)\n if iszero:\n count[iszero] = 1.0\n output = output / count\n\n del count \n return output", "def insert(self, item: Crop) -> None:\n self._content.append(item)\n self._file_counts[item.annot_type] = self._file_counts.get(item.annot_type, 0) + 1", "def 
putbaraijlist(self,subi_,subj_,alphaptrb_,alphaptre_,matidx_,weights_):\n num_ = None\n if num_ is None:\n num_ = len(subi_)\n elif num_ != len(subi_):\n raise IndexError(\"Inconsistent length of array subi\")\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None:\n num_ = len(alphaptrb_)\n elif num_ != len(alphaptrb_):\n raise IndexError(\"Inconsistent length of array alphaptrb\")\n if num_ is None:\n num_ = len(alphaptre_)\n elif num_ != len(alphaptre_):\n raise IndexError(\"Inconsistent length of array alphaptre\")\n if subi_ is None:\n raise ValueError(\"Argument subi cannot be None\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n if alphaptrb_ is None:\n raise ValueError(\"Argument alphaptrb cannot be None\")\n if alphaptrb_ is None:\n raise ValueError(\"Argument alphaptrb may not be None\")\n if isinstance(alphaptrb_, numpy.ndarray) and alphaptrb_.dtype is numpy.dtype(numpy.int64) and alphaptrb_.flags.contiguous:\n _alphaptrb_copyarray = False\n _alphaptrb_tmp = ctypes.cast(alphaptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif alphaptrb_ is not None:\n _alphaptrb_copyarray = True\n _alphaptrb_np_tmp = numpy.zeros(len(alphaptrb_),numpy.dtype(numpy.int64))\n _alphaptrb_np_tmp[:] = alphaptrb_\n assert _alphaptrb_np_tmp.flags.contiguous\n _alphaptrb_tmp = ctypes.cast(_alphaptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _alphaptrb_copyarray = False\n _alphaptrb_tmp = None\n \n if alphaptre_ is None:\n raise ValueError(\"Argument alphaptre cannot be None\")\n if alphaptre_ is None:\n raise ValueError(\"Argument alphaptre may not be None\")\n if isinstance(alphaptre_, numpy.ndarray) and alphaptre_.dtype is numpy.dtype(numpy.int64) and alphaptre_.flags.contiguous:\n _alphaptre_copyarray = False\n _alphaptre_tmp = ctypes.cast(alphaptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif alphaptre_ is not None:\n _alphaptre_copyarray = True\n _alphaptre_np_tmp = numpy.zeros(len(alphaptre_),numpy.dtype(numpy.int64))\n _alphaptre_np_tmp[:] = alphaptre_\n assert _alphaptre_np_tmp.flags.contiguous\n _alphaptre_tmp = 
ctypes.cast(_alphaptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _alphaptre_copyarray = False\n _alphaptre_tmp = None\n \n if matidx_ is None:\n raise ValueError(\"Argument matidx cannot be None\")\n if matidx_ is None:\n raise ValueError(\"Argument matidx may not be None\")\n if isinstance(matidx_, numpy.ndarray) and matidx_.dtype is numpy.dtype(numpy.int64) and matidx_.flags.contiguous:\n _matidx_copyarray = False\n _matidx_tmp = ctypes.cast(matidx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif matidx_ is not None:\n _matidx_copyarray = True\n _matidx_np_tmp = numpy.zeros(len(matidx_),numpy.dtype(numpy.int64))\n _matidx_np_tmp[:] = matidx_\n assert _matidx_np_tmp.flags.contiguous\n _matidx_tmp = ctypes.cast(_matidx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _matidx_copyarray = False\n _matidx_tmp = None\n \n if weights_ is None:\n raise ValueError(\"Argument weights cannot be None\")\n if weights_ is None:\n raise ValueError(\"Argument weights may not be None\")\n if isinstance(weights_, numpy.ndarray) and weights_.dtype is numpy.dtype(numpy.float64) and weights_.flags.contiguous:\n _weights_copyarray = False\n _weights_tmp = ctypes.cast(weights_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif weights_ is not None:\n _weights_copyarray = True\n _weights_np_tmp = numpy.zeros(len(weights_),numpy.dtype(numpy.float64))\n _weights_np_tmp[:] = weights_\n assert _weights_np_tmp.flags.contiguous\n _weights_tmp = ctypes.cast(_weights_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _weights_copyarray = False\n _weights_tmp = None\n \n res = __library__.MSK_XX_putbaraijlist(self.__nativep,num_,_subi_tmp,_subj_tmp,_alphaptrb_tmp,_alphaptre_tmp,_matidx_tmp,_weights_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __setitem__(self, i, val):\n\t\tif i < self.n:\n\t\t\tself.v[i] = val", "def subarray(self):\n return self._subarray", "def fileCount(self):\n pass", "def __getitem__(self, index):\n row = self.metadata.iloc[index]\n vid_id = row.filename.split('.')[0]\n array_dict = self.dp.get_arrays(vid_id, self.num_subbursts, self.padding, self.random_start_points)\n array_dict['subbursts'] = self.apply_transform(array_dict['subbursts'])\n return array_dict", "def getarow(self,i_,subi_,vali_):\n nzi_ = ctypes.c_int32()\n _subi_minlength = self.getarownumnz((i_))\n if self.getarownumnz((i_)) > 0 and subi_ is not None and len(subi_) != self.getarownumnz((i_)):\n raise ValueError(\"Array argument subi is not long enough: Is %d, expected %d\" % (len(subi_),self.getarownumnz((i_))))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if subi_ is None:\n raise ValueError(\"Argument subi may not be None\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _vali_minlength = self.getarownumnz((i_))\n if self.getarownumnz((i_)) > 0 and vali_ is not None and len(vali_) != 
self.getarownumnz((i_)):\n raise ValueError(\"Array argument vali is not long enough: Is %d, expected %d\" % (len(vali_),self.getarownumnz((i_))))\n if isinstance(vali_,numpy.ndarray) and not vali_.flags.writeable:\n raise ValueError(\"Argument vali must be writable\")\n if vali_ is None:\n raise ValueError(\"Argument vali may not be None\")\n if isinstance(vali_, numpy.ndarray) and vali_.dtype is numpy.dtype(numpy.float64) and vali_.flags.contiguous:\n _vali_copyarray = False\n _vali_tmp = ctypes.cast(vali_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif vali_ is not None:\n _vali_copyarray = True\n _vali_np_tmp = numpy.zeros(len(vali_),numpy.dtype(numpy.float64))\n _vali_np_tmp[:] = vali_\n assert _vali_np_tmp.flags.contiguous\n _vali_tmp = ctypes.cast(_vali_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _vali_copyarray = False\n _vali_tmp = None\n \n res = __library__.MSK_XX_getarow(self.__nativep,i_,ctypes.byref(nzi_),_subi_tmp,_vali_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nzi_ = nzi_.value\n _nzi_return_value = nzi_\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _vali_copyarray:\n vali_[:] = _vali_np_tmp\n return (_nzi_return_value)", "def getFileCount(self) -> int:\n ...", "def add_filechild(self, node):\n self._filechildren.append(node)", "def unique_files(self):\n self._tempfiles[-1].ctr = -1", "def append(self, obj):\n if self._size == self._capacity: # not enough room\n self._resize(2 * self._capacity) # so double capacity\n self._Array[self._size] = obj\n self._size += 1", "def appendsize(self, numents):\n self._numents += numents", "def _add_field(self, field_path, data, sparse_idxs=None,\n force=False):\n\n for i, run_idx in enumerate(self.run_idxs):\n if sparse_idxs is not None:\n self._add_run_field(run_idx, field_path, data[i], sparse_idxs=sparse_idxs[i],\n force=force)\n else:\n self._add_run_field(run_idx, field_path, data[i],\n force=force)", "def add(self, filename, n_pages):\n with open(filename, 'rb') as f:\n content = f.read()\n size = len(content)\n\n # Before storing the filename I strip first part of output path which is the parent\n # directory of all of these files. 
We don't want that info in here because it will become\n # wrong if these files are ever moved, and we want them to be relocatable without\n # breaking anything.\n filename = os.path.relpath(filename, self.output_path)\n\n self.fileinfo[filename] = {'n_pages': n_pages,\n 'size': size,\n 'hash': hashlib.sha256(content).hexdigest(),\n }\n\n self.n_total_pages += n_pages\n self.n_total_bytes += size", "def addFile(self, filePath): \n \n self.filePathDict[filePath] = [0,[]]", "def _append_x(self, i):\n if not 0 <= i < self.num_qubits:\n raise QiskitError(\"X qubit out of bounds.\")\n self.shift[i] = (self.shift[i] + 1) % 2", "def set_iload(self):\n k = self.istore[0]\n v = self.stencil.get_all_velocities()\n indices = self.istore[1:].copy()\n indices[1] += v[k].T[1]\n self.iload.append(np.concatenate([k[np.newaxis, :], indices]))", "def record(self, variable, t_start=None): # @UnusedVariable\n for comp_array in self.component_arrays:\n comp_array.record(variable)", "def __init__(self, arr, n):\n self.BITree = [0] * (n+1)\n self.size = n\n\n for i in range(n):\n self.update(i, arr[i])", "def putarow(self,i_,subi,vali): # 3\n nzi_ = None\n if nzi_ is None:\n nzi_ = len(subi)\n elif nzi_ != len(subi):\n raise IndexError(\"Inconsistent length of array subi\")\n if nzi_ is None:\n nzi_ = len(vali)\n elif nzi_ != len(vali):\n raise IndexError(\"Inconsistent length of array vali\")\n if nzi_ is None: nzi_ = 0\n if subi is None: raise TypeError(\"Invalid type for argument subi\")\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n \n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n \n if vali is None: raise TypeError(\"Invalid type for argument vali\")\n if vali is None:\n vali_ = None\n else:\n try:\n vali_ = memoryview(vali)\n except TypeError:\n try:\n _tmparr_vali = array.array(\"d\",vali)\n except TypeError:\n raise TypeError(\"Argument vali has wrong type\")\n else:\n vali_ = memoryview(_tmparr_vali)\n \n else:\n if vali_.format != \"d\":\n vali_ = memoryview(array.array(\"d\",vali))\n \n res = self.__obj.putarow(i_,nzi_,subi_,vali_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __len__(self):\n return len(self.file_paths)", "def append(self, narray, name):\n if narray is NoneArray:\n # if NoneArray, nothing to do.\n return\n\n if self.Association == ArrayAssociation.POINT:\n arrLength = self.DataSet.GetNumberOfPoints()\n elif self.Association == ArrayAssociation.CELL:\n arrLength = self.DataSet.GetNumberOfCells()\n elif self.Association == ArrayAssociation.ROW \\\n and self.DataSet.GetNumberOfColumns() > 0:\n arrLength = self.DataSet.GetNumberOfRows()\n else:\n if not isinstance(narray, numpy.ndarray):\n arrLength = 1\n else:\n arrLength = narray.shape[0]\n\n # if input is not a valid array (i.e. 
unexpected shape[0]),\n # create a new array and copy input for each element\n if not isinstance(narray, numpy.ndarray) or numpy.ndim(narray) == 0: # Scalar input\n dtype = narray.dtype if isinstance(narray, numpy.ndarray) else type(narray)\n tmparray = numpy.empty(arrLength, dtype=dtype)\n tmparray.fill(narray)\n narray = tmparray\n elif narray.shape[0] != arrLength: # Vector input\n components = 1\n for l in narray.shape:\n components *= l\n try:\n tmparray = numpy.empty((arrLength, components), dtype=narray.dtype)\n except numpy.core._exceptions._ArrayMemoryError as npErr:\n sys.stderr.write(\"Fail to copy input array for each dataset element: array is too big to be duplicated.\\n\"\n \"Input should either be small enough to be duplicated for each element, or shape[0] should \"\n \"match number of element.\\n\"\n \"Example of correct usage: to add a point PointData array, it is common to have\\n\"\n \"array.shape[0] == 3 or array.shape[0] == dataset.GetNumberOfPoints()\\n\"\n )\n sys.stderr.write(str(type(npErr)) + \"\\n\")\n sys.stderr.write(str(npErr))\n return\n\n tmparray[:] = narray.flatten()\n narray = tmparray\n\n shape = narray.shape\n\n if len(shape) == 3:\n # Array of matrices. We need to make sure the order in memory is right.\n # If column order (c order), transpose. VTK wants row order (fortran\n # order). The deep copy later will make sure that the array is contiguous.\n # If row order but not contiguous, transpose so that the deep copy below\n # does not happen.\n size = narray.dtype.itemsize\n if (narray.strides[1]/size == 3 and narray.strides[2]/size == 1) or \\\n (narray.strides[1]/size == 1 and narray.strides[2]/size == 3 and \\\n not narray.flags.contiguous):\n narray = narray.transpose(0, 2, 1)\n\n # If array is not contiguous, make a deep copy that is contiguous\n if not narray.flags.contiguous:\n narray = numpy.ascontiguousarray(narray)\n\n # Flatten array of matrices to array of vectors\n if len(shape) == 3:\n narray = narray.reshape(shape[0], shape[1]*shape[2])\n\n # this handle the case when an input array is directly appended on the\n # output. 
We want to make sure that the array added to the output is not\n # referring to the input dataset.\n copy = VTKArray(narray)\n try:\n copy.VTKObject = narray.VTKObject\n except AttributeError: pass\n arr = numpyTovtkDataArray(copy, name)\n self.VTKObject.AddArray(arr)", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= 
cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n" ]
[ "0.5323221", "0.5309046", "0.5237938", "0.52028364", "0.51664454", "0.5117871", "0.51150346", "0.5107957", "0.50491905", "0.50423014", "0.5001557", "0.499577", "0.49840355", "0.4932267", "0.4909338", "0.48659304", "0.48641643", "0.48607743", "0.48591715", "0.4854085", "0.48242405", "0.4823937", "0.481012", "0.48052314", "0.479596", "0.47512335", "0.47405446", "0.47338173", "0.4703466", "0.4691803", "0.4688549", "0.46817592", "0.4680104", "0.4654203", "0.46459475", "0.46297154", "0.46291628", "0.46258196", "0.46218735", "0.46185297", "0.46144783", "0.46128556", "0.4603453", "0.46022874", "0.45898673", "0.45790026", "0.45789003", "0.4577212", "0.4553954", "0.4548344", "0.45458877", "0.45420754", "0.45385808", "0.45285302", "0.45233715", "0.45175835", "0.4508444", "0.44914016", "0.4490856", "0.44868115", "0.44860977", "0.44850603", "0.44798177", "0.447787", "0.44748962", "0.44737417", "0.4465449", "0.44598728", "0.4451822", "0.4449522", "0.44396365", "0.44305938", "0.4429977", "0.44233325", "0.4397269", "0.43967542", "0.43899348", "0.43896294", "0.43873632", "0.4383333", "0.43830273", "0.4368216", "0.43651482", "0.4364754", "0.4362301", "0.43620268", "0.43598688", "0.4359031", "0.43568614", "0.43551305", "0.4352283", "0.43510228", "0.43477228", "0.43430525", "0.434094", "0.43387714", "0.4334496", "0.4331667", "0.43255523", "0.43240017" ]
0.8270168
0
Add 1 to the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _increment_file_counter(self): self._add_to_file_counter(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass", "def __len__(self):\n return len(self.files[self.split])", "def _decrement_file_counter(self):\n self._add_to_file_counter(-1)", "def fileCount(self):\n pass", "def getFileCount(self) -> int:\n ...", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def numberFiles(self):\n return self.n", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def unique_files(self):\n self._tempfiles[-1].ctr = -1", "def __len__(self) -> int:\n return len(self.files)", "def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1", "def __len__(self):\n return len(self.files)", "def file_num_increment(full_fpath):\r\n while os.path.isfile(full_fpath) == True:\r\n \r\n fpath, fext = os.path.splitext(full_fpath) #['C:\\Users\\Desktop\\file(1)', '.ext']\r\n\r\n if re.findall(\"[(]\\d+[)]\", fpath) != []: #Check if there is (x) in the path.\r\n for counter in range(1000): #Loop 1000 times\r\n if fpath.endswith(f\"({counter})\"): \r\n fpath = replace_last(fpath, f\"({counter})\", f\"({counter+1})\") #Replace the last occurence of (counter) in the string.\r\n full_fpath = fpath + fext\r\n break\r\n else: #here we pass for cases where (counter) is in the file/folder name itself. We skip them.\r\n continue\r\n else: #If there is no (counter), we create (1)\r\n counter = 1\r\n full_fpath = fpath + '(' + str(counter) + ')' + fext\r\n\r\n return full_fpath", "def __len__(self):\n\n return len(self._file_list)", "def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. 
If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def __number_of_files(self):\n self.__get_files()\n return len(self.files)", "def fileCounter(directory):", "def n_total_files(self):\n return len(self.fileinfo)", "def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()", "def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def __len__(self):\n return len(self.file_paths)", "def increment_count(self):\n self.image_count +=1\n if self.image_count > self.max_count:\n self.image_count = self.count_start # overflow", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def append_filepath(self, filepath):\n idx = len(self.t_sect['filepaths'])\n self.t_sect['filepaths'].append(filepath)\n return idx", "def incr_counter(self, path):\n res = self.read_counter(path)\n # print 'incr_counter:', path, res, '->', res + 1\n res += 1\n 
self.cursor.execute('REPLACE INTO counter(fullpath, count) VALUES(?, ?)', (path, res))\n self.conn.commit()\n pass", "def inc_size(self):\r\n self.__length += 1", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def __setitem__(self, filenr, data_arr):\n cvcfile = self.filenames[filenr]\n cvcpath = os.path.join(self.filefolder, cvcfile)\n data_arr.tofile(cvcpath)", "def files_processed(self) -> int:\n with self.lock:\n return self._files_processed", "def getnrfiles(self):\n return len(self.filenames)", "def __len__(self):\n return len(self.frame1_files)", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def get_num_files(self, file_type):\n return self.file_type_counter.get(file_type, 0)", "def fileno(self):\n return 1", "def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0", "def getFileCount(self, startingWithPath=\"\"):\n return self.__controller._getRecordsCount(startingWithPath)", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def inc(self):\n \n self.count += 1", "def __len__(self):\n return len(self.files_self_A_rgbd)", "def getFileCount(self):\n\n if self.filecount == -1:\n self.filecount = self.db.filecount()\n\n return self.filecount", "def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True", "def updateCounts(self):\n found = False\n fileName = \"counts\"\n if not os.access(fileName, os.F_OK):\n try:\n TFH = open(fileName, \"w\")\n TFH.close()\n except IOError as inst: # @UnusedVariable\n self.logIt(__name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str(\n inst.errno) + \":\" + str(inst.strerror) + \"\\n\")\n raise\n\n self.logIt(__name__ + \".updateCounts(): fileName=\" + fileName + \"\\n\")\n try:\n FH = open(fileName, \"rb+\")\n # FH = posixfile.open(fileName, \"rb+\") # posixfile has been deprecated.\n # FH.lock('w|')\n data = None\n while 1:\n data = str(FH.readline())\n if data is None or data == \"\": break\n data = re.sub(\"\\n\", \"\", data)\n self.debug(__name__ + \".updateCounts(): data is \" + str(data) + \"\\n\")\n ms = str(self.msgNum) + \"=\"\n self.debug(__name__ + \".updateCounts(): ms is\" + str(ms) + \"\\n\")\n if re.search(ms, data):\n found = True\n self.debug(__name__ + \".updateCounts(): DEBUG0.5\\n\")\n break\n self.debug(__name__ + \".updateCounts(): DEBUG1\\n\")\n if data and found:\n self.debug(__name__ + \".updateCounts(): DEBUG2\\n\")\n eloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): eloc=\" + str(eloc) + \"\\n\")\n sloc = eloc - len(data) - 1\n self.debug(__name__ + \".updateCounts(): sloc=\" + str(sloc) + \"\\n\")\n FH.seek(sloc, os.SEEK_SET)\n cloc = FH.tell()\n self.debug(__name__ + \".updateCounts(): cloc=\" + str(cloc) + \"\\n\")\n myList = list()\n myList = data.split('=')\n icount = int(myList[1]) + 1\n FH.write(str(self.msgNum) + \"=\" + str(icount) + \"\\n\")\n else:\n self.debug(__name__ + \".updateCounts(): DEBUG3\\n\")\n FH.write(str(self.msgNum) + \"=1\" + \"\\n\")\n FH.lock('u')\n FH.close()\n except IOError as inst: # @UnusedVariable\n pass\n # self.logIt( __name__ + \".updateCounts(): Unable to open \" + fileName + \" for write.\" + \" => \" + str( inst.errno ) + \":\" + str( inst.strerror ) + 
\"\\n\" )\n # Endtry", "def _get_file_length(self, file):\n self[file] = file.stat().st_size", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def incrementWriteCount(self):\n self.writeCount += 1", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def inc( self ):\n self.count += 1", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = open('/data/counters/'+nname+'cntr','w')\n f.close()", "def advance_image_count(self,file_id=None,image_num=None):\n # self.next_image = (self.next_image+1) % self.num_images\n if file_id is not None:\n self.file_id = file_id\n if image_num is None:\n self.next_image += 1\n else:\n self.next_image = image_num + 1\n if self.next_image >= self.num_images:\n self.next_image = 0\n self.file_id += 1\n self.signal_next_image_num.emit(self.next_image)\n self.signal_file_id.emit(self.file_id)", "def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file", "def getNumTimeDataFiles(self):\n return self.nTimeDataFiles", "def file_len(f):\n\n for n, l in enumerate(f, 1):\n pass\n f.seek(0) # rewind\n return n", "def fs_files_total(self):\n return self._fs_files_total", "def increase_counter(self):\n self.values = self.values + 1", "def increment(self, features, fname, v=1):\n if fname not in features:\n features[fname] = 0\n features[fname] += v", "def get_nrof_aux(self):\n aux = 0\n for l in self.aux_array:\n if l:\n aux += 1\n return aux", "def fix_index(self):\n if self.record_size <= self.size:\n self.record_size += 1\n if self.index % self.size == 0:\n self.isFull = True if len(self._storage) == self.size else False\n if self.replace_flag:\n self.index = 1\n return self.index\n else:\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n return -1\n else:\n self.index += 1\n return self.index", "def fileno(self):\r\n raise NotImplementedError()", "def _setup_n_ints_in_file(self):\n self.n_ints_in_file = sigproc.calc_n_ints_in_file(self.filename)", "def increment_counter(self) -> None:", "def file_progress_sig_handler(self, bytes_read: int):\n # Increment the bytes read\n self.file_bytes_read += bytes_read\n\n # Update the progress bar\n self.fileAnalyzeProgressBar.setValue(self.file_bytes_read)\n\n logging.debug(\"Analyzing File Progress: \" + str(self.file_bytes_read))", "def getNumStatDataFiles(self):\n return self.nStatDataFiles", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def size(self) -> int:\n return sum(p.size for p in self.iterfiles())", "def add_count(self):\n self.count += 1", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def __len__(self):\n return 
int(np.floor(len(self.wav_paths)))", "def touched_files(self, parent):", "def file_count(self) -> str:\n return pulumi.get(self, \"file_count\")", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def updateCounter(self):\n self.counter = self.counter + 1\n self.syncDataStructure[\"+\"][str(self.instanceID)] = self.counter", "def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)", "def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")", "def get_data_id(self):\n self.data_id = len(glob.glob(osp.join(self.save_dir, 'depth', '*npy')))\n return self.data_id", "def get_lenght(self):\n return len(self.filelist)", "def file_number(self, file_number):\n\n self._file_number = file_number", "def file_number(self, file_number):\n\n self._file_number = file_number", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def add_file(self, letter, block_size):\n cluster = 1\n i = 0\n j = 0\n\n continuous = True\n while(i<self.size and j<block_size):\n if(self.disk_mem[i]==\".\"):\n self.disk_mem[i] = letter\n if not continuous:\n continuous = True\n cluster += 1\n j+=1\n else:\n continuous = False\n i+=1\n return cluster", "def upload_files(self):\n return 1 << 0", "def addFile(self, filePath): \n \n self.filePathDict[filePath] = [0,[]]", "def _inc_counter(self) -> None:\n self._state_storage.increment_counter()", "def fs_files_used(self):\n return self._fs_files_used", "def get_num_files(self):\n\t\tif self.num_files_in_set is None and self.set_type == FAST5SET_TARBALL:\n\t\t\tself.num_files_in_set = len(self.files)\n\t\treturn self.num_files_in_set", "def addPointCountToEdge(self, edge):\n attributes = edge.getAttributes()\n if self.edge_id__count.has_key(attributes.get(self.shapeFileUniqueId)):\n self.edge_id__count[attributes.get(self.shapeFileUniqueId)] = self.edge_id__count[attributes.get(self.shapeFileUniqueId)] + 1\n else:\n self.edge_id__count[attributes.get(self.shapeFileUniqueId)] = 1\n edge.setAttributes(attributes)", "def process(self):\n self.reader += 1", "def count(self):\n\t\treturn sum(read.copy for read in self.__iter__())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = 
isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def min_file_histogram(self):\n return self._min_file_histogram", "def _update_count(self):\n self._count = len(self._items)", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def reduce(self, array, index):\n\n return 0", "def __len__(self):\n return len(self.image_file_names)", "def fileno(self):\n return None" ]
[ "0.83802605", "0.6079216", "0.60073787", "0.5972287", "0.5871502", "0.58649766", "0.58356875", "0.5813897", "0.5656929", "0.5650024", "0.5628978", "0.5588129", "0.5575472", "0.55536973", "0.55466735", "0.55383646", "0.5529106", "0.5502226", "0.54975855", "0.5481056", "0.54359984", "0.5412819", "0.5331949", "0.52709615", "0.5260593", "0.52554363", "0.5216349", "0.5193865", "0.51864076", "0.51768965", "0.5171308", "0.5161445", "0.5160157", "0.5151611", "0.512528", "0.51065946", "0.51014495", "0.5088919", "0.50852406", "0.5068842", "0.5065401", "0.50386566", "0.50366586", "0.5024731", "0.5022408", "0.50217134", "0.501115", "0.50025594", "0.49950272", "0.4993197", "0.49873495", "0.49836847", "0.4965577", "0.4941178", "0.49391267", "0.4934542", "0.4930227", "0.49134943", "0.49080187", "0.49061012", "0.49033248", "0.4892175", "0.4879754", "0.48753807", "0.48634028", "0.48473728", "0.4834655", "0.48266846", "0.48257676", "0.48153964", "0.48047906", "0.48020706", "0.47917888", "0.47833127", "0.4780719", "0.47712538", "0.4770419", "0.47691157", "0.47683647", "0.47659448", "0.47630748", "0.47630462", "0.47630462", "0.47607407", "0.47483426", "0.47433466", "0.47384593", "0.47356316", "0.47345176", "0.47258684", "0.472575", "0.4718935", "0.4715791", "0.47103822", "0.46854302", "0.46809283", "0.46724072", "0.4670188", "0.46681052", "0.466746" ]
0.71174276
1
Subtract 1 from the Partition.file_counter if self._subarray is an instance of FileArray and not a temporary FileArray.
def _decrement_file_counter(self): self._add_to_file_counter(-1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def _increment_file_counter(self):\n self._add_to_file_counter(1)", "def __len__(self):\n return len(self.files[self.split])", "def unique_files(self):\n self._tempfiles[-1].ctr = -1", "def numberFiles(self):\n return self.n", "def getFileCount(self) -> int:\n ...", "def fileCount(self):\n pass", "def __len__(self) -> int:\n return len(self.files)", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def __len__(self):\n return len(self.files)", "def stat_beg_file(self, filename):\n\n self.batchvals['numfiles'] += 1\n self.filevals['filename'] = filename\n self.filevals['start_time'] = time.time()\n\n return -1", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def count_files(self):\n self.file_count = 0\n self.count_files_loop(self.dirpath)\n return", "def __len__(self):\n\n return len(self._file_list)", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n", "def sequential_files(self, ctr=0):\n self._tempfiles[-1].ctr = ctr", "def getFileCount(self):\n\n if self.filecount == -1:\n self.filecount = self.db.filecount()\n\n return self.filecount", "def __number_of_files(self):\n self.__get_files()\n return 
len(self.files)", "def __len__(self):\n return len(self.frame1_files)", "def file_count(self) -> int:\n if self.dataset is None:\n raise ValueError('No known dataset found!')\n return self._max_file_count", "def get_nrof_aux(self):\n aux = 0\n for l in self.aux_array:\n if l:\n aux += 1\n return aux", "def fix_index(self):\n if self.record_size <= self.size:\n self.record_size += 1\n if self.index % self.size == 0:\n self.isFull = True if len(self._storage) == self.size else False\n if self.replace_flag:\n self.index = 1\n return self.index\n else:\n sys.stderr.write('Experience replay buff is full and replace is set to FALSE!\\n')\n return -1\n else:\n self.index += 1\n return self.index", "def count_deleted_bytes(self): # FileObj.count_deleted_bytes\n if self.deleted:\n return self.bytes \n else:\n return 0", "def getnrfiles(self):\n return len(self.filenames)", "def reduce(self, array, index):\n\n return 0", "def n_total_files(self):\n return len(self.fileinfo)", "def __len__(self):\n return len(self.file_paths)", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n 
self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "def files_processed(self) -> int:\n with self.lock:\n return self._files_processed", "def file_pointer(self):\n\n try:\n self.__file.seek(self.__file.tell() - 1)\n except Exception as e:\n raise e", "def undo_scan(self, sub_array_id: int):", "def _get_file_length(self, file):\n self[file] = file.stat().st_size", "def get_num_files(self, file_type):\n return self.file_type_counter.get(file_type, 0)", "def files(self) -> _SeqNumSlicer:\n if self._seq_num_slicer is None:\n self._seq_num_slicer = _SeqNumSlicer(self)\n return self._seq_num_slicer", "def fileno(self):\n return 1", "def __len__(self):\n return int(np.floor(len(self.wav_paths)))", "def count_deleted(self): # FileObj.count_deleted\n if self.deleted:\n return 1\n else:\n return 0", "def __len__(self):\n return len(self.files_self_A_rgbd)", "def totalfiles(self):\n return len([sz for sz in self.iterate()])", "def touched_files(self, parent):", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if 
reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def fileno(self):\r\n raise NotImplementedError()", "def fileCounter(directory):", "def getNumTimeDataFiles(self):\n return self.nTimeDataFiles", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def count_lines(file_obj):\n for idx, line in enumerate(file_obj):\n pass\n file_obj.seek(0)\n return idx + 1", "def file_len(f):\n\n for n, l in enumerate(f, 1):\n pass\n f.seek(0) # rewind\n return n", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def min_file_histogram(self):\n return self._min_file_histogram", "def __len__(self):\n return self._fa.faidx.index[self.name].rlen", "def fileno(self):\n return None", "def fileno(self):\n return None", "def __len__(self):\n return len(self._current_block) - 1", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def get_lenght(self):\n return len(self.filelist)", "def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n", "def file_num_increment(full_fpath):\r\n while os.path.isfile(full_fpath) == True:\r\n \r\n fpath, fext = os.path.splitext(full_fpath) #['C:\\Users\\Desktop\\file(1)', '.ext']\r\n\r\n if re.findall(\"[(]\\d+[)]\", fpath) != []: #Check if there is (x) in the path.\r\n for counter in range(1000): #Loop 1000 times\r\n if fpath.endswith(f\"({counter})\"): \r\n fpath = replace_last(fpath, f\"({counter})\", f\"({counter+1})\") #Replace the last occurence of (counter) in the string.\r\n full_fpath = fpath + fext\r\n break\r\n else: #here we pass for cases where (counter) is in the file/folder name itself. We skip them.\r\n continue\r\n else: #If there is no (counter), we create (1)\r\n counter = 1\r\n full_fpath = fpath + '(' + str(counter) + ')' + fext\r\n\r\n return full_fpath", "def getFileCount(self, startingWithPath=\"\"):\n return self.__controller._getRecordsCount(startingWithPath)", "def FreeFile():\n existing = VBFiles.getOpenChannels()\n if existing:\n return max(existing) + 1\n else:\n return 1", "def __len__(self):\n return int(np.ceil(len(self.image_filenames) / (self.batch_size)))", "def reset(self):\n self.all_files_idx = np.arange(self._div*self._nb_dir)\n\n if self.shuffle>1:\n np.random.shuffle(self.all_files_idx)\n\n self.idx_folder = self.all_files_idx//self._div\n self.idx_file = self.all_files_idx % self._div\n self.current_folder = self.idx_folder[0]\n self.current_file = self.idx_file[0]", "def fs_files_total(self):\n return self._fs_files_total", "def process(self):\n\n if len(self.files) == self._file_ptr:\n raise pipeline.PipelineStopIteration\n\n # Collect garbage to remove any prior CorrData objects\n gc.collect()\n\n # Fetch and remove the first item in the list\n file_ = self.files[self._file_ptr]\n self._file_ptr += 1\n\n # Set up product selection\n # NOTE: this probably doesn't work with stacked data\n prod_sel = None\n if self.only_autos:\n rd = andata.CorrReader(file_)\n prod_sel = np.array(\n [ii for (ii, pp) in enumerate(rd.prod) if pp[0] == pp[1]]\n )\n\n # Load file\n if (\n isinstance(self.freq_sel, slice)\n and (prod_sel is None)\n and (self.datasets is None)\n ):\n self.log.info(\n \"Reading file %i of %i. 
(%s) [fast io]\",\n self._file_ptr,\n len(self.files),\n file_,\n )\n ts = andata.CorrData.from_acq_h5_fast(\n file_, freq_sel=self.freq_sel, comm=self.comm\n )\n else:\n self.log.info(\n \"Reading file %i of %i. (%s) [slow io]\",\n self._file_ptr,\n len(self.files),\n file_,\n )\n ts = andata.CorrData.from_acq_h5(\n file_,\n datasets=self.datasets,\n distributed=True,\n comm=self.comm,\n freq_sel=self.freq_sel,\n prod_sel=prod_sel,\n )\n\n # Store file name\n ts.attrs[\"filename\"] = file_\n\n # Use a simple incrementing string as the tag\n if \"tag\" not in ts.attrs:\n tag = \"file%03i\" % self._file_ptr\n ts.attrs[\"tag\"] = tag\n\n # Add a weight dataset if needed\n if \"vis_weight\" not in ts.flags:\n weight_dset = ts.create_flag(\n \"vis_weight\",\n shape=ts.vis.shape,\n dtype=np.uint8,\n distributed=True,\n distributed_axis=0,\n )\n weight_dset.attrs[\"axis\"] = ts.vis.attrs[\"axis\"]\n\n # Set weight to maximum value (255), unless the vis value is\n # zero which presumably came from missing data. NOTE: this may have\n # a small bias\n weight_dset[:] = np.where(ts.vis[:] == 0.0, 0, 255)\n\n # Return timestream\n if self.use_draco_container:\n ts = containers.CHIMETimeStream.from_corrdata(ts)\n\n return ts", "def fill_values(self, array: List[float]) -> int:\n\n if self._max_collected_data_time <= 0:\n return 0\n\n current_time = self.start_time\n index = 0\n while current_time < self._max_collected_data_time and index < len(array):\n array[index] = self.feedback(current_time)\n index += 1\n current_time = index * self.time_step\n\n # subtract one since index - 1 is the actual last index that was\n # written to\n return index - 1", "def get_size(self, fileobject):\n # move the cursor to the end of the file\n fileobject.seek(0, 2)\n size = fileobject.tell()\n # move the cursor to the begin of the file\n fileobject.seek(0)\n return size", "def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)", "def number_idx(self, filename):\n with open(filename) as fh:\n firstline = fh.readline()\n parts = firstline.split('\\t')\n # only add if there are 4 parts\n if len(parts) != 4:\n return\n\n count = 1\n def writeline(fho, line, count):\n fho.write(line.rstrip() + '\\t' + str(count) + '\\n')\n\n with open(filename + '.tmp', 'w+b') as fho:\n writeline(fho, firstline, count)\n count += 1\n for line in fh:\n writeline(fho, line, count)\n count += 1\n\n shutil.move(filename + '.tmp', filename)", "def get_data_id(self):\n self.data_id = len(glob.glob(osp.join(self.save_dir, 'depth', '*npy')))\n return self.data_id", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n 
p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def sacar_ficha(self, letra):\n \n self.__estado[letra][\"cantidad\"] = self.__estado[letra][\"cantidad\"] - 1\n self.__cant_fichas = self.__cant_fichas - 1\n self.__fichas_disponibles.remove(letra)\n self.__actualizar_letra(letra)", "def file_count(self) -> str:\n return pulumi.get(self, \"file_count\")", "def array(self):\n config = self.config\n\n unique_array = config[\"unique_subarray\"]\n\n p_axes = self.axes\n p_flip = self.flip\n p_part = self.part\n p_units = self.Units\n p_shape = self.shape\n p_location = self.location\n subarray = self._subarray\n\n len_p_axes = len(p_axes)\n\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is not in memory.\n #\n # It could be in a file on disk or implied by a FileArray\n # object, etc.\n # --------------------------------------------------------\n self._original = self.copy()\n\n unique_array = True\n update = True\n copy = False\n\n if not p_part:\n indices = Ellipsis\n else:\n indices = tuple(p_part)\n\n # Read from a file into a numpy array\n p_data = subarray[indices]\n\n # We've just copied p_data from disk, so in place changes\n # are not possible\n in_place_changes = False\n else:\n # --------------------------------------------------------\n # The subarray is in memory\n # --------------------------------------------------------\n update = config[\"update\"]\n\n if p_part:\n p_data = get_subspace(subarray, p_part)\n elif not unique_array:\n p_data = subarray.view()\n else:\n p_data = subarray\n\n copy = config[\"extra_memory\"]\n\n # In place changes to p_data might be possible if we're not\n # copying the data\n in_place_changes = not copy\n\n if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):\n # --------------------------------------------------------\n # p_data is a numpy number (like numpy.int64) which does\n # not support assignment, so convert it to a numpy array.\n # --------------------------------------------------------\n p_data = numpy_array(p_data)\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n\n masked = numpy_ma_isMA(p_data)\n if masked:\n # The p_data is a masked array\n if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(\n p_data\n ):\n # There are no missing data points so recast as an\n # unmasked numpy array\n p_data = 
p_data.data\n masked = False\n # --- End: if\n\n if masked:\n # Set the hardness of the mask\n if config[\"hardmask\"]:\n p_data.harden_mask()\n else:\n p_data.soften_mask()\n # --- End: if\n\n self.masked = masked\n\n # ------------------------------------------------------------\n # Make sure that the data array has the correct units. This\n # process will deep copy the data array if required (e.g. if\n # another partition is referencing this numpy array), even if\n # the units are already correct.\n # ------------------------------------------------------------\n func = config.get(\"func\")\n units = config[\"units\"]\n if func is None:\n if not p_units.equals(units) and bool(p_units) is bool(units):\n func = Units.conform\n\n if func is not None:\n inplace = not copy\n p_data = func(p_data, p_units, units, inplace)\n p_units = units\n\n if not inplace:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n flip = config.get(\"flip\", None)\n if flip or p_flip:\n flip_axes = set(p_flip).symmetric_difference(flip)\n else:\n flip_axes = None\n\n axes = config[\"axes\"]\n\n if p_data.size > 1:\n # --------------------------------------------------------\n # Flip axes\n # --------------------------------------------------------\n if flip_axes:\n indices = [\n (\n slice(None, None, -1)\n if axis in flip_axes\n else slice(None)\n )\n for axis in p_axes\n ]\n p_data = p_data[tuple(indices)]\n\n # --------------------------------------------------------\n # Transpose axes\n # --------------------------------------------------------\n if p_axes != axes:\n iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]\n\n if len_p_axes > len(iaxes):\n for i in range(len_p_axes):\n if i not in iaxes:\n # iaxes.append(i)\n iaxes.insert(i, i)\n # --- End: if\n\n p_data = numpy_transpose(p_data, iaxes)\n # --- End: if\n\n # ------------------------------------------------------------\n # Remove excessive/insert missing size 1 axes\n # ------------------------------------------------------------\n if p_shape != p_data.shape:\n # if len_p_axes != len(p_shape):\n p_data = p_data.reshape(p_shape)\n\n # ------------------------------------------------------------\n # Apply the auxiliary mask\n # ------------------------------------------------------------\n auxiliary_mask = config[\"auxiliary_mask\"]\n if auxiliary_mask:\n for mask in auxiliary_mask:\n if mask.any():\n if not masked:\n p_data = p_data.view(numpy_ma_MaskedArray)\n masked = True\n\n p_data.mask = (mask | p_data.mask).array\n # --- End: for\n\n self.masked = True\n\n # ------------------------------------------------------------\n # Convert the array's data type\n # ------------------------------------------------------------\n p_dtype = p_data.dtype\n dtype = config.get(\"dtype\", None)\n if dtype is not None and dtype != p_dtype:\n try:\n p_data = p_data.astype(dtype) # Note: returns a copy\n except ValueError:\n raise ValueError(\n \"Can't recast partition array from {} to {}\".format(\n p_dtype.name, dtype.name\n )\n )\n else:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n # ------------------------------------------------------------\n # Copy the array\n # -----------------------------------------------------------\n if copy:\n if p_dtype.char != \"O\":\n if not masked or p_data.ndim > 0:\n p_data = p_data.copy()\n else:\n # This is because numpy.ma.copy doesn't work for\n # 
scalar arrays (at the moment, at least)\n p_data = numpy_ma_masked_all((), p_data.dtype)\n\n # We've just copied p_data, so in place changes are\n # not possible\n in_place_changes = False\n else:\n # whilst netCDF4.netcdftime.datetime is mucking bout,\n # don't copy!!!!\n # p_data = _copy(p_data)\n pass\n # --- End: if\n\n # ------------------------------------------------------------\n # Update the partition\n # ------------------------------------------------------------\n if update:\n self.subarray = p_data # ?? DCH CHECK\n self.Units = p_units\n self.part = []\n self.axes = axes\n self.flip = flip\n self.flatten = []\n self.shape = p_shape\n self.location = p_location\n\n self._in_place_changes = in_place_changes\n\n # ------------------------------------------------------------\n # Return the numpy array\n # ------------------------------------------------------------\n return p_data", "def len(self):\n return self._fsize - self._fp.tell()", "def files_processed(self) -> float:\n return pulumi.get(self, \"files_processed\")", "def DeleteFiles(self, min_size=0):\n\n ndeleted = 0\n for filename, counts in list(self.mCounts.items()):\n if counts < min_size:\n os.remove(filename)\n ndeleted += 1\n\n return ndeleted", "def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()", "def fileno(self):\n return self.file.fileno()", "def len(self):\n return self._fsize - self._tell", "def _setcounter():\n fname = os.path.basename(camera.status.lastfile)\n tname = fname.split('.')[0]\n i = len(tname)-1\n if i > -1:\n while tname[i].isdigit() and i>-1:\n i = i - 1\n nname = fname[:-4]\n bname = tname[:i+1]\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9].cntr'):\n os.remove(file)\n for file in glob.glob('/data/counters/'+bname+'[0-9][0-9][0-9][0-9].cntr'):\n os.remove(file)\n f = open('/data/counters/'+nname+'cntr','w')\n f.close()", "def fileno(self):\n raise io.UnsupportedOperation", "def getFirstObjectIndex(self):\n if not self.fileInfo.isEsp(): raise StateError(_('FileRefs.renumberObjects is for esps only.'))\n for cell in self.cells:\n objects = cell.getObjects()\n for object in objects.list():\n if object[0] == 0:\n return object[1]\n return 0", "def test_op_sub_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_r = offl_a - o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __calculate_number_of_frames(self):\n # Save current position\n current_pos = self.__file_object.tell()\n\n # Go 
to start of first frame\n self.__file_object.seek(self.__first_frame_raw_data_position)\n self.number_of_frames = 0\n\n while True:\n if not self.__file_object.read(self.__frame_raw_data_size):\n break\n\n self.__file_object.readline()\n self.number_of_frames += 1\n\n # Restore file pointer\n self.__file_object.seek(current_pos)\n print('Number of frames:', self.number_of_frames)", "def trace_file_len(fname):\n try:\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i - 1\n except FileNotFoundError:\n return 0", "def get_file(self) -> int:\r\n return self.file", "def prune_empty(self): # EntryList.prune_empty\n prevCount = self.count_deleted()\n for name, e in allFiles.contents.iteritems():\n e.prune_empty()\n return allFiles.count_deleted() - prevCount", "def __len__(self):\n\n return math.ceil(len(self.img_files) * self.gen_count / self.batch_size)", "def refreshSize(self):\n if self.isLoaded:\n return 0\n else:\n return self.fileInfo.size", "def Rear(self):\n return -1 if self.isEmpty() else self.__buffer[(self.__start+self.__size-1) % len(self.__buffer)]", "def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def getModificationNumber(self) -> long:\n ...", "def size(self) -> int:\n return sum(p.size for p in self.iterfiles())", "def tell(self):\n return self._upload_position", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def __len__(self):\n return len(self.array)", "def __len__(self):\n return len(self.array)" ]
[ "0.7747842", "0.63648546", "0.6072333", "0.60351396", "0.5918244", "0.57059807", "0.56674904", "0.5664642", "0.56031275", "0.5573989", "0.5520654", "0.5453089", "0.5448838", "0.54281026", "0.5422772", "0.5378375", "0.5307265", "0.5244931", "0.52217174", "0.5220032", "0.5216291", "0.51331407", "0.5115629", "0.5106185", "0.50907373", "0.5089161", "0.5085614", "0.507725", "0.5066728", "0.50590724", "0.50359875", "0.5020529", "0.50147223", "0.49775988", "0.49749458", "0.49738547", "0.49649253", "0.49604303", "0.49502134", "0.49456447", "0.4944183", "0.49393135", "0.49268013", "0.49217892", "0.48981854", "0.48959568", "0.48843467", "0.48842198", "0.48836866", "0.4881064", "0.4877542", "0.48724556", "0.4844187", "0.4844187", "0.48379225", "0.48361284", "0.4816804", "0.48031083", "0.4801541", "0.47996238", "0.4778122", "0.47719982", "0.47565272", "0.47562754", "0.4756017", "0.47559637", "0.47401142", "0.4727482", "0.47270516", "0.4725971", "0.47178772", "0.47169724", "0.47143152", "0.4712884", "0.47011933", "0.47006902", "0.4695959", "0.46883664", "0.46880865", "0.46875745", "0.46849707", "0.46837255", "0.46647176", "0.4656943", "0.46559966", "0.465177", "0.46486926", "0.4644861", "0.46445367", "0.4642497", "0.46409014", "0.46387452", "0.4635584", "0.46266317", "0.4622208", "0.46218398", "0.46215257", "0.46171033", "0.4611914", "0.4611914" ]
0.6648334
1
Add the auxiliary mask to the config dictionary. Assumes that ``self.config`` already exists.
def _configure_auxiliary_mask(self, auxiliary_mask): indices = self.indices new = [ mask[ tuple( [ (slice(None) if n == 1 else index) for n, index in zip(mask.shape, indices) ] ) ] for mask in auxiliary_mask ] # # If the partition is to be parallelised then get rid of mask # # components which are all False so the mask component does # # not get copied to the child process # if not config['serial']: # new = [mask for mask in new if not mask.any()] self.config["auxiliary_mask"] = new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_masking(self, masks):\n self.masks = masks", "def add_config(self):\n\n config = {\n 'invert_byte': InvertByte,\n 'invert_word': InvertWord,\n 'invert_double_word': InvertDoubleWord,\n 'and_byte': AndByte,\n 'and_word': AndWord,\n 'and_double_word': AndDoubleWord,\n 'or_byte': OrByte,\n 'or_word': OrWord,\n 'or_double_word': OrDoubleWord,\n 'exclusive_or_byte': ExclusiveOrByte,\n 'exclusive_or_word': ExclusiveOrWord,\n 'exclusive_or_double_word': ExclusiveOrDoubleWord\n }\n\n return config", "def _updateMaskedValueSet():\n global masked_value_set\n for confName in controller.CONF:\n # Add all needed values to masked_value_set\n if (controller.getParamKeyValue(confName, \"MASK_INPUT\") == True):\n masked_value_set.add(controller.CONF[confName])", "def update_mask(self, mask):\n\n # Get general mask\n general_mask = self.general_mask\n\n # Complete with the input mask\n new_mask = (general_mask | mask)\n\n # Update attribute\n self.mask = new_mask\n\n # Correct i_bounds if it was not specified\n # self.update_i_bnds()\n\n # Re-compute weights\n self.weights, self.weights_k_idx = self.compute_weights()\n\n return", "def add(self, files, mask):\n pass", "def update_mask(self):\r\n \r\n # Binary mask from ML detection\r\n if len(self.selected_ML_Index) > 0:\r\n # Delete items in dictionary that are not roi items\r\n roi_dict = self.selected_cells_infor_dict.copy()\r\n del_key_list=[]\r\n for key in roi_dict:\r\n print(key)\r\n if 'ROIitem' not in key:\r\n del_key_list.append(key)\r\n for key in del_key_list:\r\n del roi_dict[key]\r\n \r\n self.MLmask = ProcessImage.ROIitem2Mask(roi_dict, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n # Binary mask of added rois\r\n self.addedROIitemMask = ProcessImage.ROIitem2Mask(self.roi_list_freehandl_added, mask_resolution = (self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n \r\n self.intergrate_into_final_mask()", "def add_mask_layer(self):\n return Masking(mask_value=self.mask_value, input_shape=(self.max_sequence_size, 1))", "def open(self, config):\n unique_subarray = getrefcount(self._subarray) <= 2\n\n config = config.copy()\n config[\"unique_subarray\"] = unique_subarray\n\n self.config = config\n\n if config.get(\"auxiliary_mask\"):\n self._configure_auxiliary_mask(config[\"auxiliary_mask\"])\n\n self.config[\"extra_memory\"] = self.extra_memory()\n\n self._in_place_changes = True\n self.masked = True\n\n if hasattr(self, \"output\"):\n del self.output\n\n return config", "def __set_special_config_values(cfg: __Config, config: dict) -> \"__Config\":\n cfg.file_name_plane_masks = lambda i: str(i) + config['file_name_plane_mask_suf']\n cfg.file_name_planercnn_image = lambda i: str(i) + config['file_name_planercnn_image_suf']\n cfg.dir_results = f\"{cfg.edge_detection_type}\" # will be the output folder, create in data dir\n cfg.image_size = tuple(int(x) for x in config['image_size'].split(\" \"))\n return cfg", "def _buildMaskImage(self,maskname, mask_array):\n # If an old version of the maskfile was present,\n # remove it and rebuild it.\n if fileutil.findFile(maskname):\n fileutil.removeFile(maskname)\n\n _file = pyfits.open(maskname,mode='append')\n _phdu = pyfits.PrimaryHDU(data=mask_array)\n\n _file.append(_phdu)\n _file.close()\n del _file, _phdu", "def setup_mask(self, d25scale): \n\n logger = logging.getLogger(name=\"ShotSensitivity\")\n \n # see if this is a bad shot\n #print(\"Bad shot from \", self.conf.badshot)\n badshot = loadtxt(self.conf.badshot, dtype=int)\n badtpshots = 
loadtxt(self.conf.lowtpshots, dtype=int)\n if (self.shotid in badshot) or (self.shotid in badtpshots):\n logger.warn(\"Shot is in bad. Making mask zero everywhere\")\n self.badshot = True\n else:\n self.badshot = False\n \n # set up bad amps\n logger.info(\"Bad amps from {:s}\".format(self.conf.badamp))\n self.bad_amps = Table.read(self.conf.badamp)\n sel_shot = (self.bad_amps[\"shotid\"] == self.shotid)\n self.bad_amps = self.bad_amps[sel_shot]\n \n # set up galaxy mask\n logger.info(\"Galaxy mask from {:s}\".format(self.conf.rc3cat))\n galaxy_cat = Table.read(self.conf.rc3cat, format='ascii')\n gal_coords = SkyCoord(galaxy_cat['Coords'], frame='icrs')\n shot_coords = SkyCoord(ra=self.shot_ra, dec=self.shot_dec,\n unit=\"deg\")\n sel_reg = where(shot_coords.separation(gal_coords) < 1.*u.deg)[0]\n\n self.gal_regions = []\n if len(sel_reg) > 0:\n for idx in sel_reg:\n self.gal_regions.append(create_gal_ellipse(galaxy_cat, \n row_index=idx, \n d25scale=d25scale))\n \n # set up meteor mask\n # check if there are any meteors in the shot:\n logger.info(\"Meteors from {:s}\".format(self.conf.meteor))\n self.met_tab = Table.read(self.conf.meteor, format=\"ascii\")\n self.met_tab = self.met_tab[self.shotid == self.met_tab[\"shotid\"]]", "def add_mask(self):\n return xr.open_dataset(f'/{test.dlfile_directory}/{test.climate}_mask_{test.mask_str}_dldata_traintest.nc')", "def apply_mask(self, mask_band=None, mask_val=None):\n pass", "def prepareMask(self, mask):\n\n # Make sure that the mask has the same\n # number of voxels as the atlas image.\n # Use nearest neighbour interpolation\n # for resampling, as it is most likely\n # that the mask is binary.\n try:\n mask, xform = resample.resample(\n mask, self.shape[:3], dtype=np.float32, order=0)\n\n except ValueError:\n raise MaskError('Mask has wrong number of dimensions')\n\n # TODO allow non-aligned mask - as long as it overlaps\n # in world coordinates, it should be allowed\n if not fslimage.Image(mask, xform=xform).sameSpace(self):\n raise MaskError('Mask is not in the same space as atlas')\n\n return mask", "def customize_experiment_config(self, config):\n # TODO: use ConfigList from Coach launcher, and share customization code.\n hyperparams_dict = json.loads(os.environ.get(\"SM_HPS\", \"{}\"))\n\n # Set output dir to intermediate\n # TODO: move this to before customer-specified so they can override\n hyperparams_dict[\"rl.training.local_dir\"] = \"/opt/ml/output/intermediate\"\n\n self.hyperparameters = ConfigurationList() # TODO: move to shared\n for name, value in hyperparams_dict.items():\n # self.map_hyperparameter(name, val) #TODO\n if name.startswith(\"rl.\"):\n # self.apply_hyperparameter(name, value) #TODO\n self.hyperparameters.store(name, value)\n # else:\n # raise ValueError(\"Unknown hyperparameter %s\" % name)\n\n self.hyperparameters.apply_subset(config, \"rl.\")\n return config", "def mask_custom(self, custom_mask):\n\t\t## combine the list and remove the duplicates\n\t\tself.mask = list(set().union(self.mask, custom_mask))\n\n\t\tself.wave = np.delete(self.oriWave, list(self.mask))\n\t\tself.flux = np.delete(self.oriFlux, list(self.mask))\n\t\tself.noise = np.delete(self.oriNoise, list(self.mask))\n\n\t\treturn self", "def add_extra(self, entry, value):\n\n config_spec = vim.vm.ConfigSpec()\n self.logger.info(\"Adding/Updating extra config: {0} = {1}\".format(entry, value))\n opt = vim.option.OptionValue()\n opt.key = entry\n opt.value = value\n config_spec.extraConfig = [opt]\n return 
self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def attention_mask(model, x):\n config = model.config\n input_mask = model.inputs[\"input_mask\"]\n final_mask = model.builder.customOp(opName=\"AttentionMask\",\n opVersion=1,\n domain=\"ai.graphcore\",\n inputs=[input_mask, x],\n attributes={\"dataType\": model.config.popart_dtype})[0]\n final_mask = model.detach(final_mask)\n return final_mask", "def merge_config(self_config, indict):\n\n self_config.merge(indict)\n patch_config(self_config, indict)", "def set_measurement_mask(self, program_name, mask_name, begins, lengths) -> Tuple[numpy.ndarray, numpy.ndarray]:", "def add_config(self, conf_map):\n if self.active.isChecked():\n self.add_feat_conf(conf_map)", "def add_config(self, config):\n clean=lambda n: n.strip().strip('\"').lower()\n for line in config.split('\\n'):\n items=line.strip().split()\n if items and len(items) >= 3:\n cmd, evt, hnd=items[:3]\n \"\"\" NOTE\n - just 'bind' command expected right now\n - '+' prepended ti the handler means REPEAT (make sense just for keyboard keys actually)\n \"\"\"\n cmd=clean(cmd)\n if cmd in ['bind']:\n evt,hnd=(clean(evt), clean(hnd))\n if not cmd in self.config: self.config[cmd]={}\n repeat=hnd.startswith('+')\n if repeat: hnd=hnd[1:]\n self.config[cmd].update([[evt, [hnd, repeat]]])", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def _update_mask_type(configs, mask_type):\n configs[\"train_input_config\"].mask_type = mask_type\n _update_all_eval_input_configs(configs, \"mask_type\", mask_type)", "def createMaskDictionary(self):\n try:\n self.maskMap = dict(list(zip(self.inds,list(range(len(self.inds))))))\n self.maskSet = set(self.inds)\n except Exception as error:\n print(\"failed in createMaskDictionary\", error)", "def add_mask(self, bg, mask):\n # if mask is to tall for the background image, decrease the size by 50%\n if bg.shape[0] < mask.shape[0]:\n mask = cv2.resize(mask, (int(0.5*mask.shape[0]), int(0.5*mask.shape[1])), interpolation=cv2.INTER_AREA)\n h_mask, w_mask = mask.shape[:2]\n h, w = bg.shape[:2]\n \n # select random location for mask\n h_rand = np.random.rand() * 0.9\n h_rand = np.clip(h_rand, 0, 1.0 - h_mask/h)\n h_update = int(h_rand * h)\n w_rand = np.random.rand() * 0.9\n w_rand = np.clip(w_rand, 0, 1.0 - w_mask/w)\n w_update = int(w_rand * w)\n \n # define filter for a mask\n filt = (mask == 0)\n \n # place the mask in the bg img\n mod = bg.copy()\n mod[h_update:h_update+h_mask, w_update:w_update+w_mask, :] *= filt\n mod[h_update:h_update+h_mask, w_update:w_update+w_mask, :] += mask\n \n # yolo dim for mask\n locy = (h_update+h_update+h_mask)/2/h\n locx = (w_update+w_update+w_mask)/2/w\n sizey = (h_mask/h)\n sizex = (w_mask/w)\n \n dim = [locx, locy, sizex, sizey]\n \n return mod, dim", "def _GetChangesForMask(config_sed_input):\n config_sed = config_sed_input\n config_sed += [(r'WALLTIME_MINUTES=100',\n (r'WALLTIME_MINUTES=100\\n'\n r'export CONT=mlperf-nvidia:object_detection\\n'\n r'export DATADIR=\\/data\\n'\n r'export PKLDIR=\\/data\\/coco2017\\/pkl_coco\\n'\n r'export NEXP=1'))]\n if MASKRCNN_BATCH_SIZE.value:\n config_sed.append(\n (r'BATCHSIZE=.*', fr'BATCHSIZE={MASKRCNN_BATCH_SIZE.value}'))\n return config_sed", "def apply_mask(self, mask, parameters=None):\n if parameters is None:\n self.dates = self.dates[mask]\n for key in self.data.keys():\n self.data[key] = self.data[key][mask]\n\n self.manufacturer = self.manufacturer[mask]\n self.data_file = 
self.data_file[mask]\n self.serial_number = self.serial_number[mask]\n else:\n for parameter in parameters:\n self.data[parameter][~mask] = np.nan", "def add_dimension(self, name, bit_index=None, default=False):\n if not self.flag_masks.get(name) is None:\n raise ValueError(\"the name %s is already in this flag space\" % name)\n bit_nums = list(self.flag_bits.values())\n if bit_index is None:\n bit_index = 0\n #assign the lowest currently unused bit number\n while bit_index in bit_nums:\n bit_index += 1\n if bit_index in bit_nums:\n raise ValueError(\"bit_index %d is already taken\" % bit_index)\n self.flag_bits[name] = bit_index\n self.flag_masks[name] = 2**bit_index\n self.default_dict[name] = default", "def _configure(self) -> None:\n reg_data = self.configuration\n conf_data = reg_data & ~0xC0 | 0x80\n # check if already in the right configuration, do not re-configure on and on again\n if reg_data != conf_data:\n self.configuration = conf_data", "def get_config(self):\n layer_config = {\n \"anchors\": self._anchors, \n \"classes\": self._classes,\n \"ignore_thresh\": self._ignore_thresh, \n \"truth_thresh\": self._truth_thresh, \n \"iou_thresh\": self._iou_thresh, \n \"loss_type\": self._loss_type, \n \"iou_normalizer\": self._iou_normalizer,\n \"cls_normalizer\": self._cls_normalizer, \n \"scale_x_y\": self._scale_x_y, \n }\n layer_config.update(super().get_config())\n return layer_config", "def AddWithColourMask(*args, **kwargs):\n return _gdi_.ImageList_AddWithColourMask(*args, **kwargs)", "def mask(self, mask):\n\n self._mask = mask", "def mask(self, mask):\n\n self._mask = mask", "def setup(self, config, base, xsize, ysize, ignore, logger):\n # .. Do any custom setup you need to do.\n # Probably want to call the base class setup function to do the normal determination\n # of the size and position values.\n\n # Extra processing of 'bandpass' argument\n # Most needed type-checking is done in galsim.bandpass\n self._req_bp_fields = ['throughput', 'wave_type']\n self._opt_bp_fields = ['red_limit', 'blue_limit', 'zeropoint']\n try:\n bp = config['bandpass']\n for req in self._req_bp_fields:\n if req not in bp.keys():\n raise ValueError('Must pass field {} for a bandpass object!'.format(req))\n # for opt in self._opt_bp_fields:\n # if opt not in bp.keys():\n # config['bandpass'][opt] = None\n for key in bp.keys():\n if key not in (self._req_bp_fields+self._opt_bp_fields):\n raise ValueError('Field {} is not a valid entry for a bandpass!'.format(key))\n except KeyError:\n raise KeyError('`bandpass` is a required field for a COSMOSChromatic stamp!')\n\n extra_ignore = ignore + ['bandpass']\n return super(self.__class__, self).setup(config, base, xsize, ysize, extra_ignore, logger)", "def set_mask(self, mask):\n self.mask = mask", "def add_config(self, config):\n\n if config.identifier in self.configs:\n raise DuplicateConfigException(\n \"Builder already has config with identifier : {}\".format(\n config.identifier\n )\n )\n\n self.configs[config.identifier] = config", "def set_derived_configs(self):\n if 'dim' in self.config and self.config['dim'] <= 0:\n self.config['dim'] = self.descriptors['input']['dim']", "def add_log_config(self, monitor_name, log_config):\n pass", "def ApplyMask(data,mask):\n \n # loop through portions\n for portion in data.keys():\n # match data keys and apply mask \n for key in data[portion].keys():\n if key in 'xyerr':\n if mask != 'UnMasked':\n data[portion][key].mask = data[portion]['UnMasked']\n data[portion][key].mask = data[portion][mask]\n\t\n return 
data", "def addMasking(self):\n self.abundance_df['masked'] = [False]*len(self.abundance_df.index)\n self.abundance_df['colour'] = ['undefined']*len(self.abundance_df.index)", "def apply_additional_mask(\n old_mask_file=None,\n new_mask_file=None,\n new_thresh=0.0,\n operation='AND'\n):\n if root_mask == None:\n logger.info(\"Specify a cube root file name.\")\n return\n\n myia = au.createCasaTool(casaStuff.iatool)\n myia.open(new_mask_file)\n new_mask = myia.getchunk()\n myia.close()\n\n myia.open(old_mask_file)\n mask = myia.getchunk()\n if operation == \"AND\":\n mask *= (new_mask > new_thresh)\n else:\n mask = (mask + (new_mask > new_thresh)) >= 1.0\n myia.putchunk(mask)\n myia.close()\n\n return", "def set_mask_key(self, func):\r\n self.get_mask_key = func", "def update_configuration(self, config):\n\n config[\"data_transformation\"][\"n_classification_bins\"] = config[\"n_classification_bins\"]\n config[\"data_transformation\"][\"nassets\"] = config[\"nassets\"]\n config[\"data_transformation\"][\"classify_per_series\"] = config[\"classify_per_series\"]\n config[\"data_transformation\"][\"normalise_per_series\"] = config[\"normalise_per_series\"]\n\n return config", "def _addDefaultsToMaskedValueSet():\n global masked_value_set\n for group in controller.getAllGroups():\n for param in group.getAllParams():\n # Keep default password values masked, but ignore default empty values\n if ((param.getKey(\"MASK_INPUT\") == True) and param.getKey(\"DEFAULT_VALUE\") != \"\"):\n masked_value_set.add(param.getKey(\"DEFAULT_VALUE\"))\n\n # Add deault consts we want to mask\n # TODO: add future consts to mask here\n masked_value_set.add(basedefs.CONST_CA_PASS)", "def _add_mask(batch, num_batch_dims):\n mask = tf.ones(tf.shape(list(batch.values())[0])[:num_batch_dims])\n if \"mask\" in batch:\n mask *= batch[\"mask\"]\n batch[\"mask\"] = mask\n return batch", "def paintMask(self):\n if self.avatarConfiguration[\"mask\"]:\n if not os.path.isfile(MASK_UPLOAD):\n image = self.parent.getPlayer().getImageLabel()\n filePath = GG.genteguada.GenteGuada.getInstance().getDataPath(image)\n guiobjects.generateImageSize(filePath, [244, 244], IMG_UPLOAD)\n self.generateMask(\"imgUpload.png\")\n imgPath = MASK_UPLOAD\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"headSize\"], \"mask.png\"))\n self.newAvatarImage(imgPath, \"mask\")", "def set_mask(self, mask):\n self.mask = self._image_to_vector(mask)\n\n # PCA needs to be rerun\n self._mark_cache_invalid()", "def apply_mask(self):\n for mask, param in self.masked_parameters:\n param.mul_(mask)", "def add_mask(self, image_id: int, category_id: int, segmentation: np.ndarray, score: float):\n rle = pycocotools.mask.encode(np.asfortranarray(segmentation.astype(np.uint8)))\n rle['counts'] = rle['counts'].decode('ascii') # json.dump doesn't like bytes strings\n\n self.mask_data.append({\n 'image_id': int(image_id),\n 'category_id': get_coco_cat(int(category_id)),\n 'segmentation': rle,\n 'score': float(score)\n })", "def setup_masking(self, inclusion_masks, exclusion_masks):\n if len(inclusion_masks) != MAX_INCLUSION_MASKS_COUNT:\n raise ValueError(\"The inclusion mask list has {0} masks. That exceeds the expected maximum mask count of \"\n \"{1}.\".format(len(inclusion_masks), MAX_INCLUSION_MASKS_COUNT))\n\n if len(exclusion_masks) != MAX_EXCLUSION_MASKS_COUNT:\n raise ValueError(\"The exclusion mask list has {0} masks. 
That exceeds the expected maximum mask count of \"\n \"{1}.\".format(len(exclusion_masks), MAX_EXCLUSION_MASKS_COUNT))\n\n masking_prefix = self._base_pv_name + \":\" + self._slot_number\n for i in range(MAX_INCLUSION_MASKS_COUNT):\n self._run_cmd(CAPUT + \" \" + masking_prefix + \":INCLUSION\" + str(i + 1) + \" \" +\n str(inclusion_masks[i]))\n\n for i in range(MAX_EXCLUSION_MASKS_COUNT):\n self._run_cmd(CAPUT + \" \" + masking_prefix + \":EXCLUSION\" + str(i + 1) + \" \" + str(exclusion_masks[i]))", "def _initial_set_mask(self, mask):\n if mask is None:\n mask = BooleanArrayMask(np.ones_like(self.value, dtype=bool),\n self._wcs, shape=self.value.shape)\n elif isinstance(mask, np.ndarray):\n if mask.shape != self.value.shape:\n raise ValueError(\"Mask shape must match the {0} shape.\"\n .format(self.__class__.__name__)\n )\n mask = BooleanArrayMask(mask, self._wcs, shape=self.value.shape)\n elif isinstance(mask, MaskBase):\n pass\n else:\n raise TypeError(\"mask of type {} is not a supported mask \"\n \"type.\".format(type(mask)))\n\n # Validate the mask before setting\n mask._validate_wcs(new_data=self.value, new_wcs=self._wcs,\n wcs_tolerance=self._wcs_tolerance)\n\n self._mask = mask", "def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def add_config(self):\n\n config = {\n 'byte_to_integer': ByteToInteger,\n 'integer_to_byte': IntegerToByte,\n 'integer_to_double_integer': IntegerToDoubleInteger,\n 'integer_to_string': IntegerToString,\n 'double_integer_to_integer': DoubleIntegerToInteger,\n 'double_integer_to_real': DoubleIntegerToReal,\n 'double_integer_to_string': DoubleIntegerToString,\n 'binary_coded_decimal_to_integer': BinaryCodedDecimalToInteger,\n 'integer_to_binary_coded_decimal': IntegerToBinaryCodedDecimal,\n 'round': Round,\n 'truncate': Truncate,\n 'real_to_string': RealToString,\n 'integer_to_ascii': IntegerToASCII,\n 'double_integer_to_ascii': DoubleIntegerToASCII,\n 'real_to_ascii': RealToASCII,\n 'ascii_to_hexadecimal': ASCIIToHexadecimal,\n 'hexadecimal_to_ascii': HexadecimalToASCII,\n 'string_to_integer': StringToInteger,\n 'string_to_double_integer': StringToDoubleInteger,\n 'string_to_real': StringToReal,\n 'decode': Decode,\n 'encode': Encode,\n 'segment': Segment\n }\n\n return config", "def addHostmask(self, hostmask):\n assert ircutils.isUserHostmask(hostmask), 'got %s' % hostmask\n if len(unWildcardHostmask(hostmask)) < 8:\n raise ValueError, \\\n 'Hostmask must contain at least 8 non-wildcard characters.'\n self.hostmasks.add(hostmask)", "def _postprocess_config(self, config: Dict[str, Any]) -> Dict[str, Any]:\n new_config = self.config_space.copy()\n new_config.update(cast_config_values(config, config_space=self.config_space))\n return new_config", "def editMaskVariable(self, product, mask_variable):\r\n return mask_variable", "def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... 
{config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config", "def merge_jupyter_config_data(self, config, in_config):\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n self.log.debug(f\"\"\"[lite][config][merge] ..... {in_config}\"\"\")\n\n config = config or {}\n in_config = in_config or {}\n\n for k, v in in_config.items():\n if k in [DISABLED_EXTENSIONS, FEDERATED_EXTENSIONS]:\n config[k] = [*config.get(k, []), *v]\n elif k in [SETTINGS_OVERRIDES]:\n config[k] = config.get(k, {})\n for pkg, pkg_config in v.items():\n config[k][pkg] = config[k].get(pkg, {})\n config[k][pkg].update(pkg_config)\n else:\n config[k] = v\n self.log.debug(f\"\"\"[lite][config][merge] ..... {config}\"\"\")\n return config", "def _update_retain_original_image_additional_channels(\n eval_config,\n retain_original_image_additional_channels):\n eval_config.retain_original_image_additional_channels = (\n retain_original_image_additional_channels)", "def build_attention_mask_3d_padding(source_mask, target_mask):\n mask = make_attention_mask_3d(source_mask, target_mask)\n # invert mask for Megatron\n return mask < 0.5", "def set_datamask(self, mask, update=True):\n if mask is not None and np.shape(mask) != self.shape:\n raise TypeError(\"the shape of the given mask does not match the data shape\")\n \n self._side_properties[\"datamask\"] = mask", "def get_config(self):\n config = {'epsilon':self.eps}\n base_config = super(MatrixReLU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def RPC_DigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(RPC_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc", "def add_feat_conf(self, conf_map):\n pass", "def addAuth(self, hostmask):\n if self.checkHostmask(hostmask, useAuth=False) or not self.secure:\n self.auth.append((time.time(), hostmask))\n else:\n raise ValueError, 'secure flag set, unmatched hostmask'", "def Expand_Mask(mask, feature_dict):\n new_mask = np.zeros(mask.shape + (len(feature_dict),))\n for i in feature_dict.keys():\n ni = int(i)\n new_mask[mask == ni,ni] = 1 \n return new_mask", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def generate_MLmask(self):\r\n self.MLmask = np.zeros((self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n \r\n if len(self.selected_ML_Index) > 0:\r\n for selected_index in 
self.selected_ML_Index:\r\n self.MLmask = np.add(self.MLmask, self.Mask[:,:,selected_index])\r\n \r\n self.intergrate_into_final_mask()\r\n \r\n self.add_rois_of_selected()\r\n \r\n else:\r\n self.intergrate_into_final_mask()", "def Adjust_Data(img,mask,feature_dict, normalize):\n ## Normalize image\n if normalize:\n img = Normalize_Image(img)\n\n ## Assume mask shape has 4 dimensions - mask is (batch, x, y, color-channel)\n ## color-channels are redundant, so just choose the first. \n mask = mask[:,:,:,0]\n \n ## Image_datagen performs interpolation when rotating, resulting in non-integer\n ## mask values. Round these back to integers before expanding the mask. \n mask = mask.round() \n mask = Expand_Mask(mask, feature_dict)\n #print(mask.shape, np.unique(mask, axis = 0))\n return (img,mask)", "def _update(self, mask):\n if self.reporting:\n for pin in self.pins:\n if pin.mode is INPUT:\n pin_nr = pin.pin_number - self.port_number * 8\n pin.value = (mask & (1 << pin_nr)) > 0", "def get_config(self):\n config = {\n }\n base_config = super(MatrixConcat, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))", "def update_add_mask_sym(\n A: dshape(\"M... * M * complex\"),\n B: dshape(\"M... * M * complex\"),\n ind: dshape(\"M... * complex\"),\n mask: dshape(\"M... * complex\"),\n # symmetric: bool = True,\n) -> dshape(\"M... * M * complex\"):\n for (i, ind_i), (j, ind_j) in itertools.product(enumerate(ind), repeat=2):\n if mask[i]:\n A[ind_i, ind_j] += B[i, j]\n\n return A", "def concat_config(config, new_config):\n for new_path in new_config:\n if new_path not in config:\n config[new_path] = new_config[new_path]\n else:\n config[new_path][0] = config[new_path][0] or new_config[new_path][0]\n for filename in config[new_path]:\n if filename != 0:\n if filename in new_config[new_path]:\n for opt in config[new_path][filename]:\n if opt in new_config[new_path][filename]:\n new_config[new_path][filename][opt]\\\n .update(config[new_path][filename][opt])\n else:\n new_config[new_path][filename][opt] = \\\n config[new_path][filename][opt]\n else:\n new_config[new_path][filename] = config[new_path][filename]\n return config", "def __set_mask_regions(self):\n self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n [299,719], [100, 550], [640, 425]]]))", "def load_lab_config(self, lab, host):\n _lab_conf = create_lab_config(lab, host)\n ip = self._hosts.get(host)\n\n # Some labs require additional customization\n # We don't know yet if customization is required\n _custom_conf = None\n\n # lab 8 (Multicast) customization\n if lab == str(8) and host == 'vrdevice':\n\n # customizing lab config\n _lab_conf = customize_lab8_config(_lab_conf)\n\n # loading lab config\n load_cfg_pyez(ip, _lab_conf, self._user, self._pass, mode='merge')\n\n # creating additional (custom) config\n configs = ['Rec1.conf', 'Rec3.conf', 'Rec4.conf', 'Rec2.conf']\n _custom_conf = prepare_custom_config(lab, host, configs)\n\n # loading custom config\n load_cfg_pyez(ip, _custom_conf, self._user, self._pass, mode='merge')\n\n # load multiping.slax script\n install_script(lab, host, ip, self._user, self._pass, 'multiping.slax', '/var/db/scripts/op/')\n\n # No customization required\n else:\n load_cfg_pyez(ip, _lab_conf, self._user, self._pass, mode='merge')\n\n # we want only interfaces used in the lab to be used, the rest disabled\n try:\n interfaces_to_use = get_interfaces(_lab_conf)\n if _custom_conf:\n 
interfaces_to_use = interfaces_to_use + get_interfaces(_custom_conf)\n disable_unused_interfaces(ip, self._user, self._pass, interfaces_to_use)\n except Exception as e:\n print('Cannot disable unused interfaces due to an error, ')\n print('however this is not critical - you can proceed further')\n # TO IMPLEMENT LATER - LOG THE EXCEPTION\n print(e)", "def extend(clself, other):\n clself._cfg_def.extend(other._cfg_def)\n for key, optdef in clself._cfg_def.options.iteritems():\n setattr(clself, key, optdef)", "def extend_mask_nonlocal(mask,kernel=np.ones((3,3))):\n\tassert (mask.dtype is np.dtype(np.bool)), \"input mask must be of bool type\"\n\n\text_mask = mask.copy()\n\tinp_ind = masked_indices(mask)\n\tim_h, im_w = mask.shape\n\n\tker_y, ker_x = kernel.shape\n\tassert(ker_x%2>0), \"kernel must have odd dimensions\"\n\tassert(ker_y%2>0), \"kernel must have odd dimensions\"\n\n\t# indices of the nonzero kernel elements\n\tker_ind_y, ker_ind_x = np.nonzero(kernel)\n\tker_ind_x -= ker_x//2\n\tker_ind_y -= ker_y//2\n\n\tfor ind_x, ind_y in zip(inp_ind%im_w, inp_ind//im_w):\n\t\tfor i, j in zip(ker_ind_x, ker_ind_y):\n\t\t\text_mask[min(max(0,ind_y+j),im_h-1),min(max(0,ind_x+i),im_w-1)] = True\n\n\treturn ext_mask", "def _ApplyFlags(cls, config_values, flag_values):\n super(ContainerSpec, cls)._ApplyFlags(config_values, flag_values)\n if flag_values['image'].present:\n config_values['image'] = flag_values.image\n if flag_values['static_container_image'].present:\n config_values['static_image'] = flag_values.static_container_image", "def configFeAsic(self,gain,shape,base,slk=None,slkh=None,monitorBandgap=None,monitorTemp=None):\n pass", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\", \"0\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def with_mask(self, mask):\n return self[mask]", "def update(self, config):\n # find keys are in config but not in self.config\n extra_keys = set(config.keys()) - set(self.config.keys())\n if len(extra_keys) > 0:\n raise ValueError(\"keys {} in config are not in Config.config\".format(extra_keys))\n # update self.config by config\n else:\n self.config.update(config)", "def setMask(self, mask):\n self.mask = mask", "def read_config(self, config):\n try:\n newconfig = ConfigObj(config, interpolation=False,\n configspec=self._configspec)\n except ConfigObjError as e:\n raise ConfigError(e)\n newconfig = self._validate(newconfig)\n self._config.merge(newconfig)\n logger.info(\"Loaded additional config: {0}\".format(config))", "def setup_b_instance(self,norm,add_ps_mask=True):\n inst_tag = self.tag + '_'+str(self.flux_array_ebin)\n b = bsm.bayesian_scan_NPTF(tag=inst_tag,nside=self.nside,work_dir='/tmp/'+self.tag+'/',psf_dir=psf_dir,nlive=700)\n # Input the data, using the external data if provided\n if self.use_external_data:\n b.load_external_data(self.f1.CTB_en_bins,[self.external_data[self.flux_array_ebin]],self.f1.CTB_exposure_maps)\n else:\n b.load_external_data(self.f1.CTB_en_bins,self.f1.CTB_count_maps,self.f1.CTB_exposure_maps)\n\n if add_ps_mask:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False,ps_mask_array = self.f1.ps_mask_array)\n else:\n b.make_mask_total(band_mask_range = [-self.band_mask,self.band_mask],mask_ring = False)\n\n b.add_new_template(self.f1.template_dict)\n b.rebin_external_data(1)\n\n b.add_poiss_model('ps_model','$A_{ps}$',[0.0,3.0],False)\n 
b.add_poiss_model('p7','$A_{p7}$',[0.0,2.0],False)\n b.add_poiss_model('bubs','$A_{bubs}$',[0.0,2.0],False)\n b.add_poiss_model('iso','$A_{iso}$',[0.0,3.0],False)\n # Add in a fixed J_map template\n b.add_fixed_templates({'J_map':[norm*self.J_map_arr[self.flux_array_ebin]/np.mean(self.J_map_arr[self.flux_array_ebin])]})\n\n b.initiate_poissonian_edep()\n return b", "def _update_classification_mask(self, obj_class, mask_coords):\n # Remove background where object resides\n self.classification_mask[0, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = 0\n # Set classification mask where object resides\n self.classification_mask[obj_class, mask_coords[1]:mask_coords[3], mask_coords[0]:mask_coords[2]] = 1", "def update_compute_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('compute'):\n rconfig.add_section('compute')\n rconfig.set(\n 'compute', 'fixed_network_name',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)", "def add_config_field(self, content_type, name, *args, **kwargs):\n if name == 'representation_args':\n raise ValueError('{} is a reserved Config field name'.format(name))\n self._add_config_arg(ConfigField, content_type, name, *args, **kwargs)", "def config_extra_settings(self, data_dir):\n # load data directory configuration\n self.label_path = data_dir\n self.label_config_dir = os.path.join(self.label_path, 'labelconfig')\n self.label_config_suffix = 'lbl'\n\n # set icon configuration\n self._icon_dir=get_icon_dir()\n\n # set window title\n self.setWindowTitle('FreeROI')\n #self.resize(1280, 1000)\n self.center()\n # set window icon\n self.setWindowIcon(QIcon(os.path.join(self._icon_dir,'icon.png')))\n\n self._init_configuration()\n self._init_label_config_center()\n\n # create actions\n self._create_actions()\n\n # create menus\n self._create_menus()", "def add_noise(self, mdct_norm, masking_threshold):\n return self.psychoacoustic.add_noise(mdct_norm, masking_threshold)", "def build_mask(cell, wgt, final_layer=None, final_datatype=None):\n fl = wgt.clad_layer+1 if final_layer==None else final_layer\n fd = 0 if final_datatype==None else final_datatype\n\n polygons = cell.get_polygons(by_spec=True)\n try:\n pWG = polygons[(wgt.wg_layer, wgt.wg_datatype)]\n pCLAD = polygons[(wgt.clad_layer, wgt.clad_datatype)]\n except KeyError:\n print(\"Warning! 
No objects written to layer/datatype specified by WaveguideTemplate\")\n if wgt.resist=='+':\n cell.add(gdspy.fast_boolean(pWG, pCLAD, 'xor', precision=0.001, max_points=199, layer=fl, datatype=fd))\n elif wgt.resist=='-':\n cell.add(gdspy.fast_boolean(pWG, pCLAD, 'and', precision=0.001, max_points=199, layer=fl, datatype=fd))", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def add(self, host, **kwargs):\n self.configs_[0][1].add(host, **kwargs)", "def set_feature_mask(self, feature_mask):\n self.feature_mask = feature_mask", "def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config", "def populate_config(self, config):\n self.use_wine_mappings.set_active(config['use_wine_mappings'])\n self.force_recheck.set_active(config['force_recheck'])\n self._previous_force_recheck = config['force_recheck']\n self.resume.set_active(config['resume'])\n try:\n self.glade.get_widget('time_added_checkbox').set_active(\n 'time_added' in config['transfer_meta'])\n except KeyError:\n pass\n self.resume_dat_entry.set_text(config['previous_resume_dat_path'])", "def get_config(self):\n\n # these are all that is needed to rebuild this class\n config = dict(hidden_size=self.hidden_size,\n word_embedding=self.word_embedding,\n detection_embedding=self.detection_embedding,\n mode=self.mode,\n decoder_pos_emb=self.decoder_pos_emb,\n ** self.kwargs)\n\n base_config = super(RegionFeature, self).get_config()\n return dict(list(base_config.items()) +\n list(config.items()))" ]
[ "0.5756575", "0.5562516", "0.54862016", "0.5367435", "0.5347834", "0.53455114", "0.5306387", "0.52775955", "0.522134", "0.5194576", "0.51778084", "0.5152401", "0.51056355", "0.51011837", "0.50790006", "0.5067728", "0.5065551", "0.4962702", "0.4946801", "0.4946075", "0.49183005", "0.49159494", "0.49100894", "0.49084634", "0.49076694", "0.48849526", "0.48177648", "0.48108754", "0.48106894", "0.47964016", "0.47839794", "0.4772327", "0.47660932", "0.47660932", "0.47464186", "0.47456405", "0.47291517", "0.47210765", "0.47192547", "0.47168988", "0.47162175", "0.47138405", "0.470761", "0.47030455", "0.46968415", "0.46941915", "0.46916538", "0.46812856", "0.46789327", "0.46658984", "0.46528703", "0.4652148", "0.46498463", "0.4648871", "0.46485573", "0.46315843", "0.46312842", "0.4624812", "0.4624812", "0.46230158", "0.46130076", "0.46051002", "0.46008947", "0.4593308", "0.4580749", "0.45794028", "0.45785326", "0.45618272", "0.4560683", "0.45586264", "0.45581818", "0.4551106", "0.45469415", "0.45461303", "0.45377606", "0.4531351", "0.4521042", "0.45142546", "0.45125037", "0.4503131", "0.4499034", "0.4498298", "0.4493728", "0.4490731", "0.44901147", "0.44876668", "0.4478467", "0.44779912", "0.44751477", "0.44688734", "0.446438", "0.44607773", "0.44565517", "0.44565517", "0.44565517", "0.44534683", "0.444959", "0.4445261", "0.44449583", "0.44417897" ]
0.71955097
0
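Each record in this dump pairs a document_score with a list of negative_scores and a document_rank. One plausible reading, and it is only an assumption about how the dump was produced, is that the rank counts the negatives that outscore the positive document; the rank of 0 here, 23 in the next record, and 1 in the record after that are all consistent with it. A minimal sketch of that interpretation:

# Minimal sketch (assumption): rank a positive document by counting how many
# negatives received a higher retrieval score than it did.
def document_rank(document_score, negative_scores):
    return sum(1 for s in negative_scores if s > document_score)

# None of the negative scores listed in this record exceeds 0.71955097,
# which matches the document_rank of 0 shown above.
print(document_rank(0.71955097, [0.5756575, 0.5562516, 0.44417897]))  # prints 0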
The indices of the master array which correspond to this partition's data array.
def indices(self):
    return tuple([slice(*r) for r in self.location])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indices(self) -> np.ndarray:\n return self.impl.indices", "def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])", "def get_index_array(self):\n return self.region_pairs", "def get_indices(self):\r\n return self._indices", "def indices(self):\n return self.index.indices", "def get_data_idx(self)->list:\n return self.__data_idx", "def mainIndices(self):\n return self.i1, self.i2", "def indices(self):\n return self._kbounded_partitions", "def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def getIndices(self):\r\n return self._indices", "def get_main_branch_indices(self):\n\n assert self.halt is not None\n prog_main_index = self.halt_index\n prog_main_indices = self.halt.prop(\n 'progenitor.main.indices', self.halt_index)\n self.main_branch_indices = prog_main_indices\n return prog_main_indices", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def get_all_master_idx_paths(self):\n paths = utilities.get_all_master_index_paths(rootdir=constants.flow_data_dir)\n return paths", "def indices(self):\n return range(len(self))", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def getLandmarkindices(self):\n return self.subsetindices", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def get_all_master_ids(self):\r\n return self._handler.get_all_master_ids()", "def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def index(self):\n return self.data.index.values", "def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def _load_split_indices(self):\n split_file = self.SPLITS.get(self.split)\n indices_file = self._filepath(split_file)\n\n with open(indices_file) as txt_file:\n idx_data = [int(i) for i in txt_file.readline().split()]\n\n return idx_data", "def get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def SectionIndicesConnectedToSoma(self):\n indices = []\n index = 0\n for each_section in self._section_list:\n if each_section.ParentId() == -1:\n indices.append(index)\n index += 1\n return indices", "def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)", "def get_subset_inds(self, adata_parent):\r\n subset_inds = np.ones(len(adata_parent), dtype=bool)\r\n for condition, values in self.subset_cond.items():\r\n subset_inds *= adata_parent.obs[condition].isin(values)\r\n return subset_inds", "def data_index_from_selection(self, sel_indexes):\n data_indexes = []\n # 
Translate the landmark indexes to the original data indexes\n for i in sel_indexes:\n data_indexes.append(self.analysis.landmark_orig_indexes[i])\n return data_indexes", "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def _getNonPrototypeIndices(self, clusters: ndarray) -> ndarray:\n return np.delete(np.arange(self.dataSize), clusters.flatten())", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def childWellIndices(self):\n return self._wellIndices", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def ordered_indices(self):\n return self.base_dataset.ordered_indices()", "def _family_index(self):\n\n if hasattr(self, \"_family_index_cached\"):\n return self._family_index_cached\n\n ind = np.empty((len(self),), dtype='int8')\n for i, f in enumerate(self.ancestor.families()):\n ind[self._get_family_slice(f)] = i\n\n self._family_index_cached = ind\n\n return ind", "def data(self) -> List[int]:\n return self.__ids", "def data(self) -> List[int]:\n return self.__ids", "def data(self) -> List[int]:\n return self.__ids", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()", "def get_instance_index(self):\n return np.unique([tp[0] for tp in self._innercontainer])", "def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))", "def ordered_indices(self):\n return self.d1.ordered_indices()\n # RETURN BASED ON D1's sizes", "def 
occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices", "def labeled_indices(self):\n return self._labeled_indices", "def index(self):\n return self.dataset.index", "def _get_indexes(self, participants):\n tr_idx = int(np.floor(self.tr_size*len(participants)))\n j = self.val_size + self.tr_size\n val_idx = int(np.floor(j*len(participants)))\n return tr_idx, val_idx", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. \n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def indices(self):\n\n # We used lookup tables here. Read more about other methods here:\n # https://chessprogramming.wikispaces.com/Bitboard+Serialization\n\n if self.num == 0:\n return []\n\n bits = []\n\n for i in [0, 1, 2, 3, 4, 5, 6, 7]:\n row = (self.num >> UINT64_PADDING[i]) & EIGHT_ONES\n indices = row_to_indices[row]\n for index in indices:\n bits.append(index + i*8)\n\n return bits", "def all_sampled_nodes_indexes(self) -> torch.LongTensor:\n all_sampled_nodes_indexes: _typing.Any = self.__all_sampled_nodes_indexes\n return all_sampled_nodes_indexes", "def indices(self, position=None):\n \n raise NotImplementedError()", "def sparse_arrays(self):\n return self._sparse_arrays", "def get_multi_index(self):\n return self.basis.elements", "def index(self):\n return self.data.index", "def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]", "def expert_to_batch_indices(self):\n return tf.split(\n self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts)", "def idx_adjacency_lists(self) -> List[List[int]]:\n result = []\n\n for intersection in self._intersection_list:\n nbs = []\n\n for nb in self.adj_dict[intersection]:\n nbs.append(self._intersection_to_idx[nb])\n\n result.append(nbs)\n\n return result", "def _get_indices_1(image_set, num_labels=2, num_protected=2):\r\n indices = [[[] for _ in range(num_protected)] for _ in range(num_labels)]\r\n for _, label, cluster, index in image_set:\r\n indices[label][cluster].append(index)\r\n\r\n return indices", "def getBitArrayIndices(self, key):\n\t\treturnList = []\n\t\tfor i in range(1, self.k + 1):\n\t\t\treturnList.append((hash(key) + i * mmh3.hash(key)) % self.m)\n\t\t#print \"Indices list for key: \", key, \" is: \", str(returnList)\n\t\treturn returnList", "def ordered_indices(self):\r\n return np.arange(len(self), dtype=np.int64)", "def _raveled_index(self):\n return np.r_[:self.size]", "def _raveled_index(self):\n return np.r_[:self.size]", "def cluster_ids(self):\n return self.model.cluster_ids", "def geneIds(self):\n\t\treturn self._dataframe.index.tolist()", "def getGlobalIdxVals( self, i : int ):\n return range(self._layout.starts[i],self._layout.ends[i])", "def main_rep_idxs(self):\n\n if '{}/{}'.format(SETTINGS, MAIN_REP_IDXS) in self.h5:\n return self.h5['{}/{}'.format(SETTINGS, MAIN_REP_IDXS)][:]\n else:\n return None", "def target_nodes_indexes(self) -> _TargetNodes:\n return self.__target_nodes_indexes", "def get_index(self):\n return self.disk.partitions.index(self)", "def 
_compute_indices(self):\n self.indices = np.arange(len(self.im_filenames))\n np.random.shuffle(self.indices)", "def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def get_final_pruned_indices(self):\n return self.final_pruned_indices", "def getind(self,start,end,blk):\n\n if blk is None:\n # Return all blocks\n blk = np.arange(self.ind[start].size)\n\n ind=np.array([])\n for k,val in enumerate(blk):\n ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))\n return ind.astype(int)", "def get_indexes(self):\n return set(k.index for k in self if k.has_index)", "def activeChildWellIndices(self):\n return self._activeWellIndices", "def _calculate_chunk_offsets(self):\n offset = 0\n offsets = []\n for chunk in self.data.iterchunks():\n offsets.append(offset)\n offset += len(chunk)\n return np.array(offsets)", "def index(self):\n return list(self._innercontainer)", "def _selected_indices(self, subset):\n # We want the DataFrame to be indexed the same way its values array is\n ftr = self.frametracks.reset_index(drop=True)\n if subset is not None:\n ftr['tmpindex'] = ftr.index.values\n ftr = ftr.set_index('particle').reindex(subset).set_index('tmpindex')\n if self.autoclip:\n # Boundaries are computed for the whole system\n xmin = self.frametracks.x.min() + self.nncutoff\n xmax = self.frametracks.x.max() - self.nncutoff\n ymin = self.frametracks.y.min() + self.nncutoff\n ymax = self.frametracks.y.max() - self.nncutoff\n r = ftr.index[ (ftr.x > xmin) & (ftr.x < xmax) & \\\n (ftr.y > ymin) & (ftr.y < ymax) ].values.astype(int)\n else:\n r = ftr.index.values.astype(int)\n if self.fast:\n return np.random.permutation(r)[:int(len(r) / 10)]\n else:\n return r", "def indexes(self) -> list:\n return self._indexes", "def get_cluster_indices(self,dataset, cluster_number):\n\t\tself.__init__(dataset, self.k)\n\t\tself.e_step() #got responsibilities\n\t\tmax_cluster = np.argmax(self.w, axis = 1)\n\t\tindices = []\n\t\tfor i in range(dataset.shape[0]):\n\t\t\tif max_cluster[i] == cluster_number:\n\t\t\t\tindices.append(i)\n\t\treturn indices", "def _prog_field_indices(self):\n\n if self._pfi is not None:\n return self._pfi\n\n self.arbor._grow_tree(self)\n self._pfi = np.array([node.tree_id for node in self._prog_nodes])\n return self._pfi", "def edges_indices(self) -> numpy.array:\n r = numpy.array(self._receivers, dtype=numpy.int16)\n s = numpy.array(self._senders, dtype=numpy.int16)\n return numpy.asarray([r, s], dtype=numpy.int16)", "def get_all_master_idx_dfs(self):\n dfs = utilities.get_all_master_index_dfs(master_index_paths=self.all_master_idx_paths)\n return dfs", "def get_index(self, x, y):\n i = (y - self.y0) // self.dy\n j = (x - self.x0) // self.dx\n i = min(max(i, 0), self.n-1)\n j = min(max(j, 0), self.m-1)\n return [i, j]", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def index_dict(self):\n msk = self.load_mask()\n mski = enumerate(msk)\n ifiltered = (i for (i, m) in mski if m == 1)\n return {i: j for (j, i) in enumerate(ifiltered)}", "def agent_locs_idx(self):\n return tuple(self.agent_locs.T)", "def ordered_indices(self):\r\n '''we need random order'''\r\n if self.shuffle:\r\n indices = np.random.permutation(len(self))\r\n else:\r\n indices = np.arange(len(self))\r\n '''\r\n if 
self.tgt_sizes is not None:\r\n indices = indices[np.argsort(self.tgt_sizes[indices], kind='mergesort')]\r\n return indices[np.argsort(self.src_sizes[indices], kind='mergesort')]\r\n '''\r\n return indices", "def get_data(self):\n idxs = self.get_indexes(self._start, self._length, self.maxsize)\n return self._data[idxs].copy()", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def _notstaticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def _tree_field_indices(self):\n\n if self._tfi is not None:\n return self._tfi\n\n self.arbor._grow_tree(self)\n self._tfi = np.array([node.tree_id for node in self._tree_nodes])\n return self._tfi", "def _get_k_indices(self, ks):\n if self.staticneighs:\n idx_ks = ks\n else:\n idx_ks = [self.ks.index(e) for e in ks]\n return idx_ks", "def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)", "def run_idxs(self):\n return list(range(len(self._h5[RUNS])))" ]
[ "0.70275134", "0.6850407", "0.683511", "0.6796057", "0.6708753", "0.6668921", "0.6662458", "0.66586286", "0.6644368", "0.6611067", "0.66031367", "0.65675586", "0.65319914", "0.64623576", "0.6393266", "0.6372617", "0.6343201", "0.63220435", "0.6305004", "0.62970823", "0.628111", "0.62674505", "0.624798", "0.6227296", "0.6190301", "0.6181901", "0.61509717", "0.61253864", "0.612371", "0.6105406", "0.610102", "0.6082863", "0.6060004", "0.6039434", "0.602746", "0.6016745", "0.6013818", "0.5987249", "0.5986039", "0.5977027", "0.59651345", "0.59651345", "0.59651345", "0.5962879", "0.5957046", "0.5941614", "0.59289116", "0.5906297", "0.59025055", "0.5901664", "0.5886664", "0.5878906", "0.5851613", "0.5848616", "0.5815533", "0.58128786", "0.58127564", "0.5805883", "0.5803346", "0.58005875", "0.5794765", "0.5793853", "0.5788719", "0.57824725", "0.5780825", "0.5760377", "0.5760377", "0.5759219", "0.57566726", "0.5742169", "0.5740475", "0.57397515", "0.57309896", "0.5730029", "0.5720656", "0.5713963", "0.56969166", "0.56939006", "0.5684604", "0.5675086", "0.56661737", "0.5665716", "0.5657009", "0.5651558", "0.5648632", "0.5644165", "0.5638548", "0.56361437", "0.5634593", "0.5634037", "0.5634037", "0.5624275", "0.5617037", "0.5616045", "0.5596753", "0.5595638", "0.55885994", "0.5587997", "0.5573761", "0.5570025" ]
0.62476814
23
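The indices property above converts the partition's location, a sequence of (start, stop) ranges into the master array, into a tuple of slice objects that can index the master array directly. The following is a minimal self-contained sketch of that pattern; the MiniPartition class, the (2, 5)/(0, 3) ranges and the 6x6 master array are illustrative assumptions, not the library's real objects.

import numpy as np

# Minimal sketch (assumption): a stand-in partition whose `location` holds
# (start, stop) ranges into a master array, mirroring the property above.
class MiniPartition:
    def __init__(self, location):
        self.location = location  # e.g. [(2, 5), (0, 3)] for a 2-D master array

    @property
    def indices(self):
        # One slice per dimension, built exactly as in the document above.
        return tuple([slice(*r) for r in self.location])

master = np.arange(36).reshape(6, 6)      # hypothetical 6x6 master array
part = MiniPartition([(2, 5), (0, 3)])    # rows 2..4, columns 0..2
print(master[part.indices].shape)         # (3, 3)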
True if and only if the partition's subarray is in memory as opposed to on disk.
def in_memory(self):
    return hasattr(self._subarray, "__array_interface__")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def has_shareable_memory(a):\r\n return _get_backing_memmap(a) is not None", "def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False", "def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()", "def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res", "def is_array(self):\n return len(self.descriptor) > 1", "def is_full(self) -> bool:\n return self._array[0].all()", "def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN", "def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True", "def is_full(self) -> bool:\r\n return self.size == self.capacity", "def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False", "def full(self):\n return self.size >= self.maxsize", "def isFull(self) -> bool:\n return self._elems == self._k", "def full(self):\n return self._current_size == self._size", "def is_full(self):\n return self.heap_size >= self.capacity", "def 
isSetSize(self):\n return _libsbml.Compartment_isSetSize(self)", "def has_next(self):\n while self._row < self._n and not self._arr[self._row]: # current sub-array is empty\n self._row += 1 # move to next sub-array\n self._col = 0\n if self._row >= self._n: # end of master-array already\n return False\n return True", "def isFull(self):\n\t\treturn self.size == self.capacity", "def full(self) -> bool:\n return self.maxsize and self.qsize() >= self.maxsize", "def is_full(self):\n\n return self.count == len(self.array)", "def has_full_batch(self) -> bool:", "def is_in_heap(self, address):\n return self.is_address_of_type(address, MemoryType.MajorHeap, MemoryType.MinorHeap)", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def is_free(self):\n return self._size > 0", "def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]", "def full(self):\r\n if self._maxsize <= 0:\r\n return False\r\n else:\r\n return self.qsize() >= self._maxsize", "def check_free(self, arr):\n cell_location = self.cartesian_to_cell(arr)\n cell = self.occ_matrix[cell_location[0], cell_location[1]]\n return cell == 0", "def is_full(self):\n return len(self.__occupied_slots__) >= self.__size__", "def _loaded_data(self):\n try:\n dsize = [int(d) for d\n in self.run('fits size', via='get').split()]\n except (ValueError, TypeError, AttributeError) as err:\n log.debug(f' FITS size error: {err}')\n return False\n else:\n if 0 in dsize:\n return False\n else:\n return True", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def has_definite_size(iterable):\n return hasattr(iterable, '__len__')", "def is_array(self):\n return False", "def isFull(self):\n return self.rear == self.size", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def is_contiguous(arr):\n mn, mx = min(arr), max(arr)\n s = sum(arr)\n sn = (mn*(mn-1))/2 if mn!=0 else 0\n sx = (mx*(mx+1))/2\n if s == sx-sn:\n return True\n else:\n return False", "def is_in(self, starting_address, size):\r\n if starting_address > self.starting_address:\r\n return (self.starting_address+self.size) > starting_address\r\n elif starting_address < self.starting_address:\r\n return (starting_address + size) > self.starting_address\r\n return True", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", "def _is_size_bound(self, path):\n return path.suffix == \".bin\"", "def is_full(self):\n return self.idx == self.len", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")", "def is_fragmentable(fragment_size, offset, chunk_size):\n return ((chunk_size - fragment_size) / offset) % 1 == 0", "def _array_name_implies_ND_slice(self, array_name):\n for v in self._split_arrays.values():\n if array_name in 
v:\n return True\n\n generic_match = re.findall(\"^(.+)_[xyz]$\", array_name)\n loadable_keys = self.loadable_keys()\n keys = list(self.keys())\n if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:\n return generic_match[0] in loadable_keys or generic_match[0] in keys\n return False", "def has_max_size(self, max_size):\n return self.polyp_size and sorted(self.polyp_size, reverse=True)[0].get_max_dim() < max_size", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def contains_offset(self, offset):\n return (offset >= self.offset) and (offset < self.offset + self.filesize)", "def is_subset(subset: np.array, superset: np.array) -> bool:\n superset_lookup = set(superset)\n for val in subset:\n if val not in superset_lookup:\n return False\n\n return True", "def is_empty(self):\n return self.size == []", "def full(self):\n return self.q_size.current_value == self._maxsize", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True", "def isFull(self) -> bool:\n return self.size == self.maxlen", "def full(self) -> bool:\n\n return (self._size == self._capacity)", "def is_slice(self) -> bool:\n return self._is_slice", "def isFull(T):\r\n return len(T.data) >= T.max_data", "def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False", "def is_full(self):\n return len(self._data) == 1", "def is_complete(self) -> bool:\n return (\n (\n self.materialized_subset | self.failed_and_downstream_subset\n ).num_partitions_and_non_partitioned_assets\n == self.target_subset.num_partitions_and_non_partitioned_assets\n )", "def hasmem(state, mem):\n if mem <= state[HEAD][MEM]:\n return True\n else:\n state[HEAD][STATUS] = OOM\n return False", "def is_empty(self):\n return self.heap_size <= 0", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def dictsub_sparse(small,big):\n\treturn all([(r,v) in catalog(big) for r,v in catalog(small)])", "def is_virtualized (self):\n return len([i for i in self.infras if\n i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,\n self.TYPE_INFRA_STATIC_EE)]) > 0", "def full(self) -> bool:\n return self.current_offset == self.max_offset", "def check_subarray(array1, array2):\r\n \r\n # check assumption\r\n if (len(array2.shape) != 1) or (array2.shape[0] != array1.shape[-1]):\r\n raise ValueError('Attempting to check for subarray equality when shape assumption does not hold.')\r\n \r\n return np.all(array1==array2, axis=-1)", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def device_out_of_memory(self) -> bool:\n return pulumi.get(self, \"device_out_of_memory\")", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def _is_empty(self):\n return self.size == 0", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def stop_loading(self):\n return psutil.virtual_memory()[2] >= self.max_memory", "def isFull(self):\n if len(self._data) == self._length:\n return True\n else:\n return False", "def isFull(self) -> bool:\n return self.count == self.capacity", "def isFull(self) -> bool:\n return self.count == self.capacity", "def PartiallyEmpty(self):\n 
return None==self.piecesToRecover", "def __bool__(self):\n return self.end < len(self.data)", "def is_empty(self):\n return self.__size == 0", "def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)", "def is_out_of_memory(self):\n\n return self._state == \"OUT_OF_MEMORY\"", "def is_empty(self):\n return self._size == 0", "def is_subdivision_available(self, position: np.ndarray) -> bool:\n\t\tsubdivision_x_index = int(position[0]) // self.square_subdivision_length\n\t\tsubdivision_y_index = int(position[1]) // self.square_subdivision_length\n\t\treturn self.plane_subdivisions_availability[subdivision_x_index, subdivision_y_index] == 1", "def iseod(self):\n\n return self.byte_ptr >= len(self.data)", "def _is_empty(self):\n if self.allocated_spaces == 0:\n return True\n else:\n return False", "def canPartition(self, nums):\n cache = {}\n\n def helper(nums, i, k):\n if (i, k) in cache:\n return False\n if i >= len(nums):\n return False\n if k == 0:\n return True\n include_curr = helper(nums, i + 1, k - nums[i])\n exclude_curr = helper(nums, i + 1, k)\n if include_curr:\n cache[(i, k)] = False\n return include_curr or exclude_curr\n if not nums:\n return True\n s = sum(nums)\n if s % 2 != 0:\n return False\n return helper(nums, 0, s/2)", "def is_empty(self) -> bool:\n return self.heap.length() == 0", "def is_empty(self) -> bool:\n return self.heap.length() == 0", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def isPeakAssigned(peak, fully=True):\n\n n = 0\n for peakDim in peak.peakDims:\n if len(peakDim.peakDimContribs) > 0:\n n +=1\n \n if n == len(peak.peakDims):\n return True\n \n elif n > 0:\n if fully:\n return False\n else:\n return True\n \n else:\n return False", "def is_empty(self):\n return len(self.__heap) == 0", "def isFull(self):\n return self.rear - self.front == self.size", "def allocate(self) -> bool:\n if hasattr(self.at_options, 'allocate'):\n return self.at_options.allocate == 1\n return False", "def is_full(self):\n return len(self.keys) > self.m", "def is_full(self):\n return len(self.cache_data) >= self.MAX_ITEMS", "def healthy_test(obj: np.ndarray) -> bool:\n nb_rows, nb_cols = obj.shape\n return nb_rows == nb_cols > 1 and np.array_equal(obj, colony(nb_rows))", "def is_full(self) -> bool:", "def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)", "def is_distributed(self) -> bool:\n return self.size > 1" ]
[ "0.7677807", "0.75874203", "0.73676527", "0.6567502", "0.6373806", "0.62520576", "0.6168975", "0.6139325", "0.61339194", "0.6126576", "0.611511", "0.608777", "0.6025861", "0.6025105", "0.60162103", "0.5953649", "0.59421575", "0.59196216", "0.5899218", "0.5882104", "0.5880964", "0.5842848", "0.584197", "0.583599", "0.5819761", "0.5816238", "0.5800492", "0.57884943", "0.57869345", "0.5768234", "0.57646", "0.57633954", "0.57630956", "0.5754871", "0.5750774", "0.57436514", "0.5730692", "0.57179475", "0.5693052", "0.5687919", "0.568315", "0.56760603", "0.5665462", "0.5658374", "0.56544167", "0.5648596", "0.563141", "0.5621424", "0.5620027", "0.56016713", "0.55919033", "0.55896264", "0.5574594", "0.557013", "0.5567939", "0.5558107", "0.5557331", "0.5543551", "0.55386424", "0.55360925", "0.5528201", "0.55249494", "0.55246234", "0.55162436", "0.5515016", "0.55120623", "0.5498643", "0.54982555", "0.5493889", "0.54934096", "0.54904944", "0.54704463", "0.54613495", "0.54613495", "0.54604244", "0.544921", "0.544921", "0.54372215", "0.54320014", "0.543098", "0.54293364", "0.54267144", "0.54163945", "0.5414924", "0.5410628", "0.54078674", "0.5404977", "0.54027945", "0.54027945", "0.5401461", "0.53973633", "0.53939974", "0.5391803", "0.53884137", "0.53869176", "0.5377037", "0.5375299", "0.53736126", "0.53706396", "0.5369923" ]
0.7635218
1
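The in_memory test above relies on the fact that a real numpy array exposes __array_interface__, while a file-backed subarray does not; the related on_disk and in_cached_file checks visible among the negatives (and in the next record) use isinstance tests against FileArray and CachedArray. The sketch below ties the three together; the FileArray and CachedArray stand-in classes are assumptions made for illustration, not the library's actual implementations.

import numpy as np

# Stand-in backing types, assumed purely for illustration.
class FileArray:               # subarray kept in a file on disk
    pass

class CachedArray(FileArray):  # subarray kept in a temporary cache file on disk
    pass

class MiniPartition:
    def __init__(self, subarray):
        self._subarray = subarray

    @property
    def in_memory(self):
        # numpy arrays expose __array_interface__; the file-backed stand-ins do not.
        return hasattr(self._subarray, "__array_interface__")

    @property
    def on_disk(self):
        return isinstance(self._subarray, FileArray)

    @property
    def in_cached_file(self):
        return isinstance(self._subarray, CachedArray)

print(MiniPartition(np.zeros(3)).in_memory)         # True
print(MiniPartition(FileArray()).on_disk)           # True
print(MiniPartition(CachedArray()).in_cached_file)  # True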
True if and only if the partition's subarray is on disk in a temporary file.
def in_cached_file(self):
    return isinstance(self._subarray, CachedArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def in_file(self):\n return self.on_disk and not self.in_cached_file", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def output_files_exist(self):\n return all([split.exists() for split in self.split_files])", "def has_fileout(self):\n return self.fileout is not None", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def prune_empty(self): # FileObj.prune_empty\n return False # can't prune a file", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def is_file_ingested(self, original_name, tablename):\n prep_stmt = self.session.prepare(\n 'SELECT * FROM {0} WHERE {1}=?'.format(tablename, COLUMNS_META[2])\n )\n bound = 
prep_stmt.bind([original_name])\n results = self.session.execute(bound)\n return True if len(results.current_rows) > 0 else False", "def has_full_batch(self) -> bool:", "def file_populated(filepath):\n\n return file_exists(filepath) and os.stat(filepath).st_size > 0", "def exists(self):\n\t\tif self.hasUdim:\n\t\t\treturn len( self.udimPaths ) != 0\n\t\treturn super( textureFile, self ).exists", "def has_local_tails_file(self) -> bool:\n tails_file_path = Path(self.get_receiving_tails_local_path())\n return tails_file_path.is_file()", "def check_for_file(self):\n if self.task.file_name in os.listdir(self.task.file_storage):\n return True\n return False", "def is_local(self):\n try:\n return os.path.isfile(self.get_absolute_path())\n except ValueError:\n logger.error(\"'%s' is not a file\", self.get_absolute_path())\n except TypeError: # no datafile available or file does not exist\n pass\n return False", "def is_multi_file(self):\n return 'files' in self.torrent['info']", "def is_dataset_exported(filename):\n try:\n with open(filename):\n return True\n except IOError:\n return False", "def is_complete(self) -> bool:\n return (\n (\n self.materialized_subset | self.failed_and_downstream_subset\n ).num_partitions_and_non_partitioned_assets\n == self.target_subset.num_partitions_and_non_partitioned_assets\n )", "def check_corrupted_files(self):\r\n for store in STORES:\r\n path = f\"{self.system.config_path}/.storage/{STORES[store]}\"\r\n if os.path.exists(path):\r\n if os.stat(path).st_size == 0:\r\n # File is empty (corrupted)\r\n return True\r\n return False", "def isTemp(self,object):\n return (object in self.tempObjects)", "def isfile(self):\n return os.path.isfile(self.path)", "def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()", "def _check_truncation(self):\n\n temp_pos = self._handle.tell()\n self._handle.seek(-28, 2)\n eof = self._handle.read()\n self._handle.seek(temp_pos)\n if eof == _bgzf_eof:\n return False\n else:\n warnings.BytesWarning('No EOF character found. File may be truncated')\n return True", "def exists(self):\r\n return os.path.exists(self.full_path)", "def verify_results(outdir_path, original_array_path, R, O, file_format, addition, split_merge=False):\n\n if file_format == \"HDF5\":\n file_manager = HDF5_manager()\n else:\n print(\"File format not supported yet. 
Aborting...\")\n sys.exit(1)\n\n partition = get_blocks_shape(R, O)\n orig_arr_data = file_manager.read_all(original_array_path)\n all_true = True\n\n if split_merge:\n result_arrpath = os.path.join(outdir_path, \"0_0_0.hdf5\")\n return file_manager.check_split_merge(original_array_path, result_arrpath)\n\n for i in range(partition[0]):\n for j in range(partition[1]):\n for k in range(partition[2]):\n outfilepath = os.path.join(outdir_path, str(i) + \"_\" + str(j) + \"_\" + str(k) + \".hdf5\")\n data_stored = file_manager.read_all(outfilepath)\n ground_truth = orig_arr_data[i*O[0]:(i+1)*O[0],j*O[1]:(j+1)*O[1],k*O[2]:(k+1)*O[2]]\n \n if addition:\n ground_truth = ground_truth +1\n\n try:\n assert np.allclose(data_stored, ground_truth, rtol=1e-02)\n # print(f\"Good output file {outfilepath}\")\n except:\n print(f\"Error: bad rechunking {outfilepath}\")\n print(f\"Slices from ground truth {i*O[0]}:{(i+1)*O[0]}, {j*O[1]}:{(j+1)*O[1]}, {k*O[2]}:{(k+1)*O[2]}\")\n print(\"data_stored\", data_stored)\n print(\"ground_truth\", ground_truth)\n all_true = False # do not return here to see all failures\n\n file_manager.close_infiles() # close all files\n return all_true", "def ofile_exists(self):\n return os.path.isfile(self.ofile)", "def eof_check(self) -> bool:\n eof = False\n curr_pos = self.fileobject.tell()\n # print(curr_pos, self.st_size)\n chunk = self.fileobject.read(25)\n if chunk == '':\n # Is there something on the back burner??\n if len(self._backburner) > 0:\n self.fileobject = self._backburner.pop()\n # TODO: what if it is the end of the back burner file? Is that handled?\n else:\n eof = True\n else:\n self.fileobject.seek(curr_pos)\n return eof", "def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]", "def check_coords_file(self):\n if path.exists(self.coords_file):\n return True\n return False", "def has_file(self) -> bool:\n return self._file is not None", "def _register_temporary_file(self):\n _partition_file = self._subarray._partition_file\n _partition_dir = self._subarray._partition_dir\n if _partition_file not in _temporary_files:\n fd, _lock_file = mkstemp(\n prefix=_partition_file + \"_\", dir=_partition_dir\n )\n close(fd)\n _temporary_files[_partition_file] = (\n _partition_dir,\n _lock_file,\n set(),\n )\n else:\n _, _lock_file, _ = _temporary_files[_partition_file]\n\n return _lock_file", "def check_already_extracted(video_parts):\n filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(output_dir,\n filename_no_ext + '-0030.jpg')))", "def found_empty_file(self):\n self.is_empty = True", "def objectsReady(self, n):\n return len(self.files) >= n", "def contains_files(self):\n if self.file_list is None:\n self._set_file_list()\n for individual_file in self.file_list:\n if not os.path.exists(os.path.join(self.base_dir, individual_file)):\n return False\n return True", "def is_writable_file(obj):\n try:\n obj.write(\"\")\n except(AttributeError, OSError, IOError):\n return False\n else:\n return True", "def has_measurement(self, filepath):\n entries = self.get_measurement(file_path=filepath, select_list=[COL_NAME_FILES_MEASID])\n return len(entries) == 1", "def has_contents(self):\n return len(self.byteruns())>0", "def exists(self):\n return self.path.is_file()", "def meshfn_is_used(meshfn):\n if glob.glob(meshfn):\n return True\n else:\n return False", "def path_contains_data(bucket, root_path, min_file_size=0, file_extension=None):\n for key in bucket.list(root_path):\n if 
file_extension and not key.name.endswith(file_extension):\n continue\n if key.size > min_file_size:\n return True\n\n return False", "def test_get_subgrid_snapshots():\n subgrid_snapshots = get_subgrid_snapshots(nTime=200)\n\n subgrid_snapshots_corr = np.load('./preprocessing/tests/test_data/\\\nsubgrid_snapshots.npy')\n\n assert (subgrid_snapshots == subgrid_snapshots_corr).all()", "def exist(self):\n return self.file_path.exists()", "def has_file(self, doc):\n return len(doc.package.files) != 0", "def is_empty(self):\n if self.file_exists:\n with open_hdf5(self.file_name) as h:\n return len(h.keys()) == 0\n else:\n return True", "def is_separate_file(self):\n return self.uri is not None and not self.has_data_uri", "def check_helpers(self):\n paths = self.get_helper_out_paths()\n\n for p in paths:\n full_path = p + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return False\n\n return True", "def checkFileInObject(self, obj, fp):\n\n containted = False\n ## Haso of all files\n filehash = self.getObjectFilesHash(obj)\n\n ## Local hash\n BLOCKSIZE = 65536\n hasher = hashlib.sha1()\n\n with fp.open('rb') as afile:\n buf = afile.read(BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(BLOCKSIZE)\n localhash = hasher.hexdigest()\n\n if localhash.upper() in filehash:\n containted = True\n\n return containted", "def check_needs_upload(self, path):\n if self.upload_always:\n return True\n fn = '/'.join([self.hdfs_home, '.knitDeps', os.path.basename(path)])\n if self.hdfs and self.hdfs.exists(fn):\n st = os.stat(path)\n size = st.st_size\n t = st.st_mtime\n info = self.hdfs.info(fn)\n if info['size'] == size and t < info['last_mod']:\n return False\n else:\n return True\n else:\n return True", "def check_enough_space(dataset_local_dir, remote_fname, local_fname,\n max_disk_usage=0.9):\n storage_need = os.path.getsize(remote_fname)\n storage_total, storage_used = disk_usage(dataset_local_dir)\n\n # Instead of only looking if there's enough space, we ensure we do not\n # go over max disk usage level to avoid filling the disk/partition\n return ((storage_used + storage_need) <\n (storage_total * max_disk_usage))", "def contains_offset(self, offset):\n return (offset >= self.offset) and (offset < self.offset + self.filesize)", "def assert_data_fragments_correct(self) -> bool:\n read_path = Path(os.environ[\"DATA_PATH\"]) / \"fragments\"\n if not read_path.exists():\n return False\n bin_images = [img for img in read_path.iterdir() if \"binarized\" in img.name]\n if len(bin_images) == 0:\n return False\n return True", "def is_subtag(tag_name, subtag_name, user_path, current_user) -> bool:\n user = current_user[0]\n subtag_list = os.listdir((user_path + '\\\\' + user + '\\\\' + tag_name).encode('unicode_escape'))\n temp = list(map(bytes.decode, subtag_list))\n\n if subtag_name + '.txt' in temp:\n return True\n else:\n return False", "def is_wave(self,path):\n return len(path[0])>1", "def efile_exists(self):\n return os.path.isfile(self.efile)", "def exist_partition(self, partition_spec):\n return partition_spec in self.partitions", "def _has_processed_data(self):\n return \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_train_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_dev_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._word_vocab_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._char_vocab_file_name))", "def 
check(self, evidence, path_on_disk):\n return True", "def _supports_binary_writing(path):\n return not path.startswith(\"/bigstore\")", "def _split_exists( file_list, target_locus ):\n for filename in file_list:\n basename = filename.split('.')[0]\n parts = basename.split('_')\n if parts[-1] in ['5p', '3p'] and parts[-2] == target_locus:\n return True\n return False", "def is_empty_file(fpath):\n return \\\n fpath is not None and \\\n os.path.isfile(fpath) and \\\n os.path.getsize(fpath) == 0", "def is_input_file(self):\r\n return self.depth == 0", "def _loaded_data(self):\n try:\n dsize = [int(d) for d\n in self.run('fits size', via='get').split()]\n except (ValueError, TypeError, AttributeError) as err:\n log.debug(f' FITS size error: {err}')\n return False\n else:\n if 0 in dsize:\n return False\n else:\n return True", "def test_as_file_false(self):\n with TemporaryDirectory() as tmp:\n # define path to file\n fp = os.path.join(tmp, \"asdf.txt\")\n\n # invoke atomic_write with param as_file set to False\n # this should return a temporary file path string\n with atomic_write(fp, as_file=False) as f:\n self.assertIsInstance(f, str)", "def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')", "def check_already_extracted(video_parts):\n train_or_test, classname, filename_no_ext, _ = video_parts\n return bool(os.path.exists(os.path.join(\"/data/niteshku001/Ravdess\", train_or_test, classname,\n filename_no_ext + '-0001.jpg')))", "def f_supports_fast_access(self):\n return not self.f_is_empty()", "def test_my_subset_files_exist():\n assert isinstance(dist_a_subset, pd.DataFrame)\n assert isinstance(dist_b_subset, pd.DataFrame)\n assert isinstance(clone_df_subset, pd.DataFrame)", "def object_exists(self, fname):\n return False", "def seekable(self):\n # Not seekable, but we do support tell...\n return False", "def is_full(self) -> bool:\n return self._array[0].all()", "def cleanup_incomplete_uploads_from_blob_store() -> bool:\n\n DAYS_TO_RETAIN = 1\n\n # Get current time in UTC timezone\n now = datetime.datetime.now(pytz.timezone(\"UTC\"))\n\n client = get_s3_client(settings=node.settings)\n incomplete_upload_objs = client.list_multipart_uploads(Bucket=node.id.no_dash).get(\n \"Uploads\", []\n )\n\n for obj in incomplete_upload_objs:\n # Get the upload id and object name\n upload_id: str = obj[\"UploadId\"]\n obj_name: str = obj[\"Key\"]\n\n # Get the list of all parts of the object uploaded\n # This step is required to get the upload time of the object\n object_parts: list = client.list_parts(\n Bucket=node.id.no_dash, UploadId=upload_id, Key=obj_name\n ).get(\"Parts\", [])\n\n obj_part_expired = False\n for part in object_parts:\n # Normalize upload time to UTC timezone\n part_upload_time = pytz.timezone(\"UTC\").normalize(part[\"LastModified\"])\n\n # If upload time of any part of the object\n # crosses DAYS_TO_RETAIN, then expire the whole object\n if (now - part_upload_time).days > DAYS_TO_RETAIN:\n obj_part_expired = True\n break\n\n if obj_part_expired:\n # Abort multipart upload\n client.abort_multipart_upload(\n UploadId=upload_id,\n Key=obj_name,\n Bucket=node.id.no_dash,\n )\n\n return True", "def seekable(self):\n return True", "def PartiallyEmpty(self):\n return None==self.piecesToRecover", "def object_exists(self, fname):\n return True", "def isSegmentFile(self, segment):\n return os.path.isfile(\"{wd}/{jn}-run/{seg}.rst7\".format( wd=self.workdir, jn=self.jobname, seg=segment.getNameString()))", "def is_primary_file(self):\n return 
self.file_type() == FileType.FILE_TYPE_PRIMARY", "def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True", "def hdf_exist_p(store, data_name):\n if not data_name.startswith(\"/\"):\n data_name = \"/\" + data_name\n if os.path.isfile(store):\n with HDFStore(store) as hdf:\n return data_name in hdf.keys()\n else:\n return False", "def seekable(self):\n self._check_not_closed()\n return False", "def is_in_swestore(f):\n with open(os.devnull, 'w') as null:\n try:\n check_call(['ils', f], stdout=null, stderr=null)\n except CalledProcessError:\n # ils will fail if the file does not exist in swestore\n return False\n else:\n return True", "def isfile (self, path):\r\n pass", "def isfile(self):\n return not self.isdir()", "def _index_file_exists(idx_fn):\n if os.path.exists(idx_fn + \".npy\") and os.path.exists(idx_fn + \".info\"):\n return True\n else:\n return False", "def check_valid_device(self, path, run_as_root=True):\n sheepdog_handle = path\n\n if sheepdog_handle is None:\n return False\n\n original_offset = sheepdog_handle.tell()\n\n try:\n sheepdog_handle.read(4096)\n except Exception as e:\n LOG.error(\"Failed to access sheepdog device \"\n \"handle: %(error)s\",\n {\"error\": e})\n return False\n finally:\n sheepdog_handle.seek(original_offset, 0)\n\n return True", "def checkTrueArrayIsDeleted(node):\n if (node.raw) is not None or (node.raw_housing) is not None:\n raise Exception(\"The true data array has not been deleted\")", "def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")", "def coeff_write_ok(self):\n return os.access(self.coeffroot, os.W_OK)", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def exists_in_path(self):\n return os.path.isfile(self.IN_PATH)", "def test_record_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.record\"\n result = is_dataset_file(path)\n self.assertTrue(result)", "def exists(path: str) -> bool:\n tdb_uri = paths.tiledb_uri_from_path(path)\n try:\n tiledb.cloud.array.info(tdb_uri)\n return True\n except tiledb.cloud.TileDBCloudError:\n pass\n return False", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def is_empty(self): # DirObj.is_empty\n\n for fileName, fileEntry in self.files.iteritems():\n if not fileEntry.deleted and not fileEntry.ignore:\n #print '# ' + self.pathname + ' is not empty due to a file ' + fileEntry.name\n return False\n\n for dirName, subdir in self.subdirs.iteritems():\n if not subdir.deleted and not subdir.is_empty() and not subdir.ignore:\n #print '# ' + self.pathname + ' is not empty due to a dir ' + subdir.name\n return False\n\n #print '# ' + self.pathname + ' is empty!'\n return True", "def is_file(file_to_test):\r\n return all(hasattr(file_to_test, method) for method in ['read', 'name'])", "def 
is_signed(self):\n file_size = os.stat(self._file_name).st_size\n self._document.seek(file_size - self._append_size)\n last = self._document.read()\n self._document.seek(0)\n\n if not (chr(last[0]) == self._seperator and chr(last[-1]) == self._seperator):\n return False\n else:\n return True" ]
[ "0.72301924", "0.65045905", "0.64364123", "0.5942543", "0.5936233", "0.591641", "0.5817689", "0.5676702", "0.5673538", "0.5658594", "0.5589181", "0.5555981", "0.5479751", "0.5440639", "0.5393117", "0.53697217", "0.5364029", "0.5358785", "0.5355252", "0.53492653", "0.53334475", "0.5329815", "0.53274125", "0.5301783", "0.5297761", "0.52869755", "0.5279954", "0.5278576", "0.5257029", "0.524318", "0.52423215", "0.5241412", "0.52393115", "0.5205895", "0.5195221", "0.51760715", "0.5174296", "0.51727843", "0.5172245", "0.51699734", "0.5169771", "0.51676923", "0.51647234", "0.5161527", "0.51583415", "0.51561344", "0.5154286", "0.5152785", "0.5142926", "0.5141158", "0.51403356", "0.51370573", "0.512613", "0.5125592", "0.5118623", "0.5115187", "0.5111936", "0.5109239", "0.5108766", "0.5105998", "0.5101779", "0.5098058", "0.5095226", "0.50934654", "0.5084821", "0.50838816", "0.50832033", "0.5078422", "0.50760955", "0.50751746", "0.5072461", "0.5069113", "0.50690866", "0.506589", "0.5064742", "0.50637734", "0.50543684", "0.5052863", "0.5049823", "0.50463456", "0.50451845", "0.503784", "0.50215477", "0.50103503", "0.5008163", "0.49985757", "0.49982268", "0.49915206", "0.49892285", "0.49871543", "0.49817294", "0.49776906", "0.49693653", "0.49679446", "0.49560532", "0.49558458", "0.4954451", "0.49513745", "0.4950765", "0.49480093" ]
0.58127344
7
True if and only if the partition's subarray is on disk as opposed to in memory.
def on_disk(self): return isinstance(self._subarray, FileArray)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. 
Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")", "def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()", "def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)", "def is_array(self):\n return len(self.descriptor) > 1", "def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True", "def is_full(self) -> bool:\n return self._array[0].all()", "def in_file(self):\n return self.on_disk and not self.in_cached_file", "def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True", "def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)", "def _is_size_bound(self, path):\n return path.suffix == \".bin\"", "def _array_name_implies_ND_slice(self, array_name):\n for v in self._split_arrays.values():\n if array_name in v:\n return True\n\n generic_match = re.findall(\"^(.+)_[xyz]$\", array_name)\n loadable_keys = self.loadable_keys()\n keys = list(self.keys())\n if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:\n return generic_match[0] in loadable_keys or generic_match[0] in keys\n return False", "def has_shareable_memory(a):\r\n return _get_backing_memmap(a) is not None", "def is_partition(dev):\n dev = os.path.realpath(dev)\n if not stat.S_ISBLK(os.lstat(dev).st_mode):\n raise Error('not a block device', dev)\n\n name = get_dev_name(dev)\n if os.path.exists(os.path.join('/sys/block', name)):\n return False\n\n # make sure it is a partition of something else\n for basename in os.listdir('/sys/block'):\n if os.path.exists(os.path.join('/sys/block', basename, name)):\n return True\n\n raise Error('not a disk or partition', dev)", "def has_full_batch(self) -> bool:", "def is_part_of_disk(part_device_path, disk_device_path):\n is_part_of_disk = False\n\n if disk_device_path in part_device_path:\n is_part_of_disk = True\n elif constants.DEVICE_NAME_MPATH in disk_device_path:\n path_split = 
disk_device_path.split(constants.DEVICE_NAME_MPATH)\n if (path_split[0] in part_device_path and\n path_split[1] in part_device_path):\n is_part_of_disk = True\n\n return is_part_of_disk", "def has_next(self):\n while self._row < self._n and not self._arr[self._row]: # current sub-array is empty\n self._row += 1 # move to next sub-array\n self._col = 0\n if self._row >= self._n: # end of master-array already\n return False\n return True", "def exist_partition(self, partition_spec):\n return partition_spec in self.partitions", "def isFull(self) -> bool:\n return self._elems == self._k", "def _loaded_data(self):\n try:\n dsize = [int(d) for d\n in self.run('fits size', via='get').split()]\n except (ValueError, TypeError, AttributeError) as err:\n log.debug(f' FITS size error: {err}')\n return False\n else:\n if 0 in dsize:\n return False\n else:\n return True", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def is_slice(self) -> bool:\n return self._is_slice", "def contains_offset(self, offset):\n return (offset >= self.offset) and (offset < self.offset + self.filesize)", "def is_mounted(device):\n\n partitions = psutil.disk_partitions()\n device_path = \"/dev/\" + device\n for i in partitions:\n if i.device == device_path:\n return True\n return False", "def full(self):\n return self.size >= self.maxsize", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def full(self):\n return self._current_size == self._size", "def is_full(self) -> bool:\r\n return self.size == self.capacity", "def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False", "def is_complete(self) -> bool:\n return (\n (\n self.materialized_subset | self.failed_and_downstream_subset\n ).num_partitions_and_non_partitioned_assets\n == self.target_subset.num_partitions_and_non_partitioned_assets\n )", "def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res", "def isFull(self):\n\t\treturn self.size == self.capacity", "def is_full(self):\n\n return self.count == len(self.array)", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def is_array(self):\n return False", "def is_full(self):\n return self.idx == self.len", "def f_supports_fast_access(self):\n return len(self._data) == 1 and self.v_name in self._data", "def is_distributed(self) -> bool:\n return self.size > 1", "def isFull(self):\n return self.rear == self.size", "def is_subdivision_available(self, position: np.ndarray) -> bool:\n\t\tsubdivision_x_index = int(position[0]) // self.square_subdivision_length\n\t\tsubdivision_y_index = int(position[1]) // self.square_subdivision_length\n\t\treturn self.plane_subdivisions_availability[subdivision_x_index, subdivision_y_index] == 1", "def is_subset(subset: np.array, superset: np.array) -> bool:\n superset_lookup = set(superset)\n for val in subset:\n if val not in superset_lookup:\n return False\n\n return True", "def is_array(a):\n try:\n shape = a.shape\n 
return len(shape) >= 1\n except AttributeError:\n return False", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def checkTrueArrayIsDeleted(node):\n if (node.raw) is not None or (node.raw_housing) is not None:\n raise Exception(\"The true data array has not been deleted\")", "def is_full(self):\n return len(self._data) == 1", "def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", "def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN", "def is_full(self) -> bool:", "def f_supports_fast_access(self):\n return not self.f_is_empty()", "def is_virtualized (self):\n return len([i for i in self.infras if\n i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,\n self.TYPE_INFRA_STATIC_EE)]) > 0", "def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False", "def is_leaf(self, pos):\n if pos >= (self.size//2) and pos <= self.size: \n return True\n return False", "def regular(self):\n if all(self._volumes - self._volumes[0] == 0):\n return True\n else:\n return False", "def storage_can_read(self):\n return True", "def full(self) -> bool:\n return self.maxsize and self.qsize() >= self.maxsize", "def is_bad_partition(par):\n return 'Letter' not in par or REGEX_BAD_PARTITION.search(par['FileSystem'])", "def isFull(self):\n if len(self._data) == self._length:\n return True\n else:\n return False", "def has_definite_size(iterable):\n return hasattr(iterable, '__len__')", "def _supports_binary_writing(path):\n return not path.startswith(\"/bigstore\")", "def _has_processed_data(self):\n return \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_train_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_dev_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._word_vocab_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._char_vocab_file_name))", "def check_subarray(array1, array2):\r\n \r\n # check assumption\r\n if (len(array2.shape) != 1) or (array2.shape[0] != array1.shape[-1]):\r\n raise ValueError('Attempting to check for subarray equality when shape assumption does not hold.')\r\n \r\n return np.all(array1==array2, axis=-1)", "def prune_empty(self): # FileObj.prune_empty\n return False # can't prune a file", "def is_fragmentable(fragment_size, offset, chunk_size):\n return ((chunk_size - fragment_size) / offset) % 1 == 0", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def disk_is_valid(dhandle):\n if is_64bits:\n return dhandle.value != c_uint64(0).value\n else:\n return dhandle.value != c_uint32(0).value", "def full(self) -> bool:\n return self.current_offset == self.max_offset", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def is_full(self):\n return self.heap_size >= self.capacity", "def is_dataset(self):\n return self._dataset is not None", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def __bool__(self):\n for root, products in 
self.rel_paths():\n if products:\n return True\n return False", "def PartiallyEmpty(self):\n return None==self.piecesToRecover", "def path_contains_data(bucket, root_path, min_file_size=0, file_extension=None):\n for key in bucket.list(root_path):\n if file_extension and not key.name.endswith(file_extension):\n continue\n if key.size > min_file_size:\n return True\n\n return False", "def isFull(self) -> bool:\n return self.size == self.maxlen", "def has_contents(self):\n return len(self.byteruns())>0", "def is_empty(self):\n return self.size == []", "def fs_ok(fs_info):\n if fs_info.mountpoint == '/':\n return True\n\n if (fs_info.device == fs_info.fstype or fs_info.fstype == 'nullfs' or\n '/docker' in fs_info.mountpoint or\n fs_info.mountpoint.startswith('/etc') or\n fs_info.mountpoint.startswith('/lib/modules')):\n return False\n\n if fs_info.device.startswith('/dev/'):\n return True\n\n return False", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def is_total_slice(item, shape: Tuple[int]) -> bool:\n\n # N.B., assume shape is normalized\n\n if item == Ellipsis:\n return True\n if item == slice(None):\n return True\n if isinstance(item, slice):\n item = (item,)\n if isinstance(item, tuple):\n return all(\n (\n isinstance(it, slice)\n and ((it == slice(None)) or ((it.stop - it.start == sh) and (it.step in [1, None])))\n )\n for it, sh in zip(item, shape)\n )\n else:\n raise TypeError(\"expected slice or tuple of slices, found %r\" % item)", "def isFull(T):\r\n return len(T.data) >= T.max_data", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"", "def full(self):\r\n if self._maxsize <= 0:\r\n return False\r\n else:\r\n return self.qsize() >= self._maxsize", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def has_catalog_data (ff_hdus_list, which_hdu=1):\n if (which_hdu == 0): # not allowed? FITS 4.0: 3.3.2\n return False\n else: # it's an extension and so marked\n return ( (len(ff_hdus_list) > which_hdu) and\n (ff_hdus_list[which_hdu].header.get('XTENSION') in ['BINTABLE', 'TABLE']) )", "def is_leaf(self):\n return self.pixel_count > 0", "def utilize_ephemeral_storage(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"utilize_ephemeral_storage\")", "def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1", "def full(self) -> bool:\n\n return (self._size == self._capacity)", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def _is_empty(self):\n return self.size == 0", "def iseod(self):\n\n return self.byte_ptr >= len(self.data)" ]
[ "0.69458973", "0.67629313", "0.6696292", "0.656141", "0.63517463", "0.6290223", "0.61257756", "0.59961635", "0.59486187", "0.59276515", "0.59175736", "0.5881189", "0.5803704", "0.577553", "0.5774722", "0.5755425", "0.5737828", "0.57311875", "0.5723483", "0.5720749", "0.5682879", "0.5678204", "0.56532705", "0.5638273", "0.56376", "0.5626054", "0.5618182", "0.5615703", "0.56024516", "0.5587035", "0.5584256", "0.557709", "0.5576315", "0.55673695", "0.5561574", "0.5530081", "0.5504011", "0.5486583", "0.54836535", "0.5483039", "0.54767233", "0.5468246", "0.5459933", "0.545245", "0.54522634", "0.54515356", "0.54295284", "0.54217386", "0.5419535", "0.54073864", "0.5398171", "0.53974444", "0.53786916", "0.53773993", "0.53724277", "0.5371112", "0.5369092", "0.53521395", "0.5340982", "0.5307776", "0.5302411", "0.5292718", "0.528551", "0.5282625", "0.52814937", "0.52775794", "0.52765363", "0.5272008", "0.5271376", "0.52550143", "0.5254123", "0.52478075", "0.52476114", "0.5246837", "0.5246544", "0.5245442", "0.52408814", "0.5235635", "0.5234526", "0.52259785", "0.52233267", "0.5223169", "0.52195394", "0.521708", "0.5217009", "0.52162397", "0.52134025", "0.5211867", "0.5211654", "0.5211134", "0.52094615", "0.52087545", "0.52043337", "0.5201568", "0.51984894", "0.51961607", "0.5194446", "0.5194344", "0.51878417", "0.51835746" ]
0.80375713
0
True if and only if the partition's subarray is on disk as opposed to in memory.
def in_file(self): return self.on_disk and not self.in_cached_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. 
Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def has_subfile(self) -> bool:\n\t\tself._update_subfiles()\n\t\treturn bool(len(self.subfiles))", "def _is_partitioned(self):\n ## check if the table are partitioned, need the split because of a change in the type of partitions in pydantic\n partitions = self.table_config[\"partitions\"]\n if partitions is None or len(partitions) == 0:\n return False\n\n if isinstance(partitions, list):\n # check if any None inside list.\n # False if it is the case Ex: [None, 'partition']\n # True otherwise Ex: ['partition1', 'partition2']\n return all(item is not None for item in partitions)\n\n raise ValueError(\"Partitions must be a list or None\")", "def IsAllocated(self):\n return self._fsntfs_file_entry.is_allocated()", "def isleaf(self):\n no_kids = super(PartitionDevice, self).isleaf\n # it is possible that the disk that originally contained this partition\n # no longer contains a disklabel, in which case we can assume that this\n # device is a leaf\n if self.disk and self.partedPartition and \\\n self.disk.format.type == \"disklabel\" and \\\n self.partedPartition in self.disk.format.partitions:\n disklabel = self.disk.format\n else:\n disklabel = None\n\n extended_has_logical = (self.isExtended and\n (disklabel and disklabel.logicalPartitions))\n return (no_kids and not extended_has_logical)", "def is_array(self):\n return len(self.descriptor) > 1", "def is_space_available(partition, size):\n available_space = psutil.disk_usage(partition).free\n return False if available_space < size else True", "def is_full(self) -> bool:\n return self._array[0].all()", "def is_partition_the_last(dbapi, partition):\n idisk_uuid = partition.get('idisk_uuid')\n onidisk_parts = dbapi.partition_get_by_idisk(idisk_uuid)\n part_number = get_part_number(partition.get('device_path'))\n\n if int(part_number) != len(onidisk_parts):\n return False\n\n return True", "def _are_features_already_extracted(self, output_path: str, subset: str) -> bool:\n file_path = join(output_path, subset + '.npy')\n return os.path.exists(file_path)", "def _is_size_bound(self, path):\n return path.suffix == \".bin\"", "def _array_name_implies_ND_slice(self, array_name):\n for v in self._split_arrays.values():\n if array_name in v:\n return True\n\n generic_match = re.findall(\"^(.+)_[xyz]$\", array_name)\n loadable_keys = self.loadable_keys()\n keys = list(self.keys())\n if len(generic_match) == 1 and generic_match[0] not in self._split_arrays:\n return generic_match[0] in loadable_keys or generic_match[0] in keys\n return False", "def has_shareable_memory(a):\r\n return _get_backing_memmap(a) is not None", "def is_partition(dev):\n dev = os.path.realpath(dev)\n if not stat.S_ISBLK(os.lstat(dev).st_mode):\n raise Error('not a block device', dev)\n\n name = get_dev_name(dev)\n if os.path.exists(os.path.join('/sys/block', name)):\n return False\n\n # make sure it is a partition of something else\n for basename in os.listdir('/sys/block'):\n if os.path.exists(os.path.join('/sys/block', basename, name)):\n return True\n\n raise Error('not a disk or partition', dev)", "def has_full_batch(self) -> bool:", "def is_part_of_disk(part_device_path, disk_device_path):\n is_part_of_disk = False\n\n if disk_device_path in part_device_path:\n is_part_of_disk = True\n elif constants.DEVICE_NAME_MPATH in disk_device_path:\n path_split = disk_device_path.split(constants.DEVICE_NAME_MPATH)\n if (path_split[0] in 
part_device_path and\n path_split[1] in part_device_path):\n is_part_of_disk = True\n\n return is_part_of_disk", "def has_next(self):\n while self._row < self._n and not self._arr[self._row]: # current sub-array is empty\n self._row += 1 # move to next sub-array\n self._col = 0\n if self._row >= self._n: # end of master-array already\n return False\n return True", "def exist_partition(self, partition_spec):\n return partition_spec in self.partitions", "def isFull(self) -> bool:\n return self._elems == self._k", "def _loaded_data(self):\n try:\n dsize = [int(d) for d\n in self.run('fits size', via='get').split()]\n except (ValueError, TypeError, AttributeError) as err:\n log.debug(f' FITS size error: {err}')\n return False\n else:\n if 0 in dsize:\n return False\n else:\n return True", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def is_slice(self) -> bool:\n return self._is_slice", "def contains_offset(self, offset):\n return (offset >= self.offset) and (offset < self.offset + self.filesize)", "def is_mounted(device):\n\n partitions = psutil.disk_partitions()\n device_path = \"/dev/\" + device\n for i in partitions:\n if i.device == device_path:\n return True\n return False", "def full(self):\n return self.size >= self.maxsize", "def in_folder(self):\n return len(os.path.split(self.file_path)) > 1", "def full(self):\n return self._current_size == self._size", "def is_full(self) -> bool:\r\n return self.size == self.capacity", "def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False", "def is_complete(self) -> bool:\n return (\n (\n self.materialized_subset | self.failed_and_downstream_subset\n ).num_partitions_and_non_partitioned_assets\n == self.target_subset.num_partitions_and_non_partitioned_assets\n )", "def is_full(self):\n elements_in_sects = sum(\n map(opr.attrgetter(\"size\"), self.sects.values())\n )\n elements_in_total = fct.reduce(\n opr.mul, type(self).flatten_shape(self.shape), 1\n )\n res = elements_in_sects >= elements_in_total\n return res", "def isFull(self):\n\t\treturn self.size == self.capacity", "def is_full(self):\n\n return self.count == len(self.array)", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def is_array(self):\n return False", "def is_full(self):\n return self.idx == self.len", "def f_supports_fast_access(self):\n return len(self._data) == 1 and self.v_name in self._data", "def is_distributed(self) -> bool:\n return self.size > 1", "def isFull(self):\n return self.rear == self.size", "def is_subdivision_available(self, position: np.ndarray) -> bool:\n\t\tsubdivision_x_index = int(position[0]) // self.square_subdivision_length\n\t\tsubdivision_y_index = int(position[1]) // self.square_subdivision_length\n\t\treturn self.plane_subdivisions_availability[subdivision_x_index, subdivision_y_index] == 1", "def is_subset(subset: np.array, superset: np.array) -> bool:\n superset_lookup = set(superset)\n for val in subset:\n if val not in superset_lookup:\n return False\n\n return True", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def 
is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def checkTrueArrayIsDeleted(node):\n if (node.raw) is not None or (node.raw_housing) is not None:\n raise Exception(\"The true data array has not been deleted\")", "def is_full(self):\n return len(self._data) == 1", "def isFull(self):\n if len(self.batch) == self.__batch_size:\n return True\n return False", "def is_enough_space(self) -> bool:\n return self._free_space() > self.minimum_disk", "def pageable(self):\n return maxSRAM(self.mem) <= self.dev.SRAM_PAGE_LEN", "def is_full(self) -> bool:", "def f_supports_fast_access(self):\n return not self.f_is_empty()", "def is_virtualized (self):\n return len([i for i in self.infras if\n i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,\n self.TYPE_INFRA_STATIC_EE)]) > 0", "def is_full(self):\n if len(self._page_map) >= self.memory_size:\n return True\n return False", "def is_leaf(self, pos):\n if pos >= (self.size//2) and pos <= self.size: \n return True\n return False", "def regular(self):\n if all(self._volumes - self._volumes[0] == 0):\n return True\n else:\n return False", "def storage_can_read(self):\n return True", "def full(self) -> bool:\n return self.maxsize and self.qsize() >= self.maxsize", "def is_bad_partition(par):\n return 'Letter' not in par or REGEX_BAD_PARTITION.search(par['FileSystem'])", "def isFull(self):\n if len(self._data) == self._length:\n return True\n else:\n return False", "def has_definite_size(iterable):\n return hasattr(iterable, '__len__')", "def _supports_binary_writing(path):\n return not path.startswith(\"/bigstore\")", "def _has_processed_data(self):\n return \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_train_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._processed_dev_data_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._word_vocab_file_name)) and \\\n os.path.exists(\n os.path.join(self._data_root_path, self._char_vocab_file_name))", "def check_subarray(array1, array2):\r\n \r\n # check assumption\r\n if (len(array2.shape) != 1) or (array2.shape[0] != array1.shape[-1]):\r\n raise ValueError('Attempting to check for subarray equality when shape assumption does not hold.')\r\n \r\n return np.all(array1==array2, axis=-1)", "def prune_empty(self): # FileObj.prune_empty\n return False # can't prune a file", "def is_fragmentable(fragment_size, offset, chunk_size):\n return ((chunk_size - fragment_size) / offset) % 1 == 0", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def disk_is_valid(dhandle):\n if is_64bits:\n return dhandle.value != c_uint64(0).value\n else:\n return dhandle.value != c_uint32(0).value", "def full(self) -> bool:\n return self.current_offset == self.max_offset", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def is_full(self):\n return self.heap_size >= self.capacity", "def is_dataset(self):\n return self._dataset is not None", "def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True", "def is_internal(self):\n if self.is_leaf() or self.is_semileaf():\n return False\n return True", "def __bool__(self):\n for root, products in self.rel_paths():\n if products:\n return True\n return False", "def 
PartiallyEmpty(self):\n return None==self.piecesToRecover", "def path_contains_data(bucket, root_path, min_file_size=0, file_extension=None):\n for key in bucket.list(root_path):\n if file_extension and not key.name.endswith(file_extension):\n continue\n if key.size > min_file_size:\n return True\n\n return False", "def isFull(self) -> bool:\n return self.size == self.maxlen", "def has_contents(self):\n return len(self.byteruns())>0", "def is_empty(self):\n return self.size == []", "def fs_ok(fs_info):\n if fs_info.mountpoint == '/':\n return True\n\n if (fs_info.device == fs_info.fstype or fs_info.fstype == 'nullfs' or\n '/docker' in fs_info.mountpoint or\n fs_info.mountpoint.startswith('/etc') or\n fs_info.mountpoint.startswith('/lib/modules')):\n return False\n\n if fs_info.device.startswith('/dev/'):\n return True\n\n return False", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def is_total_slice(item, shape: Tuple[int]) -> bool:\n\n # N.B., assume shape is normalized\n\n if item == Ellipsis:\n return True\n if item == slice(None):\n return True\n if isinstance(item, slice):\n item = (item,)\n if isinstance(item, tuple):\n return all(\n (\n isinstance(it, slice)\n and ((it == slice(None)) or ((it.stop - it.start == sh) and (it.step in [1, None])))\n )\n for it, sh in zip(item, shape)\n )\n else:\n raise TypeError(\"expected slice or tuple of slices, found %r\" % item)", "def isFull(T):\r\n return len(T.data) >= T.max_data", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def has_leaf(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"has_leaf\"))\r\n return self._hvac_mode == \"eco\"", "def full(self):\r\n if self._maxsize <= 0:\r\n return False\r\n else:\r\n return self.qsize() >= self._maxsize", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0", "def has_catalog_data (ff_hdus_list, which_hdu=1):\n if (which_hdu == 0): # not allowed? FITS 4.0: 3.3.2\n return False\n else: # it's an extension and so marked\n return ( (len(ff_hdus_list) > which_hdu) and\n (ff_hdus_list[which_hdu].header.get('XTENSION') in ['BINTABLE', 'TABLE']) )", "def is_leaf(self):\n return self.pixel_count > 0", "def utilize_ephemeral_storage(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"utilize_ephemeral_storage\")", "def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1", "def full(self) -> bool:\n\n return (self._size == self._capacity)", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def _is_empty(self):\n return self.size == 0", "def iseod(self):\n\n return self.byte_ptr >= len(self.data)" ]
[ "0.80375713", "0.69458973", "0.67629313", "0.6696292", "0.656141", "0.63517463", "0.6290223", "0.61257756", "0.59961635", "0.59486187", "0.59276515", "0.59175736", "0.5881189", "0.577553", "0.5774722", "0.5755425", "0.5737828", "0.57311875", "0.5723483", "0.5720749", "0.5682879", "0.5678204", "0.56532705", "0.5638273", "0.56376", "0.5626054", "0.5618182", "0.5615703", "0.56024516", "0.5587035", "0.5584256", "0.557709", "0.5576315", "0.55673695", "0.5561574", "0.5530081", "0.5504011", "0.5486583", "0.54836535", "0.5483039", "0.54767233", "0.5468246", "0.5459933", "0.545245", "0.54522634", "0.54515356", "0.54295284", "0.54217386", "0.5419535", "0.54073864", "0.5398171", "0.53974444", "0.53786916", "0.53773993", "0.53724277", "0.5371112", "0.5369092", "0.53521395", "0.5340982", "0.5307776", "0.5302411", "0.5292718", "0.528551", "0.5282625", "0.52814937", "0.52775794", "0.52765363", "0.5272008", "0.5271376", "0.52550143", "0.5254123", "0.52478075", "0.52476114", "0.5246837", "0.5246544", "0.5245442", "0.52408814", "0.5235635", "0.5234526", "0.52259785", "0.52233267", "0.5223169", "0.52195394", "0.521708", "0.5217009", "0.52162397", "0.52134025", "0.5211867", "0.5211654", "0.5211134", "0.52094615", "0.52087545", "0.52043337", "0.5201568", "0.51984894", "0.51961607", "0.5194446", "0.5194344", "0.51878417", "0.51835746" ]
0.5803704
13
The data type of the master array.
def dtype(self): return self.config["dtype"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def datatype_name(self):\n return 'array'", "def data_type(self):\r\n return self._data_type", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def data_type(self):\n return self._data_type", "def dtype(self):\n return self.array.dtype", "def dtype(self):\n return self.data.dtype", "def dtype(self):\n return self._data.dtype", "def dtype(self):\n return self._data.dtype", "def type(self):\n return self.data.type", "def recarrtype(self):\n return str(self.dtype.shape) + self.dtype.base.str[1:]", "def dataType(self, data):\n if isinstance(data,str):\n return STRING\n elif isinstance(data,dict):\n return ASSOC\n elif isinstance(data,int) or isinstance(data,float):\n return STRING\n elif is_python2() and isinstance(data,long):\n return STRING\n elif isinstance(data, SpecArray.SpecArrayData):\n self.rows, self.cols = data.shape\n return data.type", "def dtype(self):\n return self.dataset.dtype", "def form_datatype(self):\n et = get_lh5_element_type(self)\n return 'array<1>{array<1>{' + et + '}}'", "def get_data_type(self, idx):\n return(self.data[idx].dtype)", "def data_type(self) -> int:\n return self.data[\"args\"][\"dataType\"]", "def dtype(self):\n return self._vars[0].dtype", "def dtype(self):\n return self.initial_value.dtype", "def required_data_type(self):\n return Data", "def datatype(self):\n # datatype is type of first dataarg\n return self[self.dataargs()[0]].typename", "def type_array():\n return []", "def GetDataType(self):\n return _gmat_py.ElementWrapper_GetDataType(self)", "def dtype(self):\n return self.__dtype", "def dtype(self):\n return self._dtype", "def dtype(self):\n return self._dtype", "def dtype(self):\n return self._dtype", "def type(self) -> np.dtype:\n return self._tensorInfo.dtype", "def dtype(self):\n return self._dtype", "def dtype(self):\n return self._channel.datatype", "def dtype(self) -> np.dtype:\n return self._channel_arrays[0].dtype", "def type(self) -> DataType:\n return self._type", "def get_array_element_type(self):\r\n return conf.lib.clang_getArrayElementType(self)", "def type(self) -> 'Data_Type':\n return Data_Type(self._info.htype, self._info.ptype)", "def dtype(self) -> np.dtype:\n ...", "def dtype(self):\n return self._fl.raw.dtype", "def array_type(self):\n return exclusions.closed()", "def getDataType(self):\n\n return self._dataType", "def NumPyDataType(self):\n datatype = self.DataType\n if datatype == gdalconst.GDT_Byte:\n pixeltype = self.GetMetadataItem('PIXELTYPE', 'IMAGE_STRUCTURE')\n if pixeltype == 'SIGNEDBYTE':\n return numpy.int8\n return numpy.uint8\n elif datatype == gdalconst.GDT_UInt16:\n return numpy.uint16\n elif datatype == gdalconst.GDT_UInt32:\n return numpy.uint32\n elif datatype == gdalconst.GDT_Int16:\n return numpy.int16\n elif datatype == gdalconst.GDT_Int32:\n return numpy.int32\n elif datatype == gdalconst.GDT_Float32:\n return numpy.float32\n elif datatype == gdalconst.GDT_Float64:\n return numpy.float64\n else:\n raise ValueError(\n \"Cannot handle DataType: {0}\".format(\n gdal.GetDataTypeName(datatype)\n )\n )", "def dtype(self) -> np.dtype:\n return self._dtype", "def datatype(self):\n return self._datatype", "def data_types(self):", "def data_types(self):\n return self['data_types']", "def dtype(self) -> np.dtype[np.void]:\n return np.dtype(list(self.items()))", "async def get_dtype(self):\r\n pass", "def dtype(self) -> str:\n return self._dtype", "def type(self):\n # type: () -> type\n return _python_type_map[str(self.xnd_dtype.hidden_dtype)]", "def 
dtype(self) -> DataType:\n return self._dtype", "def dtype(a):\n return a.dtype", "def to_numpy(self) -> np.dtype:\n return self._numpy_type", "def kind(self):\n # type () -> str\n return np.dtype(self.type).kind", "def dtype(self) -> DtypeLike:\n\n return self.data.dtype", "def type(self):\n return struct.unpack('<B', self.raw_data[0])[0]", "def data_type():\n return DataTypeUtil.getDTypeForName(DataTypeUtil.getDtypeFromContext())", "def data_type(self):\n try:\n return self.attributes.workspace.attributes['library:datatype']['items']\n except Exception as e:\n self._logger.debug(f\"data_category {e}\")\n return None", "def datatype(self):\n hcell = self._get_hcell2()\n celltype = hcell[\"celltype\"]\n assert celltype == \"structured\"\n return hcell[\"datatype\"]", "def memtype(self):\n # easy enough\n return self._memtype", "def data_type(self):\n return self.unpack_dword(0xC) & DEVPROP_MASK_TYPE", "def dtype(self):\n if self.num_polygons < 2 ** 8:\n dtype = numpy.uint8\n elif self.num_polygons < 2 ** 16:\n dtype = numpy.uint16\n else:\n dtype = numpy.uint32\n return dtype", "def _variable_array_types(self):\n return [\n 'BinaryArray',\n 'KeyValueArray',\n 'StringArray',\n 'TCEntityArray',\n 'TCEnhancedEntityArray',\n ]", "def dtype(self):\n # type: () -> ExtensionDtype\n return self._dtype", "def array_axis_physical_types(self) -> Iterable[Tuple[str, ...]]:", "def typecode (self) :\r\n return self.numeric_typecode", "def astype(self, dtype):\n return NoneArray", "def getDataSetType(self):\n return self.__data_set_type__", "def type(self):\n return _python_type_map[self.arrow_dtype.id]", "def outputDataType(self):\n raise NotImplementedError()", "def data_type(self) -> pulumi.Input['AssetModelDataType']:\n return pulumi.get(self, \"data_type\")", "def infer_dtype(self):\n raise NotImplementedError", "def base_dtype(self):\r\n _base_dtype = [\r\n (ptidCol, '<i8'),\r\n (surveyidCol, 'S25'),\r\n (siteCol, 'S10'),\r\n (tranCol, '<i4'),\r\n (datetimeCol, '<M8[us]'),\r\n (dateCol, 'S12'),\r\n (timeCol, 'S11'),\r\n (latCol, '<f8'),\r\n (lonCol, '<f8'),\r\n (depObsCol, '<f8'),\r\n (depInterpCol, '<f8'),\r\n (videoCol, '<i4'),\r\n ]\r\n return _base_dtype", "def get_type(self):\n\n return self.scalertype", "def value_type(self) -> global___Type:", "def data_particle_type(self):\n if self._data_particle_type is None:\n raise NotImplementedException(\"_data_particle_type not initialized\")\n\n return self._data_particle_type", "def _dtype(self):\n dref = {\n \"data_mode\": object,\n \"latitude\": np.float64,\n \"longitude\": np.float64,\n \"position_qc\": np.int64,\n \"time\": object,\n \"time_qc\": np.int64,\n \"direction\": object,\n \"platform_number\": np.int64,\n \"config_mission_number\": np.int64,\n \"vertical_sampling_scheme\": object,\n \"cycle_number\": np.int64,\n \"pres\": np.float64,\n \"temp\": np.float64,\n \"psal\": np.float64,\n \"doxy\": np.float64,\n \"pres_qc\": np.int64,\n \"temp_qc\": object,\n \"psal_qc\": object,\n \"doxy_qc\": object,\n \"pres_adjusted\": np.float64,\n \"temp_adjusted\": np.float64,\n \"psal_adjusted\": np.float64,\n \"doxy_adjusted\": np.float64,\n \"pres_adjusted_qc\": object,\n \"temp_adjusted_qc\": object,\n \"psal_adjusted_qc\": object,\n \"doxy_adjusted_qc\": object,\n \"pres_adjusted_error\": np.float64,\n \"temp_adjusted_error\": np.float64,\n \"psal_adjusted_error\": np.float64,\n \"doxy_adjusted_error\": np.float64,\n \"ptmp\": np.float64,\n }\n plist = self._minimal_vlist\n response = {}\n for p in plist:\n response[p] = dref[p]\n return 
response", "def dtype():\n return RaggedDtype()", "def dtype(self):\n return self.MJD.dtype", "def identity(self):\n return self.dtype()", "def type(self):\n # easy enough\n return self._dataset._pyre_id.type", "def _variable_types(self):\n return self._variable_single_types + self._variable_array_types", "def basetype(series):\n if not series.dtype == types.ObjectType:\n # Type is specific already; return it without further inspection.\n return series.dtype\n # Now type is Object (string) See if any more specific type can be deduced:", "def properties_dtype(self):\n return self.properties.dtype()", "def data_type_str(self):\n return data_ref_type_str(self.data_type)", "def test_datatype(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n\n assert hist1.datatype == str\n np.testing.assert_array_equal(hist2.datatype, [numpy.number, str])\n np.testing.assert_array_equal(hist3.datatype, [numpy.datetime64, numpy.number, str])", "def data_type_id(self) -> str:\n return self._data_type_id", "def get_data_type(self, col):\n if ((self.data_df[col].dtype == np.int64) or (self.data_df[col].dtype == np.int32)):\n return 'int'\n elif ((self.data_df[col].dtype == np.float64) or (self.data_df[col].dtype == np.float32)):\n return 'float'\n else:\n raise ValueError(\"Unknown data type of feature %s: must be int or float\" % col)", "def _getDTypeList(self):\n return self._dtype", "def get_data_type(self) -> Type[ATTRIBUTE_TYPES]:\n if isinstance(self.value, (list, tuple, set)):\n value = next(iter(self.value))\n else:\n value = self.value\n value = cast(ATTRIBUTE_TYPES, value)\n return type(value)", "def make_series_dtype(self):\n return numpy.dtype([\n # Timestep\n # ('t', numpy.int32),\n\n # Some data we store\n # If you add it here, you'll need to update the getters below too\n ('count', numpy.float64),\n # ('explored_by_type', numpy.float64, self.num_types)\n ])", "def data_types():\n\n return ...", "def dtype(self) -> tf.dtypes.DType:", "def _dtype(self):\n if self._dtype_ is not None:\n return self._dtype_\n dtype = None\n for raw_extra, filename in zip(self._raw_extras, self._filenames):\n for ent in raw_extra[\"ent\"]:\n if ent is not None:\n with _fiff_get_fid(filename) as fid:\n fid.seek(ent.pos, 0)\n tag = read_tag_info(fid)\n if tag is not None:\n if tag.type in (\n FIFF.FIFFT_COMPLEX_FLOAT,\n FIFF.FIFFT_COMPLEX_DOUBLE,\n ):\n dtype = np.complex128\n else:\n dtype = np.float64\n if dtype is not None:\n break\n if dtype is not None:\n break\n if dtype is None:\n raise RuntimeError(\"bug in reading\")\n self._dtype_ = dtype\n return dtype", "def data_type(x):\n return (\n DATA_TYPES.get(file_format(x)) or\n DATA_TYPES.get(genomic_file_ext(x))\n )", "def test_out_dtype(self):\n byt = bytscl(self.array1)\n dtype = byt.dtype\n self.assertEqual(dtype, 'uint8')", "def data_format(self):\n return self._data_format", "def type(self):\r\n return self.__type", "def create_data_types(self):\n for col in self.all_columns:\n try:\n if float(self.train[col].iloc[-3]):\n self.train[col] = self.train[col].astype(np.float32)\n except:\n pass\n self.d_types = self.train.dtypes", "def explore_type(name, datatype, is_child):\n target_type = datatype.target()\n print (\"%s is an array of '%s'.\" % (name, str(target_type)))\n\n Explorer.explore_type(\"the array element of %s\" % name, target_type,\n is_child)\n return False", "def datatype_conversion(self):\n\n category_cols 
= self.FEATURE_TYPES[\"category_cols\"]\n integer_cols = self.FEATURE_TYPES[\"integer_cols\"]\n float_cols = self.FEATURE_TYPES[\"float_cols\"]\n datetime_cols = self.FEATURE_TYPES[\"datetime_cols\"]\n string_cols = self.FEATURE_TYPES[\"string_cols\"]\n bool_cols = self.FEATURE_TYPES[\"bool_cols\"]\n data = self.data\n \n data[category_cols] = data[category_cols].astype('category',copy=False) \n data[integer_cols] = data[integer_cols].astype('int64',copy=False)\n data[float_cols] = data[float_cols].astype('float64',copy=False)\n data[datetime_cols] = data[datetime_cols].astype('datetime64[ns]',copy=False)\n data[string_cols] = data[string_cols].astype('str',copy=False)\n data[bool_cols] = data[bool_cols].astype('bool', copy=False)\n\n return data", "def get_data_structure_type(self) -> str:\n return \"ColorGrid\"", "def test_array_dtype(self):\n dt1 = np.dtype('f4', (2,))\n dt2 = np.dtype('f4', [2])\n dt3 = np.dtype('f4', 2)\n dt4 = np.dtype('f4', 2.1)\n ht1 = h5t.py_create(dt1)\n ht2 = h5t.py_create(dt2)\n ht3 = h5t.py_create(dt3)\n ht4 = h5t.py_create(dt4)\n self.assertEqual(ht1.dtype, dt1)\n self.assertEqual(ht2.dtype, dt1)\n self.assertEqual(ht3.dtype, dt1)\n self.assertEqual(ht4.dtype, dt1)", "def GetType(vDataSet):\r\n return imaris_types[str(vDataSet.GetType())]" ]
[ "0.7694177", "0.76688254", "0.75949335", "0.75949335", "0.75949335", "0.7573461", "0.74812984", "0.7376351", "0.7376351", "0.72733635", "0.7250659", "0.71817577", "0.7178896", "0.71689445", "0.7107126", "0.7105419", "0.70681125", "0.7060085", "0.7039309", "0.7015803", "0.7010048", "0.69812846", "0.698086", "0.6973812", "0.6973812", "0.6973812", "0.6966532", "0.6961613", "0.69350046", "0.6929445", "0.68953294", "0.6893467", "0.6888927", "0.68875015", "0.68765545", "0.68448097", "0.6796803", "0.6780559", "0.6774283", "0.6772126", "0.67576337", "0.6742372", "0.6720827", "0.66934305", "0.6689053", "0.66881853", "0.6669848", "0.6665709", "0.66450894", "0.664375", "0.66313803", "0.66223556", "0.65659994", "0.6565347", "0.6561051", "0.6527298", "0.65221816", "0.65153736", "0.6506231", "0.64676327", "0.646236", "0.64607257", "0.6450405", "0.64491147", "0.6432219", "0.6426516", "0.64049697", "0.6378265", "0.63739735", "0.6357614", "0.6315173", "0.6305308", "0.6292668", "0.6290685", "0.62734383", "0.6234966", "0.62208104", "0.6169394", "0.61483353", "0.61366826", "0.6118393", "0.60987866", "0.6091577", "0.6091375", "0.60773116", "0.6072602", "0.60595286", "0.60502255", "0.60499704", "0.60252285", "0.59982294", "0.5995335", "0.5992722", "0.5991097", "0.5979544", "0.5970867", "0.59702563", "0.5968463", "0.5956033", "0.5953662" ]
0.69170535
30
True if and only if the partition's data array is a scalar array.
def isscalar(self): return not self.axes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_scalar(obj: _std_typing.Any) -> bool:\n return obj.ndim == 0", "def is_scalar(x: Any) -> bool:\r\n return np.isscalar(x) or (isinstance(x, np.ndarray) and x.ndim == 0)", "def is_array(self):\n return False", "def is_scalar(self):\n return len(self.coeffs.shape[self.sdim:]) == 0", "def is_array(self):\n return len(self.descriptor) > 1", "def is_np_scalar(x):\n return isinstance(x, np.generic)", "def is_scalar(x):\n return x.ndim == 0", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def is_scalar(self):", "def _is_scalar(shape):\n return F.shape_mul(shape) == 1", "def is_scalar(val,\n include_np: bool = True,\n include_torch: bool = True) -> bool:\n if isinstance(val, numbers.Number):\n return True\n elif include_np and isinstance(val, np.ndarray) and val.ndim == 0:\n return True\n elif include_torch and isinstance(val, torch.Tensor) and len(val) == 1:\n return True\n else:\n return False", "def _is_scalar_from_shape(shape):\n return _logical_equal(_ndims_from_shape(shape), 0)", "def isScalar(self) -> bool:\n\n indices = list(range(self.layout.gaDims))\n indices.remove(self.layout.gradeList.index(0))\n\n for i in indices:\n if abs(self.value[i]) < _eps:\n continue\n else:\n return False\n\n return True", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def isarray(a):\r\n try:\r\n validity = isinstance(a, ndarray)\r\n except:\r\n validity = False\r\n\r\n return validity", "def isarray(a):\n try:\n validity=isinstance(a,ndarray)\n except:\n validity=False\n\n return validity", "def is_array(t):\n return isinstance(t, ast.Array)", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def is_scalar(self, typ: Union[Type, None] = _Any) -> bool:\n if isinstance(self.yaml_node, yaml.ScalarNode):\n if typ != _Any and typ in scalar_type_to_tag:\n if typ is None:\n typ = type(None)\n return cast(str, self.yaml_node.tag) == scalar_type_to_tag[typ]\n\n if typ is _Any:\n return True\n raise ValueError('Invalid scalar type passed to is_scalar()')\n return False", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)", "def is_scalar_type(type):\n return type.code in Explorer._SCALAR_TYPE_LIST", "def _is_1d_varray(arr):\r\n return len(arr.shape) < 2 or arr.shape[1] == 1", "def _is_DataArrays(data):\n if isinstance(data, (Dataset, DataArray)):\n return True\n if isinstance(data, Mapping):\n for da in data.values():\n if not isinstance(da, DataArray):\n raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n if isinstance(data, Iterable):\n for da in data:\n if not isinstance(da, DataArray):\n return False\n # raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n return False", "def is_array(val):\n return (\n isinstance(val, tuple) or \\\n isinstance(val, dict) or \\\n isinstance(val, list)\n )", "def f_supports(self, data):\n dtype = type(data)\n if dtype is tuple or dtype is list and len(data) == 0:\n return True # ArrayParameter does support empty tuples\n elif dtype is np.ndarray and data.size == 0 and data.ndim == 1:\n return True # ArrayParameter supports empty numpy arrays\n else:\n return super(ArrayParameter, self).f_supports(data)", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def 
__check_is_xarray(self, data):\n if type(data) is xr.core.dataarray.DataArray or \\\n type(data) is xr.core.dataarray.Dataset:\n\n return True\n else:\n msg = \"Variable {data} is not an xarray DataArray/Dataset\"\n raise Exception(msg)", "def is_scalar(space, w_obj):\n return space.wrap(w_obj.tp in (space.tp_int, space.tp_float,\n space.tp_str, space.tp_bool))", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def __check_flat_array__(self):\n if self.flat_array is not None:\n return True\n else:\n return False", "def isScalar(self):\n return _libsbml.Rule_isScalar(self)", "def IsArray(obj):\n return isinstance(obj, (list, tuple))", "def is_scalar(self):\n return self.expr.lhs.is_Symbol", "def is_array(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return schema_obj.is_array\n return False", "def is_vector(self):\n return True if self.width > 1 else False", "def _is_tc_entity_array(self, data):\n for d in data:\n if not self._is_tc_entity(d):\n return False\n return True", "def is_arraylike(obj):\n if isinstance(obj, list):\n return True\n elif isinstance(obj, np.ndarray):\n return True\n elif isinstance(obj, pd.Series):\n return True\n elif isinstance(obj, pd.DataFrame):\n return True\n return False", "def is_array(type):\n nake_type = remove_alias(type)\n nake_type = remove_reference(nake_type)\n nake_type = remove_cv(nake_type)\n return isinstance(nake_type, cpptypes.array_t)", "def isfloatarray(cell):\n try:\n cell.astype(float)\n return True\n except ValueError:\n return False", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def is_intscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n int,\r\n np.int8,\r\n np.int16,\r\n np.int32,\r\n np.int64,\r\n np.uint8,\r\n np.uint16,\r\n np.uint32,\r\n np.uint64,\r\n ))", "def testscfvaluetype(self):\r\n assert isinstance(self.data.scfvalues, list)\r\n assert isinstance(self.data.scfvalues[0], numpy.ndarray)", "def _isvalid(self, data):\n if data is None:\n return False\n elif isinstance(data, (list,tuple)):\n if len(data) <= 0:\n return False\n else:\n return True\n elif isinstance(data, (np.ndarray)):\n if data.size <= 0:\n return False\n else:\n return True\n elif not data:\n return False\n else:\n return True", "def _is_1d_harray(arr):\r\n return len(arr.shape) < 2 or arr.shape[0] == 1", "def isscalar(cls, dataset, dim, per_geom=False):\n dim = dataset.get_dimension(dim)\n if (dim in cls.geom_dims(dataset)):\n return False\n elif per_geom:\n return all(isscalar(v) or len(list(unique_array(v))) == 1\n for v in dataset.data[dim.name])\n dim = dataset.get_dimension(dim)\n return len(dataset.data[dim.name].unique()) == 1", "def is_vector(self):\n return len(self.coeffs.shape[self.sdim:]) == 1", "def is_scalar(value):\n return not isinstance(value, (list, tuple, dict))", "def is_vector(x):\r\n return len(x.shape) == 1", "def can_insert(data):\n if not isinstance(data, np.ndarray):\n return False\n if data.dtype.char in UNSUPPORTED_NUMERIC_TYPE_CODES:\n return False\n return np.issubdtype(data.dtype, np.number)", "def is_tensor(self):\n return not self.is_scalar", "def test_isarray_vrt(self):\n self.assertIsInstance(_test_array(landsat_vrt), np.ndarray)", "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def payload_valid(self, payload):\n return (\n isinstance(payload, DPTArray)\n and len(payload.value) == self.dpt_class.payload_length\n )", "def is_string_array(self):\n return self.type == 
Property.PropertyType.stringArray", "def is_scalar(self, indexable, axis):\n index = self._obj.index\n complete_key = False\n partial_key = False\n duplicated_key = False\n if axis == 0 and self._has_fancy_index():\n try:\n if type(indexable) is tuple:\n complete_key = (len(indexable) == len(index.levshape) and\n indexable in index)\n partial_key = not complete_key and indexable in index\n except TypeError: # Unhashable type, no biggie\n pass\n if index.has_duplicates:\n duplicated_key = indexable in index.get_duplicates()\n return (not duplicated_key and\n ((np.isscalar(indexable) and not partial_key) or complete_key))", "def is_array_type(self, objtype):\n return isinstance(objtype, self.__arrayt) # _ctypes.PyCArrayType", "def isHandleArray(self):\n return (self.decl.name in mpi_array_calls\n and self.pos in mpi_array_calls[self.decl.name])", "def is_arrayexpress_array(val):\n return arrayexpress_array_regexp.match(val)", "def is_scalar_assign(self):\n return self.is_scalar and not self.is_Increment", "def _check_input_for_asarray(array_like):\n if isinstance(array_like, (Tensor, list, tuple, int, float, bool, onp.ndarray)):\n return True\n raise TypeError(\"input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`\" + \\\n f\"or numpy.ndarray, but got {type(array_like)}\")", "def is_multiobjects(x: Any) -> bool:\r\n return (is_listlike(x) or (isinstance(x, np.ndarray)\r\n and x.dtype == \"O\")) and len(x) > 0 and not is_scalar(x[0])", "def is_floatscalar(x: Any) -> bool:\r\n return isinstance(x, (\r\n float,\r\n np.float16,\r\n np.float32,\r\n np.float64,\r\n ))", "def has_vector_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.vector3", "def _is_string(data):\n return len(data) and isinstance(_to_ndarray(data).flat[0], str)", "def is_numpy(obj):\n return 'numpy' in str(type(obj))", "def in_memory(self) -> bool:\n return all(isinstance(x, np.ndarray) for x in self.chunks.values())", "def is_dataset(self):\n return self._dataset is not None", "def isdense(qob):\n return isinstance(qob, np.ndarray)", "def really1d(arr):\n if np.ndim(arr) != 1:\n return False\n # Empty list or array\n if len(arr) == 0:\n return True\n if np.any(np.vectorize(np.ndim)(arr)):\n return False\n return True", "def is_a_numpy_array(obj):\n return type(obj).__module__ == np.__name__", "def is_splitable_var(var: Any) -> bool:\n if isinstance(var, DataSample):\n return True\n if isinstance(var, torch.Tensor):\n return True\n if isinstance(var, np.ndarray):\n return True\n if isinstance(var, abc.Sequence) and not isinstance(var, str):\n return True\n return False", "def can_insert(data):\n types = (float, complex, int, np.long)\n if isinstance(data, types) and not isinstance(data, bool):\n return True\n elif isinstance(data, np.number):\n return data.dtype.char not in UNSUPPORTED_NUMERIC_TYPE_CODES", "def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())", "def HasArrayOuts(self, function):\n if function.callback:\n for param in function.callback.params:\n if self._IsOrContainsArray(param.type_):\n return True\n return function.returns and self._IsOrContainsArray(function.returns)", "def is_line_vec(x):\n return x.ndim == 1", "def is_real(self):\n return all([isinstance(dim, Real) for dim in self.dimensions])", "def is_vec(x):\n return x.ndim == 1 or (x.ndim == 2 and \n (x.shape[0] == 1 or x.shape[1] == 1))", "def isAny(self,test):\n for x in np.nditer(self.t, 
op_flags=['readonly']):\n if op(x):\n return True\n return False", "def test_data(self):\n\n self.assertIsInstance(self.image.data, np.ndarray)", "def is_pyvista_dataset(obj):\n return isinstance(obj, (pyvista.DataSet, pyvista.MultiBlock))", "def _is_unicode(arr):\n if (isinstance(arr, str) or\n issubclass(numpy.asarray(arr).dtype.type, str)):\n return True\n return False", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, np.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def isNumeric(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.int32 or col.dtype == np.int64 or col.dtype == np.float32 or col.dtype == np.float64", "def is_full(self) -> bool:\n return self._array[0].all()", "def _values_of_same_type(self, val1, val2):\n if (type(val1) in (np.ndarray, tuple, np.matrix)) and (\n type(val2) is type(val1)\n ):\n return True\n else:\n return super(ArrayParameter, self)._values_of_same_type(val1, val2)", "def is_array_of_basic_type(self, objtype):\n return self.is_array_type(objtype) and hasattr(objtype, '_type_') and self.is_basic_type(objtype._type_)", "def _check_output_is_scalar(cls, module: Module) -> None:\n if module.output.numel() != 1:\n raise ValueError(\n \"Output must be scalar. Got {}\".format(module.output.shape)\n )", "def isscalar(self):\n return not bool(self.shape)", "def _datacopied(arr, original):\n if arr is original:\n return False\n if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):\n return False\n return arr.base is None", "def _isscalar(x):\n return np.isscalar(x) or hasattr(x, \"shape\") and x.shape == ()", "def isvec(qob):\n shp = qob.shape\n return len(shp) == 1 or (len(shp) == 2 and (shp[0] == 1 or shp[1] == 1))", "def is_co(self, astype):\n if isinstance(astype, (tuple, list)):\n return self.package(\"DataStructure\").CO in astype\n\n return astype is self.package(\"DataStructure\").CO", "def check_array(self, v, t):\n raise NotImplementedError('check_array')", "def _type_check(data):\n if data.__class__.__name__ != \"Matrix3\":\n return False\n return True", "def isFloat(data):\n\tif type(data) == list or type(data) == np.ndarray:\n\t\tcol = pd.Series(data)\n\telse:\n\t\tcol = data\n\treturn col.dtype == np.float32 or col.dtype == np.float64", "def is_terminal(self, u1):\n\t\treturn (u1 in self.T) # returns True if in array, else False", "def is1d(a):\n return np.sum(asarray(asarray(a).shape) > 1) <= 1", "def valid(self):\n if (self._npix == []\n or self._gpix == []\n or self._epix == []\n or self._ppix == []) :\n return False\n return True", "def NeedsOptionalArray(self, type_):\n return self._NameComponents(type_) in self._optional_array_types" ]
[ "0.74611837", "0.74560386", "0.7210494", "0.71899956", "0.7152722", "0.7101354", "0.70952576", "0.7088053", "0.7086914", "0.69344157", "0.6923261", "0.69007254", "0.68844897", "0.68752134", "0.67174804", "0.66352606", "0.66348445", "0.65968424", "0.658544", "0.65799415", "0.6557284", "0.6505468", "0.6459045", "0.64290345", "0.64111173", "0.6410244", "0.63991654", "0.6393071", "0.6372672", "0.6341929", "0.6273665", "0.62705076", "0.6268065", "0.62492824", "0.62067956", "0.61981165", "0.6171595", "0.6159577", "0.61583346", "0.614487", "0.61434245", "0.6134229", "0.6120269", "0.61083704", "0.6101795", "0.61015964", "0.6101361", "0.60985875", "0.6081736", "0.60817266", "0.6054252", "0.6035537", "0.6031037", "0.60202837", "0.6018561", "0.6013677", "0.6006127", "0.60021734", "0.5986654", "0.597252", "0.5967119", "0.59473664", "0.59448504", "0.5941868", "0.59343165", "0.5914852", "0.59124994", "0.59072024", "0.5895152", "0.58922434", "0.5892022", "0.5872773", "0.5872195", "0.5865648", "0.5818467", "0.5796972", "0.57894266", "0.57872", "0.57867545", "0.5771822", "0.5769761", "0.5764762", "0.5757891", "0.5754791", "0.57460696", "0.5742229", "0.57361573", "0.5730914", "0.5722491", "0.5722441", "0.5717465", "0.5705535", "0.57010245", "0.5694234", "0.568672", "0.568411", "0.56804854", "0.5678261", "0.5663252", "0.5645139", "0.5644997" ]
0.0
-1
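The query/document pair above ties scalarness to the absence of axes. As a rough, self-contained sketch of that pattern (the `Partition` class, its constructor and `axes` attribute here are illustrative assumptions, not the library the snippet was taken from):

```python
import numpy as np

class Partition:
    """Toy partition wrapping a numpy subarray and the axes it spans."""

    def __init__(self, subarray, axes):
        self._subarray = np.asarray(subarray)
        self.axes = list(axes)  # named axes spanned by the partition's data

    @property
    def isscalar(self):
        # No axes means the partition's data array is 0-d, i.e. a scalar.
        return not self.axes

print(Partition(3.14, axes=[]).isscalar)          # True
print(Partition([1, 2, 3], axes=["x"]).isscalar)  # False
```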
The size in bytes of the subarray. The size takes into account the datatype and assumes that there is a boolean mask, unless it can be ascertained that there isn't one.
def nbytes(self): dtype = self.config["dtype"] if dtype is None: return None size = reduce(mul, self.shape, 1) nbytes = size * dtype.itemsize if getattr(self, "masked", True): nbytes += size return nbytes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def arraySize( cls, value, typeCode = None ):\n return value.size", "def array_size(self):\n return self._array_size", "def container_size(self):\n import cPickle\n import sys\n t = cPickle.dumps(self.filter_bitarray)\n return sys.getsizeof(t)", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def size(self):\n return self.dtype.itemsize", "def ndarray_size(self) -> int:\n pass", "def getArrayLength(self):\r\n return self.arraySize", "def get_size(self, shape_info):\r\n if shape_info:\r\n return numpy.prod(shape_info) * numpy.dtype(self.dtype).itemsize\r\n else: # a scalar\r\n return numpy.dtype(self.dtype).itemsize", "def mask_size(self):\n m = self.size * self.mask()\n return m.astype(np.int8)", "def _size(self):\n return self._logicalSize", "def __len__(self):\n return int(self.size._value)", "def size(self):\n return len(self.arr)", "def size(self):\r\n return self._logicalSize", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def size(self):\n return len(self.array_form)", "def get_array_size(self):\r\n return conf.lib.clang_getArraySize(self)", "def size(self):\n return self.N", "def size(self):\n return self._N", "def nbytes(self):\n\n return self.data.type.datasize", "def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def get_size(self):\n return (\n sys.getsizeof(self.children) +\n sys.getsizeof(self.parent) +\n sys.getsizeof(self.dataset_id) +\n sys.getsizeof(self.k) +\n self.filter.get_size()\n )", "def size(self) -> int:\n size = self.da.length()\n return size", "def size(self):\n return self.data.size", "def ndarray_size(data, dtype=\"int32\"):\n return _make.ndarray_size(data, dtype)", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def arrayByteCount( cls, value, typeCode = None ):\n try:\n return value.nbytes\n except AttributeError:\n if cls.ERROR_ON_COPY:\n raise error.CopyError(\n \"\"\"Non-numpy array passed to numpy arrayByteCount: %s\"\"\",\n type(value),\n )\n value = cls.asArray( value, typeCode )\n return value.nbytes", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def nbytes(self):\n return self.nnz * self.dtype.itemsize", "def sizeof(shape, dtype=\"uint8\"):\n itemsize = numpy.dtype(dtype).itemsize\n cnt = 1\n if \"__len__\" in dir(shape):\n for dim in shape:\n cnt *= dim\n else:\n cnt = int(shape)\n return cnt * itemsize", "def size (self):\n\t\timport struct\n\t\treturn struct.calcsize (self.struct)", "def __len__(self):\n return self._arr.shape[1]", "def size(self) -> int:", "def size(self):\n return int(misc.intprod(self.shape))", "def size(self) -> int:\r\n return self.da.length()", "def size(self):\n\t\treturn self.dims", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self) -> int:\n return self.da.length()", "def size(self):\r\n return self.__length", "def get_size(self):\n return self._data_size", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def size(self):\r\n return self.size.data", "def size(self):\n return self.__length", 
"def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def __len__(self):\n a = 1\n for size in self.sizes:\n a *= size\n return a", "def numel(self):\n return self.t.size", "def size(self):\n return self.__size", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def get_size(self):\n # return the size along the index dimension\n size = 0\n if self._data is not None:\n size = shape(self._data)[self.index_dimension]\n\n return size", "def count(self):\r\n return self.data_array.size", "def __len__(self):\n # type: () -> int\n return self.shape[0]", "def size(self):\r\n return self._size", "def bitSizeOf() -> int:\n\n return 1", "def __len__(self):\n return len(np.where(np.logical_not(self.data.mask))[0])", "def size(self):\n return self._length", "def __len__(self):\n return len(self.array)", "def __len__(self):\n return len(self.array)", "def size(self) -> int:\n return int(np.multiply(*self.shape))", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self):\n return len(self.data)", "def size(self, index):\n return self.base_dataset.size(index)", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.__size", "def size(self):\n return self.unpack_dword(0x4)", "def size(self):\n return self.unpack_dword(0x4)", "def __len__(self):\n return np.size(self.A,0)", "def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return self.nbytes\n else:\n return self.nbytes", "def __len__(self):\n\t\treturn self._size", "def size(self):\n\t\treturn self._size", "def size(self):\n return self.__size", "def unitSize( cls, value, typeCode=None ):\n return value.shape[-1]", "def __len__(self) -> int:\n\n return self.__size", "def __len__(self):\r\n return self._size", "def __len__(self):\r\n return self._size", "def size(self):\n if type(self._shape).__name__ == 'tuple':\n return self._shape[-1]\n else:\n return self._shape", "def __len__(self):\n if not self.opt.union:\n return min(len(self.dataset), self.opt.max_dataset_size)\n else:\n return len(self.batch_sampler)", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return self.size", "def __len__(self) -> int:\n return self.size", "def size(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size" ]
[ "0.718968", "0.71867967", "0.7167967", "0.70721114", "0.69927114", "0.69399047", "0.6921515", "0.68736595", "0.6852514", "0.6835874", "0.6802148", "0.67929095", "0.67913187", "0.67521507", "0.6731404", "0.6717999", "0.67143357", "0.6703314", "0.67020184", "0.6700556", "0.6697106", "0.6682291", "0.6651554", "0.66490614", "0.6648868", "0.66486084", "0.6644012", "0.6624058", "0.6616036", "0.6614012", "0.6613987", "0.65858674", "0.6571506", "0.6548967", "0.6544703", "0.6543546", "0.6543546", "0.6543546", "0.6543546", "0.6538286", "0.6535692", "0.65194714", "0.65194714", "0.65194714", "0.65194714", "0.6514875", "0.6498841", "0.6498648", "0.6492229", "0.64905584", "0.6470526", "0.64686674", "0.6458288", "0.64574635", "0.645642", "0.64554864", "0.64527833", "0.6442754", "0.6437928", "0.6437905", "0.6433788", "0.6433788", "0.64305586", "0.6421952", "0.6421952", "0.6421952", "0.6421952", "0.6421952", "0.64153194", "0.641009", "0.641009", "0.641009", "0.641009", "0.641009", "0.64043105", "0.64043105", "0.6402194", "0.6402177", "0.6399923", "0.63992476", "0.6390697", "0.63888264", "0.63844407", "0.63751054", "0.63751054", "0.6372049", "0.63711333", "0.6369956", "0.6369956", "0.6369956", "0.63686407", "0.636768", "0.636768", "0.636768", "0.636768", "0.636768", "0.636768", "0.636768", "0.636768", "0.636768" ]
0.6896727
7
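The docstring above spells out the two ingredients of the byte estimate: the dtype's item size and an optional one-byte-per-element boolean mask. A minimal standalone sketch of that calculation (the function name and the `masked` flag are assumptions made for illustration):

```python
from functools import reduce
from operator import mul

import numpy as np

def estimated_nbytes(shape, dtype, masked=True):
    """Bytes needed for an array of `shape` and `dtype`, plus an optional
    one-byte-per-element boolean mask when its absence cannot be ruled out."""
    if dtype is None:
        return None
    size = reduce(mul, shape, 1)              # number of elements
    nbytes = size * np.dtype(dtype).itemsize  # payload bytes
    if masked:
        nbytes += size                        # numpy bool masks use 1 byte/element
    return nbytes

print(estimated_nbytes((4, 5), np.float64))         # 180 (160 data + 20 mask)
print(estimated_nbytes((4, 5), np.float64, False))  # 160
```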
Number of array dimensions.
def ndim(self): return len(self.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_dims(self):\n return len(self.dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def num_dim(self):\n return len(self._dimensions)", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def count_dims(da):\n return len(da.dims)", "def ndims(x):\n return len(x.get_shape())", "def dims(x):\n return len(x.shape)", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def n_dims(self):\n return self.pdm.n_dims", "def num_dims(self):\n return self.h5['{}/{}'.format(SETTINGS, N_DIMS_STR)][()]", "def getNumDimensions(self):\n return len(self.di.keys())", "def dim(self) -> int:\n pass", "def ndarray_size(self) -> int:\n pass", "def size(self):\n\t\treturn self.dims", "def dim(self):\n return len(self._n)", "def dim(self) -> int:\n return self._n_dim", "def dim(self):\n return len(self.shape)", "def dim(self):\n return len(self.shape)", "def n_dim(self):\n return self._n_dim", "def dimensionality(self):\n return int(self.nDims)", "def ndim(self):\n # type: () -> int\n return len(self.shape)", "def dimension(self):\n return self.__N", "def ndim(self):\n return len(self.nvars)", "def dim(self) -> int:\n return self.atoms.shape[:-1]", "def dim(self) -> int:", "def num_dimensions(self):\n if self.__num_dimensions__ == 0:\n # Try to get the number of dimensions from the first point or bounding box\n if len(self.points) > 0:\n self.__num_dimensions__ = len(self.points[0].coordinate)\n elif len(self.bounding_boxes) > 0:\n self.__num_dimensions__ = len(self.bounding_boxes[0].start)\n return self.__num_dimensions__", "def ndim(self):\n return len(self._shape)", "def ndim(self) -> int:\r\n return len(self.plates)", "def dimensions():", "def get_dimension_number(self) -> int:\n return np.squeeze(self._channel_arrays[0]).ndim", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def dimension_count(self):\n return self._dimensionCount", "def get_dimension_length(self):\n pass", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def dims(self):\n raise NotImplementedError('Please use Vector2Array or Vector3Array')", "def ndim(self):\n return self.data.ndim", "def get_dims(self):\n row_lbl, col_lbl = self.get_idxvals()\n return len(row_lbl), len(col_lbl)", "def ndim(self):\n return len(self.point)", "def n_dims(self):\n return self.model.template_instance.n_dims", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def dim(self,mat):\n result = np.shape(mat)\n self.dimensions = result\n return self.dimensions", "def dimensions(self):\n return len(self.parameter_names)", "def N(self):\n return self._dimensions", "def ndim(self):\n return self._ndim", "def xdim(self):\n return len(self._x)", "def ndim(self) -> int:\n return self[0].ndim", "def ndim(self) -> int:\n\n return 1 + len(self.shape)", "def ndim(self):\n return self.__value.ndim", "def size(self):\n return int(misc.intprod(self.shape))", "def dimension_size(self):\n return self._dim", "def ndim(self):\n return len(self.edges)", "def dims(self):\n return self[0].dims", "def ndims(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return len(var.dimensions)", "def dim(self):\n return self.m, self.n", "def array_dimensions(array):\n height = len(array)\n width = len(array[0])\n\n return width, height", "def get_dimensions(self):\n return 
self.lon_arr.shape", "def ndim(self):\n return self.X.ndim", "def dimensionality(self):\n if self.vector.shape is ():\n return 0\n if len(self.vector.shape) is 1:\n return 1\n _, dim = self.vector.shape\n return dim", "def array_size(self):\n return self._array_size", "def dimension(self):\n return len(self.qubit_values)", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def ndim(self):\n\n self._check_assigned()\n\n if (\n self.lazy\n and self.transformer is not None\n and hasattr(self.transformer, \"get_transformed_shape\")\n ):\n return len(self.transformer.get_transformed_shape(self.values))\n else:\n return self.__array__().ndim", "def dim(self):\n raise NotImplementedError", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def get_nb_element_per_dimension(recipe):\n return len(recipe[\"r\"]), len(recipe[\"c\"]), len(recipe[\"z\"])", "def ndim(self):\n if self._ndim is None:\n self._ndim = self.get_mesh_dimension()\n\n return self._ndim", "def ndim(x):\n dims = x.get_shape()._dims\n if dims is not None:\n return len(dims)\n return None", "def _get_ndim(self):\n return len(self.level_shapes[0])", "def width(self) -> int:\n return self._obj[self.x_dim].size", "def ndim(a):\n if isinstance(a, np.ndarray):\n return a.ndim\n else:\n return K.ndim(a)", "def __len__(self):\n return self.flatten_dim(self.shape[0])", "def size(self):\n return numpy.prod(self.shape)", "def __len__(self):\n return self._arr.shape[1]", "def dim(self):\n return self.__dim__", "def _N(self):\n return len(self._array)", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def ndim(self):\n return self.initial_value.ndim", "def getDimension(self):\n return len(self.components)", "def ndim(self):\n return np.ndim(self.MJD)", "def outdim(self):\n return len(self.getSensors())", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def __len__(self):\n return self.N.shape[0]", "def ndarray_size(data, dtype=\"int32\"):\n return _make.ndarray_size(data, dtype)", "def ndim(self):\n futures = self.client.map(_call_ndim, self.vecDask, pure=False)\n ndims = self.client.gather(futures)\n return ndims", "def ndim(self):\n return 2", "def dim(self):\n return (self.n, )", "def __len__(self):\n return np.size(self.A,0)", "def getDimensions():", "def dim(self):\n raise NotImplementedError()", "def size(self):\n return self.N", "def dimension(self):\n return np.prod(np.asarray(self.subsystem_dims))", "def size(self):\n return len(self.array_form)", "def dimension(self):", "def _get_num_tensor_dimensions(input_tensor):\n\n return len(input_tensor.get_shape().as_list())", "def size(self):\n if type(self._shape).__name__ == 'tuple':\n return self._shape[-1]\n else:\n return self._shape" ]
[ "0.87556416", "0.85560286", "0.85560286", "0.84606373", "0.8365305", "0.83450353", "0.8314268", "0.8273276", "0.82264763", "0.8216497", "0.81538165", "0.81395245", "0.8122816", "0.81203264", "0.809687", "0.8096407", "0.80840564", "0.80840564", "0.80781686", "0.80665874", "0.8047602", "0.7954937", "0.7948822", "0.79326594", "0.7924109", "0.7922604", "0.78415036", "0.7841272", "0.78214836", "0.78011036", "0.778965", "0.7747739", "0.7715197", "0.76978576", "0.76526695", "0.764585", "0.7645568", "0.7638701", "0.7619169", "0.76085335", "0.76058596", "0.75977", "0.75922275", "0.7586435", "0.7580205", "0.7560079", "0.7544165", "0.75441396", "0.7507646", "0.74952996", "0.7486465", "0.7479848", "0.7472327", "0.7465935", "0.74540037", "0.74502015", "0.7419761", "0.74158365", "0.7409149", "0.7398527", "0.73965997", "0.7387083", "0.73813313", "0.7363035", "0.7343015", "0.7328039", "0.73250973", "0.7313946", "0.73021036", "0.7288522", "0.72838926", "0.72679627", "0.7259089", "0.72545916", "0.72456527", "0.7242537", "0.7236153", "0.7227688", "0.7225149", "0.72219056", "0.72211355", "0.720171", "0.7199945", "0.71981937", "0.7174733", "0.7168553", "0.71669364", "0.7163261", "0.71495384", "0.71281266", "0.71204454", "0.7120444", "0.7117927", "0.7106828", "0.7098302" ]
0.79104465
30
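The `ndim` snippet above is the usual shape-length definition; the same identity holds for plain numpy arrays, as this short check shows:

```python
import numpy as np

a = np.zeros((3, 4, 5))
assert a.ndim == len(a.shape) == 3  # dimensionality is the length of the shape tuple
print(a.ndim)
```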
Number of elements in the partition's data array (not its subarray).
def size(self): return reduce(mul, self.shape, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count(self):\r\n return self.data_array.size", "def count_elements_in_dataset(dataset):\n return dataset.count()", "def size(self):\n ret = 0\n for ii in self.__data:\n ret += int(ii.get_size())\n return ret", "def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n if self.n_timesteps > 1:\n n_elem = int(n_elem / self.n_timesteps)\n return n_elem", "def num_elements(self):\n return self.subset.num_elements()", "def _N(self):\n return len(self._array)", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def __len__(self):\r\n if self.is_superset:\r\n length = 0\r\n for ds in self.data:\r\n length += len(ds)\r\n return length\r\n else:\r\n return len(self.data)", "def get_total_item_size(dataset):\n total_items = 0\n for element in dataset:\n total_items += 1\n return total_items", "def numel(self):\n return self.t.size", "def num(an_array):\n return an_array.size", "def size(self):\n size = 1\n for current_slice in self.slices:\n size *= current_slice.stop - current_slice.start\n return size", "def data_count(self):\n return(len(self.data))", "def __len__(self):\n return np.size(self.A,0)", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def nbytes_at(self, device_id:int):\n if self._slices:\n if isinstance(self._coherence._local_states[device_id], dict): # there are subarrays no this device\n if self._slices_hash in self._coherence._local_states[device_id].keys(): # this subarray is already there\n return self._array.nbytes_at(device_id)\n else: # the subarray will be moved to there\n return self._array.nbytes_at(device_id) + self.subarray_nbytes # add the incoming subarray size\n else: # there is a complete copy on this device, no need to prepare subarray\n return self.nbytes\n else:\n return self.nbytes", "def size(self):\n return len(self.arr)", "def get_length(self):\n if self.opt.num_buckets > 1:\n return sum([len(bucket) for bucket in self.data])\n else:\n return len(self.data)", "def noOfElem(classObj, index):\r\n return len(classObj.dataSet[:, index])", "def nbytes(self):\n return self.nnz * self.dtype.itemsize", "def count_data(self):\n try:\n ndata = len(self.x)\n logger.info(\"Number of data points: {0}\".format(ndata))\n except AttributeError:\n logger.error(\"Data object has not been defined\")\n ndata = 0\n return ndata", "def getNumData(self):\n return len(self.data)", "def GetNumberOfElements(self, assoc):\n result = 0\n for dataset in self:\n result += dataset.GetNumberOfElements(assoc)\n return int(result)", "def get_length(array):\n return len(list(array))", "def getDataSetCount(self):\n\t\treturn int(self.numberOfImages / self.slicesPerTimepoint)", "def size(self) -> Tuple[groupable, pdarray]:\n return self.count()", "def numel(array):\n _import_modules()\n module_name = type(array).__module__.split(\".\")[0]\n if module_name in [\"numpy\", \"numpy.ma.core\"]:\n return array.size\n elif module_name == \"torch\":\n return array.numel()\n elif module_name.split(\".\")[0] == \"jax\":\n return array.size\n elif module_name.split(\".\")[0] == \"tensorflow\":\n return tf.size(array)\n raise UnknownArrayTypeException(\n f\"The provided input of type {type(array)} is\"\n \"not a supported array type.\"\n )", "def __len__(self):\n nsamp = self.data.shape[-1]\n kernel = int(self.kernel * self.fs)\n stride = int(self.stride * self.fs)\n 
n_stride = int(np.ceil((nsamp - kernel) / stride) + 1)\n return max(0, n_stride)", "def get_data_size(self):\n if self._policy_str == EDGE_PART_POLICY:\n return len(self._partition_book.partid2eids(self._part_id))\n elif self._policy_str == NODE_PART_POLICY:\n return len(self._partition_book.partid2nids(self._part_id))\n else:\n raise RuntimeError('Cannot support policy: %s ' % self._policy_str)", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def __len__(self):\n return self._dataset.size(dirs=self._dirs)", "def dim(self):\n return len(self._n)", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def size(self):\n return self.N", "def n(self):\n return nrow(self._array)", "def elements_count(self):\n return self.__elements_count", "def size(self):\n return self._N", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def size(self)->int:\n\n return np.prod([axes if is_integer(axes) else len(axes) for axes in self._dim_axes])", "def __len__(self):\n return sum(len(p) for p in self.parts)", "def data_count(self):\r\n\r\n shp = self.df.shape\r\n row_count = shp[0]\r\n return row_count", "def getNumElements(self):\n return 0", "def _num_samples(x: npt.ArrayLike) -> int:\n if not hasattr(x, \"__len__\") and not hasattr(x, \"shape\"):\n if hasattr(x, \"__array__\"):\n x = np.asarray(x)\n else:\n raise TypeError(\"Expected sequence or array-like, got %s\" % type(x))\n if hasattr(x, \"shape\"):\n if len(x.shape) == 0:\n raise TypeError(\"Singleton array %r cannot be considered\" \" a valid collection.\" % x)\n # Check that shape is returning an integer or default to len\n # Dask dataframes may not return numeric shape[0] value\n if isinstance(x.shape[0], numbers.Integral):\n return x.shape[0]\n else:\n return len(x)\n else:\n return len(x)", "def numIncrementals(self) -> int:\n return len(self._dataArrays)", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def __len__(self):\n ret = self.data.shape[0]\n return ret", "def __len__(self):\n return self._arr.shape[1]", "def getNrEntries(self):\n return len(self.data)", "def __len__(self):\n return len(self.array)", "def __len__(self):\n return len(self.array)", "def dim(self) -> int:", "def getNumElements(self):\n return 1", "def __len__(self):\n n = 1\n for valTuple in self._valListOfLists:\n n *= len(valTuple)\n return n", "def dimension(self):\n return len(self.qubit_values)", "def __len__(self):\n return int(np.floor(len(self.dataset_df) / self.batch_size))", "def partition_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"partition_count\")", "def nbytes(self) -> int:\n\n return self.data.nbytes + self.shape.nbytes", "def number_of_data_nodes(self):\n return int(self._data['number_of_data_nodes'])", "def len():\n if not CpuMap.arr:\n CpuMap.arr = CpuMap._cpus()\n return len(CpuMap.arr)", "def array_size(self):\n return self._array_size", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def getNbRows(self):\n return self.data.shape[1]", "def __len__(self) -> float:\n return len(self.elements)", "def length(self, data: Sequence[Sequence[torch.Tensor]]) -> int:\n return self.n_batch", "def getNumElements(self):\n return 1", "def length(self):\n length = 0\n a = self.array_form\n for i in xrange(len(a)):\n if 
a[i] != i:\n length += 1\n return length", "def ndarray_size(data, dtype=\"int32\"):\n return _make.ndarray_size(data, dtype)", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def nbytes(self):\n\n return self.data.type.datasize", "def __len__(self): \r\n length = len(self.data) - 2* self.skip_window\r\n #print ('length', length)\r\n return length\r\n #raise NotImplementedError('Implement the __len__ method of the dataset')\r", "def __len__(self):\n return self.data.num_samples", "def __len__(self):\n return len(self.dataset) * self.samples_per_pair", "def voxel_count(self):\n return self.cols * self.rows * self.sections", "def size(self, index):\n return self.base_dataset.size(index)", "def get_nb_element_per_dimension(recipe):\n return len(recipe[\"r\"]), len(recipe[\"c\"]), len(recipe[\"z\"])", "def getNumElements(self):\n raise Exception(\"Didn't expect this to get called.\")", "def ndarray_size(self) -> int:\n pass", "def numel(self) -> int:\n return sum(p.numel() for p in self.parameters)", "def nbytes(self):\n dtype = self.config[\"dtype\"]\n if dtype is None:\n return None\n\n size = reduce(mul, self.shape, 1)\n nbytes = size * dtype.itemsize\n\n if getattr(self, \"masked\", True):\n nbytes += size\n\n return nbytes", "def __len__(self):\n return self.dataset.shape[0]", "def num_partitions(self): # -> int:\n ...", "def count(self):\n return len(self._elements)", "def n_thres(self):\n return np.size(self.thres)", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def __len__(self):\n return int(np.floor(len(self.indexes) / self.batch_size))", "def count(self):\n return len(self.read_ints())", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def Num_Elem_Pila(self):\n return len(self.pila)", "def __len__(self) -> int:\n if self.preload:\n return len(self.data_ram)\n else:\n return len(self.data)", "def count(self):\n return self.data_container.count", "def __len__(self) -> int:\n\n return self.shape.shape[1]", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def element_count(self):\n return self._internal.get_element_count()", "def __len__(self) -> int:\n return len(self.__elements)", "def __len__(self) -> int:\n return int(np.floor(len(self.list_IDs) / self.batch_size))", "def ndim(self):\n return self._hist.rank()", "def dim(self) -> int:\n return self._n_dim", "def size(self):\n return len(self.array_form)" ]
[ "0.76638305", "0.73945427", "0.7326349", "0.7308581", "0.7271908", "0.72121114", "0.71300155", "0.7121346", "0.71050775", "0.7088031", "0.70531565", "0.6979757", "0.69550794", "0.694825", "0.69397396", "0.69286805", "0.6899676", "0.6895468", "0.68825126", "0.687968", "0.687458", "0.6859505", "0.6857238", "0.6852797", "0.6845907", "0.6819525", "0.68078864", "0.67656296", "0.67622924", "0.67458427", "0.6743274", "0.67397213", "0.67181766", "0.6713872", "0.67106515", "0.670989", "0.6701366", "0.6698873", "0.6694914", "0.66894263", "0.6686565", "0.6674225", "0.66702485", "0.6666631", "0.6664025", "0.6643706", "0.6619851", "0.6615853", "0.66132253", "0.6612713", "0.6612713", "0.66110504", "0.65963167", "0.65948194", "0.6593723", "0.65934736", "0.6585986", "0.65820646", "0.65718216", "0.65695786", "0.6566921", "0.6565889", "0.6555226", "0.652751", "0.65264636", "0.6525014", "0.6524313", "0.65238404", "0.6515606", "0.65119374", "0.6499892", "0.64963764", "0.64856577", "0.6481219", "0.6476438", "0.64578205", "0.64506274", "0.6448778", "0.64480317", "0.6445686", "0.64429843", "0.6436247", "0.6429239", "0.64247173", "0.6422584", "0.6419411", "0.6419411", "0.6419411", "0.64175814", "0.64171", "0.6415757", "0.6412586", "0.6411072", "0.64089394", "0.6407548", "0.6404272", "0.6400729", "0.63993484", "0.6394548", "0.6389457", "0.6377886" ]
0.0
-1
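The docstring above stresses that the element count comes from the partition's own shape rather than from whatever subarray it happens to hold. A small sketch of that distinction (the class and attribute names are again assumptions, not the original library's API):

```python
from functools import reduce
from operator import mul

import numpy as np

class Partition:
    """Toy partition: `shape` describes the partition's slot in the larger
    array; the stored subarray may be bigger than that slot."""

    def __init__(self, subarray, shape):
        self._subarray = np.asarray(subarray)
        self.shape = tuple(shape)

    @property
    def size(self):
        # Count elements from the partition's shape, not from the stored subarray.
        return reduce(mul, self.shape, 1)

p = Partition(np.zeros((6, 4)), shape=(3, 4))
print(p.size)            # 12, from shape (3, 4)
print(p._subarray.size)  # 24, the stored subarray is larger
```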
The partition's subarray of data.
def subarray(self): return self._subarray
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subarray(self) -> Subarray:\n return Subarray.from_pybind11(self._ctx, self._subarray)", "def partition(self, sep):\n return asarray(partition(self, sep))", "def array(self) -> ndarray:\n if self._slices: # so this is a sub-parray object\n # index into origin array by saved slices\n ret = self._array.get_by_global_slices(self._current_device_index, self._slices[0])\n for s in self._slices[1:]:\n ret = ret[s]\n return ret\n else: # this is a complete copy\n ret = self._array.get(self._current_device_index)\n\n if isinstance(ret, list): # get a subarray instead\n raise IndexError(\"Current device doesn't have a complete copy of this array\")\n return ret", "def array(self):\n config = self.config\n\n unique_array = config[\"unique_subarray\"]\n\n p_axes = self.axes\n p_flip = self.flip\n p_part = self.part\n p_units = self.Units\n p_shape = self.shape\n p_location = self.location\n subarray = self._subarray\n\n len_p_axes = len(p_axes)\n\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is not in memory.\n #\n # It could be in a file on disk or implied by a FileArray\n # object, etc.\n # --------------------------------------------------------\n self._original = self.copy()\n\n unique_array = True\n update = True\n copy = False\n\n if not p_part:\n indices = Ellipsis\n else:\n indices = tuple(p_part)\n\n # Read from a file into a numpy array\n p_data = subarray[indices]\n\n # We've just copied p_data from disk, so in place changes\n # are not possible\n in_place_changes = False\n else:\n # --------------------------------------------------------\n # The subarray is in memory\n # --------------------------------------------------------\n update = config[\"update\"]\n\n if p_part:\n p_data = get_subspace(subarray, p_part)\n elif not unique_array:\n p_data = subarray.view()\n else:\n p_data = subarray\n\n copy = config[\"extra_memory\"]\n\n # In place changes to p_data might be possible if we're not\n # copying the data\n in_place_changes = not copy\n\n if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)):\n # --------------------------------------------------------\n # p_data is a numpy number (like numpy.int64) which does\n # not support assignment, so convert it to a numpy array.\n # --------------------------------------------------------\n p_data = numpy_array(p_data)\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n\n masked = numpy_ma_isMA(p_data)\n if masked:\n # The p_data is a masked array\n if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked(\n p_data\n ):\n # There are no missing data points so recast as an\n # unmasked numpy array\n p_data = p_data.data\n masked = False\n # --- End: if\n\n if masked:\n # Set the hardness of the mask\n if config[\"hardmask\"]:\n p_data.harden_mask()\n else:\n p_data.soften_mask()\n # --- End: if\n\n self.masked = masked\n\n # ------------------------------------------------------------\n # Make sure that the data array has the correct units. This\n # process will deep copy the data array if required (e.g. 
if\n # another partition is referencing this numpy array), even if\n # the units are already correct.\n # ------------------------------------------------------------\n func = config.get(\"func\")\n units = config[\"units\"]\n if func is None:\n if not p_units.equals(units) and bool(p_units) is bool(units):\n func = Units.conform\n\n if func is not None:\n inplace = not copy\n p_data = func(p_data, p_units, units, inplace)\n p_units = units\n\n if not inplace:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n flip = config.get(\"flip\", None)\n if flip or p_flip:\n flip_axes = set(p_flip).symmetric_difference(flip)\n else:\n flip_axes = None\n\n axes = config[\"axes\"]\n\n if p_data.size > 1:\n # --------------------------------------------------------\n # Flip axes\n # --------------------------------------------------------\n if flip_axes:\n indices = [\n (\n slice(None, None, -1)\n if axis in flip_axes\n else slice(None)\n )\n for axis in p_axes\n ]\n p_data = p_data[tuple(indices)]\n\n # --------------------------------------------------------\n # Transpose axes\n # --------------------------------------------------------\n if p_axes != axes:\n iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes]\n\n if len_p_axes > len(iaxes):\n for i in range(len_p_axes):\n if i not in iaxes:\n # iaxes.append(i)\n iaxes.insert(i, i)\n # --- End: if\n\n p_data = numpy_transpose(p_data, iaxes)\n # --- End: if\n\n # ------------------------------------------------------------\n # Remove excessive/insert missing size 1 axes\n # ------------------------------------------------------------\n if p_shape != p_data.shape:\n # if len_p_axes != len(p_shape):\n p_data = p_data.reshape(p_shape)\n\n # ------------------------------------------------------------\n # Apply the auxiliary mask\n # ------------------------------------------------------------\n auxiliary_mask = config[\"auxiliary_mask\"]\n if auxiliary_mask:\n for mask in auxiliary_mask:\n if mask.any():\n if not masked:\n p_data = p_data.view(numpy_ma_MaskedArray)\n masked = True\n\n p_data.mask = (mask | p_data.mask).array\n # --- End: for\n\n self.masked = True\n\n # ------------------------------------------------------------\n # Convert the array's data type\n # ------------------------------------------------------------\n p_dtype = p_data.dtype\n dtype = config.get(\"dtype\", None)\n if dtype is not None and dtype != p_dtype:\n try:\n p_data = p_data.astype(dtype) # Note: returns a copy\n except ValueError:\n raise ValueError(\n \"Can't recast partition array from {} to {}\".format(\n p_dtype.name, dtype.name\n )\n )\n else:\n # We've just copied p_data, so in place changes are\n # not possible\n copy = False\n in_place_changes = False\n # --- End: if\n\n # ------------------------------------------------------------\n # Copy the array\n # -----------------------------------------------------------\n if copy:\n if p_dtype.char != \"O\":\n if not masked or p_data.ndim > 0:\n p_data = p_data.copy()\n else:\n # This is because numpy.ma.copy doesn't work for\n # scalar arrays (at the moment, at least)\n p_data = numpy_ma_masked_all((), p_data.dtype)\n\n # We've just copied p_data, so in place changes are\n # not possible\n in_place_changes = False\n else:\n # whilst netCDF4.netcdftime.datetime is mucking bout,\n # don't copy!!!!\n # p_data = _copy(p_data)\n pass\n # --- End: if\n\n # ------------------------------------------------------------\n # Update the 
partition\n # ------------------------------------------------------------\n if update:\n self.subarray = p_data # ?? DCH CHECK\n self.Units = p_units\n self.part = []\n self.axes = axes\n self.flip = flip\n self.flatten = []\n self.shape = p_shape\n self.location = p_location\n\n self._in_place_changes = in_place_changes\n\n # ------------------------------------------------------------\n # Return the numpy array\n # ------------------------------------------------------------\n return p_data", "def partitionData(data, labels, partition):\n\treturn [s[partition] for s in data], labels[partition]", "def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)", "def as_slice(self):\n # slice for accessing arrays of values\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)", "def partition(data, indecies):\n\tsplitdata = [data[:indecies[0]]]\n\tsplitdata += [data[indecies[i-1]:indecies[i]] for i in range(1,len(indecies))]\n\tsplitdata.append(data[indecies[-1]:])\n\treturn splitdata", "def get_partions(self) -> Union[ndarray, Tuple[ndarray, ndarray]]:\n if self.fragmented:\n return (self[self._begin:], self[:self._end])\n else:\n return self[self._begin:self._end]", "def subset(arr, start, end):\n return [[row_data for row_data in row[start[1]:end[1]]] for row in arr[start[0]:end[0]]]", "def rpartition(self, sep):\n return asarray(rpartition(self, sep))", "def subset(self, data, subset_size):\n subset_size_q = int((subset_size - 1) / 2)\n subset_image = []\n\n for i in range(-subset_size_q, subset_size_q + 1):\n for j in range(-subset_size_q, subset_size_q + 1):\n subset_roll = np.roll(data, i, axis=0)\n subset_roll = np.roll(subset_roll, j, axis=1)\n subset_image.append(subset_roll)\n\n return np.sum(np.asarray(subset_image), axis=0)", "def __getslice__(self, i, j):\n return self.dtrs[i:j]", "def __getslice__(self,i,j):\n return self.x[i:j]", "def _fprop_slice_np(h, stride, H, roi_offset):\n hstart = int(np.floor(float(h) * stride))\n hend = int(np.ceil(float(h + 1) * stride))\n\n hstart = min(max(hstart + roi_offset, 0), H)\n hend = min(max(hend + roi_offset, 0), H)\n\n return slice(hstart, hend), hend - hstart", "def subdataset(self):\n return self._clip_metadata.get(\"subdataset\")", "def getData(self, slice=None):\n\t\traise NotImplementedError", "def slice(data, size):\n\treturn dice(data, size).T", "def __getslice__( self, *args):\n return array.array.__getslice__(self, *args).tostring()", "def get_subset(self, tile, band=0):\r\n # access window bounds\r\n bounds = rasterio.windows.bounds(tile, self.dataset.transform)\r\n return (\r\n self.__arr[(band,) + tile.toslices()],\r\n bounds,\r\n ) # Shape of array is announced with (bands, height, width)\r", "def row_slice(self, xt, nproc):\n if nproc is None: nproc = self.nproc\n cs = xt.shape[0]//nproc #chuncksize\n tmp = [xt[i*cs:cs*i+cs,:] for i in range(nproc)]\n if nproc*cs != xt.shape[0]:\n tmp[-1] = np.concatenate((tmp[-1],xt[nproc*cs:xt.shape[0],:]),axis=0)\n return tmp", "def slices(self):\n return self._slices", "def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)", "def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))", "def get_part_array(array, part, additional_area, offset={'x': 0, 'y': 0}):\n\tresult = []\n\n\toffset_before = {\n\t\t'x': offset['x'] - additional_area['x'],\n\t\t'y': offset['y'] - additional_area['y']\n\t}\n\tif offset_before['x'] < 
0:\n\t\tprint('set x')\n\t\toffset_before['x'] = 0\n\tif offset_before['y'] < 0:\n\t\tprint('set y')\n\t\toffset_before['y'] = 0\n\n\tfor i in array[offset_before['y'] : offset['y']+part['y']+additional_area['y']]:\n\t\tresult.append(i[offset_before['x'] : offset['x']+part['x']+additional_area['x']])\n\treturn np.array(result)", "def subimage(image_as_array, step):\r\n\tsubimage_2d_array = image_as_array[200-int(step):200+int(step)]\r\n\treturn subimage_2d_array", "def _extract_data_sub_stack(self, startRow, endRow):\n # Grab the shape of the image stack\n nz, ny, nx = self.shape\n\n # Compute the number of rows in this sub stack\n numberOfRows = endRow - startRow\n\n # Build an array for storing output\n outData = np.zeros((nz, numberOfRows, nx))\n\n # Loop through each image and extract its data\n for zInd, img in enumerate(self.imageList):\n outData[zInd, :, :] = img.data[startRow:endRow, :]\n\n return np.ma.array(outData)", "def get_slice(self):\n return self.locs[tuple(self.indices), :]", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def getPartition(self):\n\t\treturn self.partition", "def get(self):\n return self._partition", "def subsample(self, dataset):\n sample_idx = np.random.choice(\n dataset.shape[0], self.sample_size, replace=True)\n sample = dataset[sample_idx,...]\n return sample", "def vectorized_slice(self, data):\n\n # TODO: finish this.\n sum_of_rows = np.sum(data[:100,:], axis=1)\n max_indx = np.argmax(sum_of_rows)\n return (sum_of_rows[max_indx], max_indx)", "def get_subarray(input_array, reference):\n\n if (reference.meta.subarray.xstart is None or\n reference.meta.subarray.xsize is None or\n reference.meta.subarray.ystart is None or\n reference.meta.subarray.ysize is None):\n raise ValueError('subarray metadata values not found')\n\n xstart = reference.meta.subarray.xstart - 1\n xstop = xstart + reference.meta.subarray.xsize\n ystart = reference.meta.subarray.ystart - 1\n ystop = ystart + reference.meta.subarray.ysize\n log.debug(\"xstart=%d, xstop=%d, ystart=%d, ystop=%d\" \\\n % (xstart, xstop, ystart, ystop))\n\n return input_array[..., ystart:ystop, xstart:xstop]", "def GetRowPartArray(self):\n return _hypre.HypreParMatrix_GetRowPartArray(self)", "def cfdGetBoundaryElementsSubArrayForBoundaryPatch(self):\r\n\r\n for iBPatch, theBCInfo in self.cfdBoundaryPatchesArray.items():\r\n \r\n startBElement=self.numberOfElements+self.cfdBoundaryPatchesArray[iBPatch]['startFaceIndex']-self.numberOfInteriorFaces\r\n endBElement=startBElement+self.cfdBoundaryPatchesArray[iBPatch]['numberOfBFaces']\r\n \r\n self.cfdBoundaryPatchesArray[iBPatch]['iBElements']=list(range(int(startBElement),int(endBElement)))", "def slice_batch(x, n_gpus, part):\n sh = K.shape(x)\n L = sh[0] // n_gpus\n if part == n_gpus - 1:\n return x[part*L:]\n return x[part*L:(part+1)*L]", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def slice(self) -> Tuple[slice, ...]:\n\n total_slice = tuple(slice(None) for _ in self.collection_shape)\n for obj in self.objects.flat:\n for i, current_slice in enumerate(obj.slices):\n if total_slice[i].start is None:\n total_slice = total_slice[:i] + (current_slice,) + total_slice[i + 1:]\n else:\n if current_slice.start < total_slice[i].start:\n total_slice = total_slice[:i] + (\n slice(current_slice.start, total_slice[i].stop, total_slice[i].step),) + total_slice[i + 1:]\n if current_slice.stop > total_slice[i].stop:\n total_slice = total_slice[:i] + (\n slice(total_slice[i].start, current_slice.stop, 
total_slice[i].step),) + total_slice[i + 1:]\n return total_slice", "def chunk(self):\n # easy enough\n return self.dcpl.getChunk(rank=len(self.shape))", "def partition(data: list, parts: list, *args: float) -> list:\n random.seed(42)\n partition_names = parts\n random.shuffle(data)\n n = len(data)\n rem, a, b = n, 0, 0\n parts = []\n\n for p in args:\n b = a + int(n*p)\n parts.append(data[a:b])\n rem -= (b - a)\n a = b\n # end\n\n parts.append(data[-rem:])\n return parts", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def get_partition(self):\n return self._partition", "def data_array(self):\n return self._data_array", "def get_data(self):\n idxs = self.get_indexes(self._start, self._length, self.maxsize)\n return self._data[idxs].copy()", "def __getitem__(self, idx):\r\n if self.is_superset:\r\n for ds in self.data:\r\n if idx >= len(ds):\r\n continue\r\n return ds[idx]\r\n else:\r\n return self.data[idx]", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def extra_memory(self):\n if not self.in_memory:\n # --------------------------------------------------------\n # The subarray is on disk so getting the partition's data\n # array will require extra memory\n # --------------------------------------------------------\n extra_memory = True\n else:\n # --------------------------------------------------------\n # The subarray is already in memory\n # --------------------------------------------------------\n config = self.config\n\n p_part = self.part\n if p_part:\n extra_memory = True\n elif not config[\"unique_subarray\"]:\n extra_memory = True\n else:\n p_data = self._subarray\n\n if not numpy_ma_isMA(p_data):\n # The p_data is not a masked array\n extra_memory = isinstance(p_data.base, numpy_ndarray)\n else:\n # The p_data is a masked array\n memory_overlap = isinstance(\n p_data.data.base, numpy_ndarray\n )\n if not (\n p_data.mask is numpy_ma_nomask\n or not numpy_ma_is_masked(p_data)\n ):\n # There is at least one missing data point\n memory_overlap |= isinstance(\n p_data.mask.base, numpy_ndarray\n )\n\n extra_memory = memory_overlap\n # --- End: if\n\n p_dtype = p_data.dtype\n\n if not extra_memory:\n if config[\"func\"] is not None:\n extra_memory = True\n else:\n p_units = self.Units\n units = config[\"units\"]\n if (\n not p_units.equals(units)\n and bool(p_units) is bool(units)\n and not (\n p_data.flags[\"C_CONTIGUOUS\"]\n and p_dtype.kind == \"f\"\n )\n ):\n extra_memory = True\n\n # ------------------------------------------------------------\n # Extra memory is required if the dtype needs changing\n # ------------------------------------------------------------\n if not extra_memory:\n dtype = config[\"dtype\"]\n if dtype is not None and dtype != p_data.dtype:\n extra_memory = True\n # --- End: if\n\n # ------------------------------------------------------------\n # Amount of extra memory (in bytes) required to access the\n # array\n # ------------------------------------------------------------\n return self.nbytes if extra_memory else 0", "def sub_rows(arr, sub_size):\n rows, cols = arr.shape\n for i in range(rows):\n for j in range(cols - sub_size + 1):\n yield arr[i, range(j, j + sub_size)]", "def subdimension(self, key):\n \n index = self.to_index(key)\n if isinstance(index, int):\n return None\n\n # here index is a slice\n if index.stop - index.start <= 1:\n # Here key represent a single element\n return None\n units = self.to_unit(index) # recompute units for clean borders\n return 
AffineDimension([units.start, units.stop], index.stop - index.start)", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def tondarray(self):\r\n return self.data;", "def slice_timeseries(n_slices,dataset):\n\n n,l=np.shape(dataset)\n\n X = np.reshape(dataset,(n*n_slices,l//n_slices))\n\n print('sliced data shape (nr. of slices, slice length):',np.shape(X))\n print('#####################################')\n \n return X", "def get(self):\r\n return self.data_array", "def __getitem__(self, index: slice) -> List:\n\n return self.data[index]", "def batch_dataset(x, batch_size):\r\n\tsize_modulo = len(x) % batch_size # hack to ensure data is batches successfully\r\n\tif size_modulo != 0:\r\n\t\tx = x[:-size_modulo]\r\n\tpartitioned = np.split(x, batch_size)\r\n\treturn partitioned", "def __getitem__(self,k):\n if type(k) is IntType: return self.data[k, 0]\n \n vec = [type(x) is SliceType for x in k]\n \n if True in vec: #suppose only one slice\n ii=vec.index(True)\n indices=[]\n k = list(k)\n import numpy\n rep = numpy.zeros((self.dims[ii],), 'd')\n for i in range(self.dims[ii]):\n k[ii] = i\n rep[i] = self.data[self.comp(k), 0]\n return rep\n else:\n return self.data[self.comp(k), 0]", "def split_data(data, squeeze=False):\n vdata = np.atleast_2d(data)\n nr_freqs = int(vdata.shape[1] / 2)\n part1 = vdata[:, 0:nr_freqs]\n part2 = vdata[:, nr_freqs:]\n if(squeeze):\n part1 = part1.squeeze()\n part2 = part2.squeeze()\n return part1, part2", "def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]", "def get(self):\n return np.hstack((self.data[:, self.cur:], self.data[:, :self.cur])) #Concatena los datos en horizontal", "def split_data_set(data_set, index, value, part=0):\n # save the subdataset\n res_data_set = []\n \n for entry in data_set:\n # find the data set to the left of the partition point\n if part == 0 and float(entry[index])<= value: #求划分点左侧的数据集\n reduced_entry = entry[:index]\n # after partitioning, the value of the index column in the data is removed\n reduced_entry.extend(entry[index + 1:]) \n res_data_set.append(reduced_entry)\n # find the data set to the right of the partition point\n if part ==1 and float(entry[index])> value: \n reduced_entry = entry[:index]\n reduced_entry.extend(entry[index + 1:])\n res_data_set.append(reduced_entry)\n return res_data_set", "def as_list(self) -> List[int]:\n return self.my_partition", "def segmentArray(array, pos = 0):\n\t\n#\tfrom jot.basic.countDistinctValues import countDistinctValues\n\t\n\tfinalarray = []\n\tprovarray = list(countDistinctValues(array, pos).keys())\n\t\n\tfor i in provarray:\n\t\n\t\ttemparray = []\n\t\t\n\t\tfor j in array:\n\t\t\n\t\t\tif i == j[0]:\n\t\t\t\ttemparray.append(j[1:])\n\t\t\t\t\n\t\tfinalarray.append(temparray)\n\t\t\n\treturn finalarray", "def GetDataSlice(vDataSet,z,c,t):\r\n dtype = GetType(vDataSet)\r\n if dtype == np.uint8 or dtype == np.uint16:\r\n arr = np.array(vDataSet.GetDataSliceShorts(z,c,t),dtype)\r\n else:\r\n arr = np.array(vDataSet.GetDataSliceFloats(z,c,t),dtype)\r\n return arr.swapaxes(0,1)", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n 
original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n 
else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "def core_slices(self, chunk):\n intersect_slices = []\n for s, b, olap, idx in zip(chunk.slices, self.bounds, self.overlap, range(0, len(chunk.slices))):\n if s.start == b.start:\n intersect_slices.append(slice(s.start + olap, s.stop))\n elif s.stop == b.stop:\n intersect_slices.append(slice(s.start, s.stop - olap))\n else:\n intersect_slices.append(s)\n\n return tuple(self.remove_chunk_overlap(chunk, intersect_slices))", "def paginator_slice(self, pageno):\n return self._data[pageno * self.height : pageno * self.height + self.height]", "def slice_data(data, start, end):\n rtemp = data[0][start:end]\n if len(data) == 4:\n r, s, A, As = data\n stemp = s[start:end]\n Atemp = A[start:end]\n Astemp = As[start:end]\n datatemp = [rtemp, stemp, Atemp, Astemp]\n else:\n r, h, dMdh = data\n htemp = h[start:end]\n dMdhtemp = dMdh[start:end]\n datatemp = [rtemp, htemp, dMdhtemp]\n return datatemp", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def write_subarray(data, z_arr, start):\n stop = tuple(s+c for s, c in zip(start, data.shape))\n assert start[0] < stop[0]\n assert start[1] < stop[1]\n assert start[2] < stop[2]\n z_arr[start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]] = data", "def array_partition_views(array, partition):\n views = dictlist.copy(partition)\n pathsshapes = sorted(list(dictlist.leafs(partition)))\n\n n_used = 0\n for path, shape in pathsshapes:\n item = dictlist.get(partition, path)\n shape = (item,) if isinstance(item, (int, long)) else item\n size = int(np.prod(shape))\n dictlist.set_(views, path, array[n_used:n_used + size].reshape(shape))\n n_used += size\n\n return views", "def findSubArray(list):\n # converts number list to a list of 0,1 indicated by its primality \n binaryArray = [int(isPrime(k)) for k in list]\n \n # explore subarrays from biggest (length of list) to smallest (length=2)\n # breaks when one subarray matches the requirements \n lengthSubArray = len(binaryArray)\n found = False\n while( lengthSubArray > 1 and not found ):\n \n # move startIndex to create subarrays from same length\n numberSubArrays = len(binaryArray) - lengthSubArray + 1\n for startIndex in range(0, numberSubArrays):\n endIndex = startIndex + lengthSubArray\n binarySubArray = binaryArray[startIndex:endIndex]\n \n # verify condition requested in subarray is met\n found = sum(binarySubArray) > 
(lengthSubArray / 2)\n if found: break\n \n lengthSubArray -= 1 \n \n # returns the subarray using the indexes from the binarySubArray found\n return list[startIndex:endIndex]", "def batch_split(self) -> np.array:\n pass", "def subsampleData(self, count):\n size = 0\n for block in self.blocks: size += len(block[1])\n subset = numpy.random.permutation(size)[:count]\n subset.sort()\n\n pos = 0\n index = 0\n ret = Dataset()\n for block in self.blocks:\n while subset[index]<(pos+len(block[1])):\n loc = subset[index] - pos\n ret.add(block[0][loc,:], block[1][loc])\n index += 1\n if index==subset.shape[0]: return ret\n pos += len(block[1])\n \n return ret", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def _chunk_data(X, slices):\n\n # from object array to list\n slices = [sl for sl in slices if len(sl)]\n selected_times = np.hstack([np.ravel(sl) for sl in slices])\n start = np.min(selected_times)\n stop = np.max(selected_times) + 1\n slices_chunk = [sl - start for sl in slices]\n X_chunk = X[:, :, start:stop]\n return X_chunk, slices_chunk", "def __getslice__(self, i, j):\n if self.__pepth__ != 0:\n if '__warned__' not in plist.__getslice__.__dict__:\n qj('Slicing of inner plist elements with negative indices in python 2.7 does not work, and the error cannot be detected or corrected!\\n'\n 'Instead of slicing with one or two arguments: `plist._[-2:]`, use the three argument slice: `plist._[-2::1]`.\\n'\n 'This avoids the broken code path in the python compiler.', 'WARNING!')\n plist.__getslice__.__dict__['__warned__'] = True\n return plist.__getattr__(self, '__getslice__')(i, j)\n try:\n if self is self.__root__:\n return plist(list.__getslice__(self, i, j))\n return plist(list.__getslice__(self, i, j), root=plist(list.__getslice__(self.__root__, i, j)))\n except Exception:\n return plist.__getitem__(self, slice(i, j))", "def __getitem__(self, keep):\n ndim = len(self.dataset.shape)\n # Ensure that keep is a tuple (then turn it into a list to simplify further processing)\n keep = list(keep) if isinstance(keep, tuple) else [keep]\n # The original keep tuple will be passed to data transform chain\n original_keep = tuple(keep)\n # Ensure that keep is same length as data dimension (truncate or pad with blanket slices as necessary)\n keep = keep[:ndim] + [slice(None)] * (ndim - len(keep))\n # Map current selection to original data indices based on any existing initial selection, per data dimension\n keep = [(dkeep if dlookup is None else dlookup[dkeep]) for dkeep, dlookup in zip(keep, self._lookup)]\n # Iterate over dimensions of dataset, storing information on selection on each dimension:\n # `selection` is a list with one element per dimension; each element is a list of contiguous segments along\n # the dimension, and each segment is represented by a tuple of 3 elements:\n # (dataset selection, post-selection, output array selection)\n # Similarly, `segment_sizes` is a list of lists of segment lengths (empty lists for scalar-selected dimensions)\n selection, segment_sizes = [], []\n for dim_keep, dim_len in zip(keep, self.dataset.shape):\n if np.isscalar(dim_keep):\n # If selection is a scalar, pass directly to dataset selector and remove dimension from output\n selection.append([(dim_keep, None, None)])\n segment_sizes.append([])\n elif isinstance(dim_keep, slice):\n # If selection is a slice, pass directly to dataset selector without post-selection\n start, 
stop, stride = dim_keep.indices(dim_len)\n segm_size = len(range(start, stop, stride))\n selection.append([(slice(start, stop, stride), slice(None), slice(0, segm_size, 1))])\n segment_sizes.append([segm_size])\n elif len(dim_keep) == 0:\n # If selection is empty, pass to post-selector, as HDF5 datasets do not support zero-length selection\n selection.append([(slice(0, 1, 1), slice(0, 0, 1), slice(0, 0, 1))])\n segment_sizes.append([0])\n else:\n # Anything else is advanced indexing via bool or integer sequences\n dim_keep = np.atleast_1d(dim_keep)\n # Turn boolean mask into integer indices (True means keep that index)\n if dim_keep.dtype == bool and len(dim_keep) == dim_len:\n dim_keep = np.nonzero(dim_keep)[0]\n elif not np.all(dim_keep == np.unique(dim_keep)):\n raise TypeError('LazyIndexer cannot handle duplicate or unsorted advanced integer indices')\n # Split indices into multiple contiguous segments (specified by first and one-past-last data indices)\n jumps = np.nonzero(np.diff(dim_keep) > 1)[0]\n first = [dim_keep[0]] + dim_keep[jumps + 1].tolist()\n last = dim_keep[jumps].tolist() + [dim_keep[-1]]\n segments = np.c_[first, np.array(last) + 1]\n if len(dim_keep) > 0.2 * dim_len and len(segments) > 1:\n # If more than 20% of data are selected in 2 or more separate segments (the Ratcliffian benchmark),\n # select data at dataset level with a single slice spanning segments and then postselect the ndarray\n selection.append([(slice(segments[0, 0], segments[-1, 1], 1),\n dim_keep - dim_keep[0], slice(0, len(dim_keep), 1))])\n segment_sizes.append([len(dim_keep)])\n else:\n # Turn each segment into a separate slice at dataset level without post-selection,\n # and construct contiguous output slices of the same segment sizes\n segm_sizes = [end - start for start, end in segments]\n segm_starts = np.cumsum([0] + segm_sizes)\n selection.append([(slice(start, end, 1), slice(None), slice(segm_starts[n], segm_starts[n + 1], 1))\n for n, (start, end) in enumerate(segments)])\n segment_sizes.append(segm_sizes)\n # Short-circuit the selection if all dimensions are selected with scalars (resulting in a scalar output)\n if segment_sizes == [[]] * ndim:\n out_data = self.dataset[tuple([select[0][0] for select in selection])]\n else:\n # Use dense N-dimensional meshgrid to slice data set into chunks, based on segments along each dimension\n chunk_indices = np.mgrid[[slice(0, len(select), 1) for select in selection]]\n # Pre-allocate output ndarray to have the correct shape and dtype (will be at least 1-dimensional)\n out_data = np.empty([np.sum(segments) for segments in segment_sizes if segments], dtype=self.dataset.dtype)\n # Iterate over chunks, extracting them from dataset and inserting them into the right spot in output array\n for chunk_index in chunk_indices.reshape(ndim, -1).T:\n # Extract chunk from dataset (don't use any advanced indexing here, only scalars and slices)\n dataset_select = tuple([select[segment][0] for select, segment in zip(selection, chunk_index)])\n chunk = self.dataset[dataset_select]\n # Perform post-selection on chunk (can be fancier / advanced indexing because chunk is now an ndarray)\n post_select = [select[segment][1] for select, segment in zip(selection, chunk_index)]\n # If any dimensions were dropped due to scalar indexing, drop them from post_select/out_select tuples\n post_select = tuple([select for select in post_select if select is not None])\n # Do post-selection one dimension at a time, as ndarray does not allow simultaneous advanced indexing\n # on more than 
one dimension. This caters for the scenario where more than one dimension satisfies\n # the Ratcliffian benchmark (the only way to get advanced post-selection).\n for dim in range(len(chunk.shape)):\n # Only do post-selection on this dimension if non-trivial (otherwise an unnecessary copy happens)\n if not (isinstance(post_select[dim], slice) and post_select[dim] == slice(None)):\n # Prepend the appropriate number of colons to the selection to place it at correct dimension\n chunk = chunk[[slice(None)] * dim + [post_select[dim]]]\n # Determine appropriate output selection and insert chunk into output array\n out_select = [select[segment][2] for select, segment in zip(selection, chunk_index)]\n out_select = tuple([select for select in out_select if select is not None])\n out_data[out_select] = chunk\n # Apply transform chain to output data, if any\n return reduce(lambda data, transform: transform(data, original_keep), self.transforms, out_data)", "def rpartition(self, x):\n pass", "def partition(self, data, labels):\n\t\traise Exception(\"Not implmented\")", "def partition(self):\n return self.tag(\"partition\")", "def getacolslicetrip(self,first_,last_,subi,subj,val): # 3\n maxnumnz_ = self.getaslicenumnz(accmode.var,(first_),(last_))\n _copyback_subi = False\n if subi is None:\n subi_ = None\n else:\n try:\n subi_ = memoryview(subi)\n except TypeError:\n try:\n _tmparr_subi = array.array(\"i\",subi)\n except TypeError:\n raise TypeError(\"Argument subi has wrong type\")\n else:\n subi_ = memoryview(_tmparr_subi)\n _copyback_subi = True\n else:\n if subi_.format != \"i\":\n subi_ = memoryview(array.array(\"i\",subi))\n _copyback_subi = True\n if subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi has wrong length\")\n _copyback_subj = False\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n _copyback_subj = True\n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n _copyback_subj = True\n if subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj has wrong length\")\n _copyback_val = False\n if val is None:\n val_ = None\n else:\n try:\n val_ = memoryview(val)\n except TypeError:\n try:\n _tmparr_val = array.array(\"d\",val)\n except TypeError:\n raise TypeError(\"Argument val has wrong type\")\n else:\n val_ = memoryview(_tmparr_val)\n _copyback_val = True\n else:\n if val_.format != \"d\":\n val_ = memoryview(array.array(\"d\",val))\n _copyback_val = True\n if val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val has wrong length\")\n res = self.__obj.getacolslicetrip(first_,last_,maxnumnz_,len(subi),subi_,subj_,val_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_val:\n val[:] = _tmparr_val\n if _copyback_subj:\n subj[:] = _tmparr_subj\n if _copyback_subi:\n subi[:] = _tmparr_subi", "def get_partitioning(self):\n raise Exception(\"Unimplemented\")", "def get_data(self):\n return self.data[self._size:self._size + self._len]", "def getacolslicetrip(self,first_,last_,subi_,subj_,val_):\n maxnumnz_ = self.getacolslicenumnz((first_),(last_))\n _subi_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subi_ is not None and len(subi_) != (maxnumnz_):\n raise ValueError(\"Array argument subi is not long 
enough: Is %d, expected %d\" % (len(subi_),(maxnumnz_)))\n if isinstance(subi_,numpy.ndarray) and not subi_.flags.writeable:\n raise ValueError(\"Argument subi must be writable\")\n if isinstance(subi_, numpy.ndarray) and subi_.dtype is numpy.dtype(numpy.int32) and subi_.flags.contiguous:\n _subi_copyarray = False\n _subi_tmp = ctypes.cast(subi_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subi_ is not None:\n _subi_copyarray = True\n _subi_np_tmp = numpy.zeros(len(subi_),numpy.dtype(numpy.int32))\n _subi_np_tmp[:] = subi_\n assert _subi_np_tmp.flags.contiguous\n _subi_tmp = ctypes.cast(_subi_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subi_copyarray = False\n _subi_tmp = None\n \n _subj_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and subj_ is not None and len(subj_) != (maxnumnz_):\n raise ValueError(\"Array argument subj is not long enough: Is %d, expected %d\" % (len(subj_),(maxnumnz_)))\n if isinstance(subj_,numpy.ndarray) and not subj_.flags.writeable:\n raise ValueError(\"Argument subj must be writable\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _val_minlength = (maxnumnz_)\n if (maxnumnz_) > 0 and val_ is not None and len(val_) != (maxnumnz_):\n raise ValueError(\"Array argument val is not long enough: Is %d, expected %d\" % (len(val_),(maxnumnz_)))\n if isinstance(val_,numpy.ndarray) and not val_.flags.writeable:\n raise ValueError(\"Argument val must be writable\")\n if isinstance(val_, numpy.ndarray) and val_.dtype is numpy.dtype(numpy.float64) and val_.flags.contiguous:\n _val_copyarray = False\n _val_tmp = ctypes.cast(val_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif val_ is not None:\n _val_copyarray = True\n _val_np_tmp = numpy.zeros(len(val_),numpy.dtype(numpy.float64))\n _val_np_tmp[:] = val_\n assert _val_np_tmp.flags.contiguous\n _val_tmp = ctypes.cast(_val_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _val_copyarray = False\n _val_tmp = None\n \n surp_ = ctypes.c_int64(_subi_minlength)\n res = __library__.MSK_XX_getacolslicetrip(self.__nativep,first_,last_,maxnumnz_,ctypes.byref(surp_),_subi_tmp,_subj_tmp,_val_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _subi_copyarray:\n subi_[:] = _subi_np_tmp\n if _subj_copyarray:\n subj_[:] = _subj_np_tmp\n if _val_copyarray:\n val_[:] = _val_np_tmp", "def subFarms(self,partition,full_name=None):\n if self.inUse.value() is None: self.load()\n got = []\n for i in xrange(len(self.inUse.data)):\n if self.inUse.data[i]==partition:\n if full_name:\n got.append(self.name+'_'+self.subfarms.data[i])\n else:\n got.append(self.subfarms.data[i])\n return got", "def sub_columns(arr, sub_size):\n return sub_rows(arr.T, sub_size)", "def subsection_at(self, index):\n return self.child_at(index)", "def partitions(self):\n self._get_latest_content()\n return self._data.get('partitions', [])", "def slice_during(self, e):\r\n\r\n if not isinstance(e, Epochs):\r\n raise ValueError('e has to be of Epochs 
type')\r\n\r\n if e.data.ndim > 0:\r\n raise NotImplementedError('e has to be a scalar Epoch')\r\n\r\n if self.ndim != 1:\r\n e_s = 'slicing only implemented for 1-d TimeArrays'\r\n return NotImplementedError(e_s)\r\n i_start = self.index_at(e.start)\r\n i_stop = self.index_at(e.stop)\r\n if e.start > self[i_start]: # make sure self[i_start] is in epoch e\r\n i_start += 1\r\n if e.stop > self[i_stop]: # make sure to include self[i_stop]\r\n i_stop += 1\r\n\r\n return slice(i_start, i_stop)", "def segment(data):", "def _get_slice(segments, shape):\n\n if not (1 <= len(shape) <= 2):\n raise ValueError('Cannot segment array of shape: %s' % str(shape))\n else:\n size = shape[0]\n slice_length = np.ceil(float(size) / segments)\n start_idx = 0\n end_idx = slice_length\n while start_idx < size:\n if len(shape) == 1:\n yield slice(start_idx, end_idx)\n else:\n yield (slice(start_idx, end_idx), slice(None))\n start_idx = end_idx\n end_idx = min(start_idx + slice_length, size)", "def to_ndarray(self):\n # Create an ndarray of the right shape, filled with self.defval.\n ndshape = type(self).flatten_shape(self.shape)\n res = np.full(ndshape, self.defval, dtype=self.dtype)\n if 0 in ndshape:\n return res\n shp, qhp = type(self)._sorted_shape_qhape(tensor=self)\n # ranges is like shape, but every number d is replaced by a tuple\n # (a, a+d) where a is the sum of all the previous entries in the same\n # dim.\n ranges = []\n for dim in shp:\n prv = dim[0]\n r = [(0, prv)]\n for d in dim[1:]:\n nxt = prv + d\n r.append((prv, nxt))\n prv = nxt\n ranges.append(r)\n # Copy the elements of each sector to the right place in the result.\n for k, v in self.sects.items():\n slc = ()\n for i, qnum in enumerate(k):\n r = ranges[i][qhp[i].index(qnum)]\n slc += (slice(r[0], r[1]),)\n res[slc] = v\n return res", "def slice_dims(data_array: sc.DataArray, slices: Dict[str, slice]) -> sc.DataArray:\n out = data_array\n for dim, sl in slices.items():\n out = out[dim, sl]\n return out", "def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)", "def __getitem__(self, data):\n i,j = data\n return self._data[i][j]", "def _get_item(self, index):\n data, label = self.data[index], self.label[index]\n coordmax = np.max(data, axis=0)\n coordmin = np.min(data, axis=0)\n nsubvolume_x = np.ceil((coordmax[0]-coordmin[0])/1.5).astype(np.int32)\n nsubvolume_y = np.ceil((coordmax[1]-coordmin[1])/1.5).astype(np.int32)\n batch_data, batch_label = [], []\n for i in range(nsubvolume_x):\n for j in range(nsubvolume_y):\n curmin = coordmin + [i*1.5, j*1.5, 0]\n curmax = coordmin+ [(i+1)*1.5, (j+1)*1.5, coordmax[2]-coordmin[2]]\n crop_ids = np.sum((data>=(curmin-0.2)) * (data<=(curmax+0.2)), axis=1) == 3\n if sum(crop_ids) == 0: continue\n crop_data = data[crop_ids]\n crop_label = label[crop_ids]\n mask = np.sum((crop_data>=(curmin-0.001)) * (crop_data<=(curmax+0.001)), axis=1) == 3\n ids = np.random.choice(crop_label.size, self.npoints, replace=True)\n this_data = crop_data[ids]\n this_label = crop_label[ids]\n this_mask = mask[ids]\n if sum(this_mask) * 1. 
/ this_mask.size < 0.01: continue\n this_label *= this_mask\n if self.normalize:\n this_data = utils.normalize_point_cloud(this_data)\n batch_data.append(this_data[None,:,:])\n batch_label.append(this_label[None,:])\n batch_data = np.concatenate(tuple(batch_data), axis=0)\n batch_label = np.concatenate(tuple(batch_label), axis=0)\n return batch_data, batch_label", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def partition(array, first, last):\n # partition up until final value\n pivot = array[last]\n i = first - 1\n\n for count in range(first, last):\n # split array\n if array[count] < pivot:\n i += 1\n # assign array positions\n array[i],array[count] = array[count],array[i]\n # reassign\n array[i+1],array[last] = array[last],array[i+1]\n return (i+1)", "def get_split_data(self):\n X, y, _, _ = self.get_subsets()\n return train_test_split(X, y, test_size=0.3, random_state=42)" ]
[ "0.68206084", "0.6535615", "0.6457086", "0.6347438", "0.62131214", "0.61877143", "0.61062545", "0.6101903", "0.60762966", "0.6068932", "0.6029079", "0.6012183", "0.59961045", "0.59653914", "0.5928066", "0.58841175", "0.58761495", "0.58514863", "0.5848823", "0.5791028", "0.5779958", "0.57720095", "0.5763268", "0.5713823", "0.5713595", "0.56957734", "0.5685574", "0.5683506", "0.56661355", "0.5665764", "0.56596345", "0.5659613", "0.56538606", "0.56397116", "0.56286144", "0.56241447", "0.5622422", "0.5611998", "0.5586044", "0.5581166", "0.55736196", "0.5570869", "0.5562308", "0.55604047", "0.5557058", "0.5535347", "0.5523683", "0.5520575", "0.5519391", "0.5514903", "0.5513096", "0.54939085", "0.5488341", "0.54794407", "0.5473635", "0.54733855", "0.54686856", "0.5453024", "0.5446765", "0.54447645", "0.544219", "0.5428138", "0.54233164", "0.53955066", "0.5367499", "0.5364493", "0.536259", "0.53601974", "0.5352783", "0.53390896", "0.53365284", "0.5336055", "0.53340626", "0.5333585", "0.5325604", "0.53202784", "0.5316346", "0.53149754", "0.5312976", "0.5308708", "0.5307463", "0.53028697", "0.529681", "0.5295519", "0.52954924", "0.5295376", "0.52942693", "0.52923304", "0.5271146", "0.52641356", "0.5260086", "0.52566856", "0.5254645", "0.5253552", "0.52519685", "0.5250993", "0.52420545", "0.52414733", "0.5240278", "0.52392286" ]
0.7297865
0
Change the axis names. The axis names are arbitrary, so mapping them to another arbitrary collection does not change the data array values, units, nor axis order.
def change_axis_names(self, axis_map):\n axes = self.axes\n\n # Partition axes\n self.axes = [axis_map[axis] for axis in axes]\n\n # Flipped axes\n flip = self.flip\n if flip:\n self.flip = [axis_map[axis] for axis in flip]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setAxisName(name, axes='XYZ'):\n dislin.name(name, axes)", "def setAxesNames(self):\n \n labels = ['T', 'Z', 'Y', 'X'] + [chr(ord('S')-i) for i in xrange(18)]\n if (len(self.axisList) >= 4):\n i = 0\n else:\n i = 4 - len(self.axisList)\n \n for axis in self.axisList:\n self.axesNames.append(labels[i] + ' - ' + axis.id)\n i += 1", "def set_index_names(self, names, axis=0):\n self.get_axis(axis).names = names", "def axesNames(self, data, info):\n return []", "def setaxesnames(self):\n if not self._axesnames or self.prop['skipsai']:\n return\n debug('ControllerStartup.setaxesnames()')\n oldaxes = self.pidevice.qSAI_ALL()\n for i, newaxis in enumerate(self.axesnames):\n if newaxis != oldaxes[i] or self.prop['forcesai']:\n setstage = False\n if self.pidevice.HasqCST():\n if self.pidevice.qCST()[oldaxes[i]] == 'NOSTAGE':\n try:\n debug('try rename NOSTAGE to TEMP (0x3C)')\n self.pidevice.SPA(oldaxes[i], 0x3c, 'TEMP')\n setstage = True\n except GCSError:\n pass\n self.pidevice.SAI(oldaxes[i], newaxis)\n if setstage:\n self.pidevice.SPA(newaxis, 0x3c, 'NOSTAGE')\n debug('restore NOSTAGE (0x3C)')", "def _default_axis_names(n_dims):\n _DEFAULT_NAMES = (\"z\", \"y\", \"x\")\n return _DEFAULT_NAMES[-n_dims:]", "def customAxisNames(self):\n return []", "def setAllAxisUnits(self,units): \n self.__axis_units__ = units", "def setAxisUnits(self, dim, units): \n try:\n self.__axis_units__[dim] = units\n except IndexError:\n self.__axis_units__.append(units)", "def axesnames(self, axesnames):\n if axesnames is None:\n self._axesnames = None\n else:\n assert isinstance(axesnames, list), 'axesnames must be list'\n self._axesnames = axesnames\n debug('ControllerStartup.axesnames = %s', itemstostr(self._axesnames))", "def set_index_name(self, name, axis=0):\n self.get_axis(axis).name = name", "def setAllAxisLabels(self, labels):\n self.__axis_labels__ = labels", "def _update_axes(self):\n data_shape = self.data.shape\n if len(self.axes) < self.data.ndim + 1:\n self._axes.append(Axis())\n for index in range(self.data.ndim):\n if len(self.axes[index].values) != data_shape[index]:\n self.axes[index].values = np.arange(data_shape[index],\n dtype=np.float64)", "def process_custom_axes(axis_names):\n return axis_names.strip().strip(\"'\").strip('\"').split(',')", "def axis_name(self):\n return self._axis_name", "def setAxisNameColor(idx=-1, axes='XYZ'):\n dislin.axclrs(idx, 'Name', axes)", "def setAxisNameJustification(jus, axes='XYZ'):\n dislin.namjus(justdict[jus],axes)", "def axesnames(self):\n return self._axesnames", "def _handle_setup_axis(self, axis_args):\n axis_name = axis_args['name']\n axes_dict = self.server.axes\n\n if axis_name not in [name for name, _ in axes_dict.items()]:\n print \"Adding a new axis:\", axis_name\n axis_count = len(axes_dict)\n newaxis = self.server.figure.add_subplot(axis_count+1, 1, axis_count+1)\n axes_dict[axis_name] = newaxis\n axes_dict[axis_name].grid(True)\n axes_dict[axis_name].set_xlabel(axis_args['x_label'])\n axes_dict[axis_name].set_ylabel(axis_args['y_label'])\n # TODO: support *.set_title(\"Title\")\n if FLAGS.logy:\n axes_dict[axis_name].set_yscale('log', nonposy='clip')\n\n if axis_count != 0:\n # Resize other axes if the above wasn't the first.\n axis_count = len(axes_dict)\n for row,(name, _) in enumerate(axes_dict.items(), 1):\n print name, axis_count, row\n axes_dict[name].change_geometry(axis_count, 1, row)", "def setAxisLabel(self, dim, label): \n try:\n self.__axis_labels__[dim] = label\n except IndexError:\n self.__axis_labels__.append(label)", "def 
convert_axis( mv, axisold, axisindnew ):\n (axisnew, indexina3) = axisindnew\n axes = allAxes(mv)\n kold = None\n for k in range(len(axes)):\n if axes[k]==axisold: kold=k\n if kold==None:\n print \"ERROR. convert_axis cannot find axis\",axisold,\" in variable\",mv\n if len(axisold)==len(axisnew):\n mv.setAxis( kold, axisnew )\n return\n # Here's what we would do in 1-D:\n # newdata = ma.ones(len(axisnew))*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # for i in range(len(axisold)):\n # newdata[ indexina3[i] ] = ma[i]\n # newmv = cdms2.createVariable( newdata, id=mv.id )\n # >1-D is the same idea, but more dimensions are coming along for the ride,\n # making it more complicated...\n shape0 = mv.shape\n shape0[kold] = len(axisnew)\n newdata = ma.ones(shape0)*mv.missing_value # Note that a FileVariable's missing_value is a tuple.\n # We want to copy ma to newdata - except that we need indirect indexing for the kold-th axis.\n # There seems to be nothing in numpy for treating one axis differently from the rest\n # (except for ellipsis, but it makes sense to use only one ellipsis and we would need two here).\n # The following will do the job. It would be very slow for an array with many big dimensions,\n # but the arrays here have already been reduced for graphics; the index sets will be small or\n # empty...\n ranges = map( range, shape0[0:kold] )\n for i in range(len(axisold)):\n for idx in apply(itertools.product,ranges):\n idx = idx + [indexina3(i)] + [Ellipsis]\n idxo = idx + [i] + [Ellipsis]\n newdata[ tuple(idx) ] = mv[idxo]\n newmv = cdms2.createVariable( newdata, id=mv.id )", "def swapaxes(self, a1, a2):\n an = self.axes_names[:]\n ia1, ia2 = self.get_axis_id(a1), self.get_axis_id(a2)\n an[ia2], an[ia1] = an[ia1], an[ia2]\n return xndarray(np.swapaxes(self.data, ia1, ia2), an, self.axes_domains,\n self.value_label, self.meta_data)", "def set_label_names(self, x: Union[np.ndarray, Dict[int, str]]) -> None:\n if isinstance(x, np.ndarray):\n label_names = x\n elif isinstance(x, dict):\n label_names = np.full(max(x.keys()) + 1, \"\", dtype=\"object\")\n label_names[list(x.keys())] = list(x.values())\n else:\n raise ValueError(f\"Unsupported {type(x)=}\")\n self._label_names_array = label_names", "def _set_axis(axis):\n\n def axis_setter(self, labels):\n new_qc = DataFrameDefault.register(pandas.DataFrame.set_axis)(\n self, axis=axis, labels=labels\n )\n self.__dict__.update(new_qc.__dict__)\n\n return axis_setter", "def _update_axislabels(self, x='x', **kwargs):\n if x not in 'xy':\n return\n # Update label on this axes\n axis = getattr(self, x + 'axis')\n axis.label.update(kwargs)\n kwargs.pop('color', None)\n\n # Defer to parent (main) axes if possible, then get the axes\n # shared by that parent\n ax = self._panel_parent or self\n ax = getattr(ax, '_share' + x) or ax\n\n # Apply to spanning axes and their panels\n axs = [ax]\n if getattr(ax.figure, '_span' + x):\n s = axis.get_label_position()[0]\n if s in 'lb':\n axs = ax._get_side_axes(s)\n for ax in axs:\n getattr(ax, x + 'axis').label.update(kwargs) # apply to main axes\n pax = getattr(ax, '_share' + x)\n if pax is not None: # apply to panel?\n getattr(pax, x + 'axis').label.update(kwargs)", "def setAxisNameDistance(dist,axes='XYZ'):\n dislin.namdis(dist, axes)", "def setIndexNames(self):\n self.xi = self.i1\n self.yi = self.i2", "def set_axis_label(self, label, axis):\n if axis == 'x':\n self.axplot.set_xlabel(label)\n elif axis == 'y':\n self.axplot.set_ylabel(label)\n else:\n errmsg = 'Valid axis names 
are x and y.'\n raise ValueError(errmsg)", "def _set_plot_axes_labels(self, data, viewer_id):\n viewer = self._viewer_by_id(viewer_id)\n\n # Get the units of the data to be loaded.\n spectral_axis_unit_type = data.spectral_axis.unit.physical_type.title()\n flux_unit_type = data.flux.unit.physical_type.title()\n\n if data.spectral_axis.unit.is_equivalent(u.m):\n spectral_axis_unit_type = \"Wavelength\"\n elif data.spectral_axis.unit.is_equivalent(u.pixel):\n spectral_axis_unit_type = \"pixel\"\n\n viewer.figure.axes[0].label = f\"{spectral_axis_unit_type} [{data.spectral_axis.unit.to_string()}]\"\n viewer.figure.axes[1].label = f\"{flux_unit_type} [{data.flux.unit.to_string()}]\"\n\n # Make it so y axis label is not covering tick numbers.\n viewer.figure.axes[1].label_offset = \"-50\"", "def setnames(self, *args, **kwargs):\n return _coordsys.coordsys_setnames(self, *args, **kwargs)", "def set_axis(self, axis_list):\n if self.table_ready:\n final_axis_list = []\n for i, axis in enumerate(axis_list):\n if axis:\n final_axis_list.append(\"1 \" + str(i + 1))\n else:\n final_axis_list.append(\"0 \" + str(i + 1))\n\n command = self.build_command(\n self.device, (\"set_axis\", final_axis_list), single_commands=True\n )\n self.vcw.write(self.device, command)", "def canonicalize_axis_name(axis_name):\n if not axis_name:\n return []\n if (isinstance(axis_name, str) or\n not isinstance(axis_name, collections.Iterable)):\n return [axis_name]\n return list(axis_name)", "def _declare_auto_axes_idx(self):\n if not self.axes_idx:\n self.axes_idx = BiMapping(to_first=range(len(self.name_elements)), to_second=range(len(self.name_elements)))", "def SwapAxis(self, axis0, axis1):\n\n axis0 = int(axis0)\n axis1 = int(axis1)\n\n self.points[:,[axis0,axis1]] = self.points[:,[axis1,axis0]]", "def set_axis_label(\n self,\n axis: Union[int, Sequence[int]],\n label: Union[str, Sequence[str]],\n ):\n if isinstance(axis, Integral):\n axis = assert_axis_in_bounds(axis, self.ndim)\n if self.axis_labels[axis] != str(label):\n full_axis_labels = list(self.axis_labels)\n full_axis_labels[axis] = str(label)\n self.axis_labels = full_axis_labels\n self.last_used = axis\n else:\n full_axis_labels = list(self.axis_labels)\n # cast label to list for list comparison below\n label = list(label) # type: ignore\n axis = tuple(axis) # type: ignore\n if len(axis) != len(label):\n raise ValueError(\n trans._(\"axis and label sequences must have equal length\")\n )\n if label != full_axis_labels:\n for ax, val in zip(axis, label):\n ax = assert_axis_in_bounds(int(ax), self.ndim)\n full_axis_labels[ax] = val\n self.axis_labels = full_axis_labels", "def axisinfo(unit, axis):\n if isinstance(unit, tuple):\n unit = unit[0]\n unit_obj = unit if isinstance(unit, Unit) else Unit(unit)\n name = unyt_arrayConverter._axisnames.get(axis, \"\")\n if unit_obj.is_dimensionless:\n label = name\n else:\n name += \" \"\n unit_str = unit_obj.latex_representation()\n if unyt_arrayConverter._labelstyle == \"[]\":\n label = name + \"$\\\\left[\" + unit_str + \"\\\\right]$\"\n elif unyt_arrayConverter._labelstyle == \"/\":\n axsym = \"$q_{\\\\rm\" + axis.axis_name + \"}$\"\n name = axsym if name == \" \" else name\n if \"/\" in unit_str:\n label = name + \"$\\\\;/\\\\;\\\\left(\" + unit_str + \"\\\\right)$\"\n else:\n label = name + \"$\\\\;/\\\\;\" + unit_str + \"$\"\n else:\n label = name + \"$\\\\left(\" + unit_str + \"\\\\right)$\"\n return AxisInfo(label=label.strip())", "def set_orientation(self, axes):\n if debug:\n logger.debug('set_orientation 
...')\n logger.debug('%s -> %s', str(self.axes_names), str(axes))\n\n if set(axes) != set(self.axes_names):\n raise Exception('Required orientation %s does not contain '\n 'all axes %s' % (str(axes), str(self.axes_names)))\n\n if axes == self.axes_names: # already in the asked orientation\n return\n\n for i, axis in enumerate(axes):\n logger.debug('Rolling axis %s, cur pos=%d -> dest pos=%d',\n axis, self.axes_names.index(axis), i)\n logger.debug('Shape: %s', str(self.data.shape))\n cur_i = self.axes_names.index(axis)\n self.data = np.rollaxis(self.data, cur_i, i)\n self.axes_names.pop(cur_i)\n self.axes_names.insert(i, axis)\n logger.debug('After rolling. Shape: %s, new axes: %s',\n str(self.data.shape), str(self.axes_names))\n logger.debug('')\n\n self.axes_ids = dict([(a, i) for i, a in enumerate(self.axes_names)])", "def replace_axis(self, dim:NamedIndex,\n mapping_or_old:'Union[Mapping[NamedIndex, NamedIndex], NamedIndex]',\n new:'Optional[NamedIndex]'=None):\n\n axes = self[dim] # disable idx_dim access\n # axes = self.get(dim) or self._dim_axes[dim] # dim:'Union[int, NamedIndex]'\n is_tuple_axes = is_namedtuple(axes)\n assert isinstance(axes, dict) or is_tuple_axes, (\n f'unnamed dim({dim!r}) cannot be renamed')\n\n axes_keys = axes._fields if is_tuple_axes else axes.keys()\n axes_iter = iter(zip(axes._fields, axes)) if is_tuple_axes else axes.items()\n axes_ = OrderedDict()\n\n if new is None:\n assert isinstance(mapping_or_old, dict), (\n f\"'mapping_or_old'({type(mapping_or_old)}) is expected to be a dict \"\n \"when 'new' is None\")\n\n mapping = mapping_or_old\n for axis, index in axes_iter:\n axis = mapping.get(axis, axis)\n assert axis not in axes_, f'axis {axis!r} in mapping is conflicted'\n\n axes_[axis] = index\n else:\n assert new not in axes_keys, f'new axis({new!r}) is confilicted'\n\n old = mapping_or_old\n for axis, index in axes_iter:\n axes_[new if axis == old else axis] = index\n\n axes_ = namedtuple(dim, axes_.keys())(**axes_) if is_tuple_axes else type(axes)(axes_)\n ret = OrderedDict()\n for dim_, axes in self.items():\n ret[dim_] = axes_ if dim_ == dim else axes\n\n return type(self)(ret)", "def set_nAxis(self, newval):\n rest_val = str(newval)\n return self._setAttr(\"nAxis\", rest_val)", "def set_axes(self, a):\r\n self.axes = a", "def axes(*x: Iterable[int]):\n return [_ti_core.Axis(i) for i in x]", "def label_axis(self, name, label):\n\n axis = self._find_axis(name)\n axis.axis_label = label", "def __init__(self, axes=()):\n self._axes = []\n self._dimension = 0\n for axis in axes:\n self.add_axis(axis)", "def _appendAxisDefinition(self, axis):\n length = len(axis)\n\n self.na_dict[\"NX\"].append(length)\n self.na_dict[\"XNAME\"].append(xarray_utils.getBestName(axis))\n\n # If only one item in axis values\n if length < 2:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist()) \n return\n\n incr = xarray_utils.get_interval(axis, 0, 1)\n\n for i in range(1, length):\n if (axis[i] - axis[i - 1]) != incr:\n self.na_dict[\"DX\"].append(0)\n self.na_dict[\"NXDEF\"].append(length)\n self.na_dict[\"X\"].append(axis.data.tolist())\n break\n\n else: # If did not break out of the loop\n max_length = length\n if length > 3: \n max_length = 3\n\n self.na_dict[\"DX\"].append(incr)\n self.na_dict[\"NXDEF\"].append(max_length)\n self.na_dict[\"X\"].append(axis[:max_length])", "def reorderAxesEvent(self):\n axisB = self.sender().text()\n self.myParent.swapAxes(self.axisName, axisB)\n 
self.myParent.setVistrailsVariableAxes()", "def replace_dim(self, old:NamedIndex, new:NamedIndex):\n\n assert new not in self, f'new dim({new!r}) is confilicted'\n\n ret = OrderedDict()\n for dim, axes in self.items():\n if dim == old:\n if is_namedtuple(axes):\n axes = namedtuple(new, axes._fields)(*axes)\n ret[new] = axes\n else:\n ret[old] = axes\n\n return type(self)(ret)", "def xaxis(self,label,units):\n if units != \"\": label = label + \" (\" + units + \")\"\n self.subplot.set_xlabel(label)\n pass", "def addAxes(self):\n numDims = len(self.relation.fieldNames) - 1\n angle = 360 / numDims\n axisDomains = self.relation.axisDomains\n for i in range(numDims):\n axis = PlotAxis(self)\n self.scene().addItem(axis)\n if self.axisAngles and i < len(self.axisAngles):\n axis.setRotation(self.axisAngles[i])\n else:\n axis.setRotation(angle * i)\n self.axes.append(axis)\n\n domain = axisDomains[i]\n text = PlotAxisLabel(\"{}\\n[{:.2f},{:.2f}]\".format(self.relation.fieldNames[i], domain[0], domain[1]))\n text.setFont(self.labelFont)\n self.axisLabels.append(text)\n text.setParentItem(axis)", "def default_units(x, axis):\n # In the case where the first matplotlib command is setting limits,\n # x may be a tuple of length two (with the same units).\n if isinstance(x, tuple):\n name = getattr(x[0], \"name\", \"\")\n units = x[0].units\n else:\n name = getattr(x, \"name\", \"\")\n units = x.units\n\n # maintain a mapping between Axis and name since Axis does not point to\n # its underlying data and we want to propagate the name to the axis\n # label in the subsequent call to axisinfo\n unyt_arrayConverter._axisnames[axis] = name if name is not None else \"\"\n return units", "def set_labels(ax, label_value, label_axis):\n if label_axis == 'x':\n ax.set_xticks(np.arange(len(label_value)))\n axis = ax.get_xticklabels()\n else:\n ax.set_yticks(np.arange(len(label_value)) + 1)\n axis = ax.get_yticklabels()\n\n # fetch labels\n labels = [items.get_text() for items in axis]\n\n # init a count variable\n if label_axis == 'x':\n count = 0\n else:\n count = len(label_value) - 1\n\n # iterate through all the labels and change the label name\n for i in range(len(labels)):\n labels[i] = label_value[count]\n\n if label_axis == 'x':\n count += 1\n else:\n count -= 1\n\n return labels", "def reset(self):\n # Don't reset axis labels\n self.range = ((0, 2, 1),) * self.ndim\n self.current_step = (0,) * self.ndim\n self.order = tuple(range(self.ndim))", "def new_axes(self, name):\n\n return self.figure.add_axes([0.05, 0.05, 0.9, 0.9], label=name)", "def rename_levels(self, name_dict, axis=1, inplace=False):\n\n def apply_func(obj_index):\n return index_fns.rename_levels(obj_index, name_dict)\n\n return self.apply_on_index(apply_func, axis=axis, inplace=inplace)", "def reorder_axes(cls, array: np.ndarray, axes: str) -> np.ndarray:\n return cls._reorder_axes(array, axes, cls.array_axis_order)", "def series_axis(self, series_axis):\n\n self.container['series_axis'] = series_axis", "def setAxisParts(lowx='all', lefty='all', upx='ticks', righty='ticks'):\n partdict = {'none':'NONE','lines':'LINE','ticks':'TICKS',\n 'labels':'LABELS', 'all':'NAME'} \n dislin.setgrf(partdict[lowx], partdict[lefty],\\\n partdict[upx], partdict[righty])", "def _default_axis_units(n_dims):\n return (\"-\",) * n_dims", "def _update_column_name(self, column, idx, old_name, name):\n dtype = self.dtype\n # Updating the names on the dtype should suffice\n dtype.names = dtype.names[:idx] + (name,) + dtype.names[idx + 1 :]", "def 
SetAxisOrientation(*args, **kwargs):\n return _gdi_.DC_SetAxisOrientation(*args, **kwargs)", "def setupVariableAxes(self):\n if self.var is None:\n return\n \n if (self.axisList is None):\n self.axisList = self.var.getAxisList()\n self.axisOrder = range(len(self.axisList))\n\n self.clear() \n self.setAxesNames()\n \n # Iterate through the variables axes & init each axis widget\n axisIndex = 0\n for axis, axisName in zip(self.axisList, self.axesNames):\n # Create the axis widget\n axisWidget = QAxis(axis, axisName, axisIndex, self)\n axisWidget.setAxisButtonText(axisName)\n self.axisWidgets.append(axisWidget)\n\n # Setup the layout for each axis\n row = self.gridLayout.rowCount()\n self.gridLayout.addWidget(axisWidget.getAxisButton(), row, 0)\n self.gridLayout.addWidget(axisWidget, row, 1) \n self.gridLayout.addWidget(axisWidget.getAxisOperationsButton(), row, 2)\n\n # Create separator line between each axis widget\n vline = QtGui.QFrame()\n vline.setFrameStyle(QtGui.QFrame.HLine | QtGui.QFrame.Sunken)\n self.gridLayout.addWidget(vline, row+1, 0, 1,\n self.gridLayout.columnCount())\n\n axisIndex += 1\n\n self.gridLayout.setRowStretch(self.gridLayout.rowCount(), 1)", "def _relabel(array, renames):\n\n att_renames = []\n dim_renames = []\n\n for k, v in renames.items():\n if k in array.att_names:\n att_renames.extend([k, v])\n elif k in array.dim_names:\n dim_renames.extend([k, v])\n else:\n raise ValueError(\"Invalid array attribute: %s\" % k)\n\n return array.attribute_rename(*att_renames).dimension_rename(*dim_renames)", "def change_figname(self, selection):\r\n # get desired aliases list\r\n if selection == \"Title\":\r\n options = self.titles\r\n elif selection == \"Axes\":\r\n options = self.axes\r\n else:\r\n options = self.fignums\r\n # replace current list with desired aliases\r\n for index, option in enumerate(options):\r\n item = self.listWidget.item(index)\r\n item.setText(option)", "def get_axis_labels(\n self,\n x_label: float | str | Mobject = \"x\",\n y_label: float | str | Mobject = \"y\",\n ) -> VGroup:\n\n self.axis_labels = VGroup(\n self.get_x_axis_label(x_label),\n self.get_y_axis_label(y_label),\n )\n return self.axis_labels", "def __init__(self, narray, axes_names=None, axes_domains=None,\n value_label=\"value\", meta_data=None):\n logger.debug('xndarray.__init__ ...')\n\n narray = np.asarray(narray)\n self.data = narray\n self.value_label = value_label\n self.meta_data = meta_data\n self.has_deprecated_xml_header = True\n\n nbDims = self.data.ndim\n\n if axes_names is None:\n self.axes_names = ['dim' + str(i) for i in xrange(nbDims)]\n else:\n assert type(axes_names) == list\n if len(axes_names) != nbDims:\n raise Exception(\"length of axes_names (%d) is different \"\n \"from nb of dimensions (%d).\\n\"\n \"Got axes names: %s\"\n % (len(axes_names), nbDims, str(axes_names)))\n\n self.axes_names = axes_names[:]\n\n self.axes_ids = dict([(self.axes_names[i], i) for i in xrange(nbDims)])\n\n # By default: domain of axis = array of slice indexes\n sh = self.data.shape\n self.axes_domains = dict([(axis, np.arange(sh[i]))\n for i, axis in enumerate(self.axes_names)])\n\n if axes_domains is not None:\n assert isinstance(axes_domains, dict)\n\n for an, dom in axes_domains.iteritems():\n if an not in self.axes_names:\n raise Exception('Axis \"%s\" defined in domains not '\n 'found in axes (%s)'\n % (an, ','.join(self.axes_names)))\n\n ia = self.axes_names.index(an)\n l = self.data.shape[ia]\n if len(dom) != l:\n raise Exception('Length of domain for axis \"%s\" (%d) '\n 'does 
not match length of data '\n 'axis %d (%d) ' % (an, len(dom), ia, l))\n\n if len(set(dom)) != len(dom):\n raise Exception('Domain of axis \"%s\" does not contain '\n 'unique values' % an)\n\n axes_domains[an] = np.asarray(dom)\n\n self.axes_domains.update(axes_domains)\n\n logger.debug('Axes names: %s', str(self.axes_names))\n logger.debug('Axes domains: %s', str(self.axes_domains))", "def get_axis_name(self, axis_id):\n if isinstance(axis_id, str):\n if axis_id in self.axes_names:\n return axis_id\n else:\n return None\n assert np.isreal(axis_id) and np.round(axis_id) == axis_id\n if axis_id >= 0 and axis_id < self.get_ndims():\n return self.axes_names[axis_id]\n else:\n return None", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def findaxisbyname(self, *args, **kwargs):\n return _coordsys.coordsys_findaxisbyname(self, *args, **kwargs)", "def render_axis_labels(self, axes=None):\n axes = plt if axes is None else axes\n if self.x_label is not None:\n axes.set_xlabel(self.x_label)\n if self.y_label is not None:\n axes.set_ylabel(self.y_label)", "def _short_tick_names(ax, label_length=20, ticklabel_length=10):\n ax.set_yticks(ax.get_yticks().tolist())\n ax.set_xticks(ax.get_xticks().tolist())\n ax.set_xticklabels(\n [_shortname(t.get_text(), maxlen=ticklabel_length)\n for t in ax.get_xticklabels()]\n )\n ax.set_yticklabels(\n [_shortname(t.get_text(), maxlen=ticklabel_length)\n for t in ax.get_yticklabels()]\n )\n ax.set_xlabel(_shortname(ax.get_xlabel(), maxlen=label_length))\n ax.set_ylabel(_shortname(ax.get_ylabel(), maxlen=label_length))", "def set_axis_x(self, new_axis_point):\r\n self.__x_axis = new_axis_point", "def render_axis_labels(self, axes=None):\n raise NotImplementedError()", "def getAllAxisLabels(self):\n import copy\n return copy.copy(self.__axis_labels__)", "def _moveaxis(self, arr, source, dest):\n try:\n source = list(source)\n except TypeError:\n source = [source]\n try:\n dest = list(dest)\n except TypeError:\n dest = [dest]\n\n source = [a + arr.ndim if a < 0 else a for a in source]\n dest = [a + arr.ndim if a < 0 else a for a in dest]\n\n order = [n for n in range(arr.ndim) if n not in source]\n\n for dest, src in sorted(zip(dest, source)):\n order.insert(dest, src)\n\n return arr.transpose(order)", "def test_default_axis_nxdata(self, nexus_base):\n assert isinstance(nexus_base.default_axis, np.ndarray)", "def populate_plot_axis(self,plot,ax='x'):\n\n fig=plt.gcf()\n\n extra_ax=[]\n\n if ax=='x':\n\n ticks=plot.get_xticks()\n\n lim=plot.get_xlim()\n\n for i in range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['bottom'].set_position(('outward',10))\n\n axn.spines['bottom'].set_visible(True)\n\n else:\n\n dy_fig=0.08\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0,\\\n prev_ax_position.y0-2*dy_fig,\\\n prev_ax_position.width,\\\n 0),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.yaxis.set_visible(False)\n\n for side in axn.spines.keys():\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_xticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_xticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n xlab=axn.set_xlabel(self.names[i])\n\n xlab.set_fontsize(10)\n\n axn.tick_params(axis='x',labelsize=10)\n\n axn.set_xlim(lim)\n\n\n\n elif ax=='y':\n\n ticks=plot.get_yticks()\n\n lim=plot.get_ylim()\n\n for i in 
range(len(self.names)):\n\n if i==0:\n\n axn=plot\n\n axn.spines['left'].set_position(('outward',10))\n\n axn.spines['left'].set_visible(True)\n\n else:\n\n dx_fig=0.08\n\n plot_position=plot.get_position()\n\n prev_ax_position=axn.get_position()\n\n extra_ax.append(fig.add_axes(\\\n (prev_ax_position.x0-2*dx_fig,\\\n prev_ax_position.y0,\\\n 0,\\\n prev_ax_position.height),'autoscalex_on',True))\n\n axn=extra_ax[i-1]\n\n axn.xaxis.set_visible(False) # hide the yaxis\n\n for side in axn.spines.keys(): # 'top', 'bottom', 'left', 'right'\n\n axn.spines[side].set_linewidth(1)\n\n axn.set_yticks(ticks)\n\n ticksnames=[float(str(x)) for x in self.values[i]]\n\n axn.set_yticklabels(\\\n [\"{:.2f}\".format(x).rstrip('0').rstrip('.') for x in ticksnames],\\\n rotation = 45)\n\n ylab=axn.set_ylabel(self.names[i])\n\n ylab.set_fontsize(10)\n\n axn.tick_params(axis='y',labelsize=10)\n\n axn.set_ylim(lim)\n\n else:\n\n raise ValueError(\"Axis can be 'x' or 'y'\")", "def _swap_axis(input_tensor, dim_index, last_index, name=None):\n return array_ops.transpose(\n input_tensor,\n array_ops.concat([\n math_ops.range(dim_index), [last_index],\n math_ops.range(dim_index + 1, last_index), [dim_index]\n ], 0),\n name=name)", "def move_axis(data, label, new_position):\n # find current position of axis\n try:\n pos = data.dims.index(label)\n except ValueError as e:\n raise ValueError(\n f'Axis name {label} does not exist in input data') from e\n\n # create list of labels with new ordering\n axis_labels = list(data.dims)\n # the new position will be _before_ the given index, so will fail with a negative index\n # convert to a positive index in that case\n if new_position < 0:\n new_position += len(axis_labels)\n axis_labels.insert(new_position, axis_labels.pop(pos))\n # do the move\n return data.transpose(*axis_labels)", "def cleanAxis(axisName, ticksOnly=False, complete=False):\r\n axisName.xaxis.set_ticks_position('bottom')\r\n axisName.yaxis.set_ticks_position('left')\r\n axisName.xaxis.labelpad = 2\r\n if not ticksOnly:\r\n axisName.spines['top'].set_visible(False)\r\n axisName.spines['right'].set_visible(False)\r\n if complete:\r\n axisName.spines['top'].set_visible(False)\r\n axisName.spines['right'].set_visible(False)\r\n axisName.spines['bottom'].set_visible(False)\r\n axisName.spines['left'].set_visible(False)\r\n return axisName", "def set_axis(axis_number):\n robots = get_robot_roots()\n if not robots:\n pm.warning('Nothing Selected; Select a valid robot')\n return\n\n # These are specific to how the robots are rigged in relation to Maya's\n # coordinate system\n rotation_axes = ['Y', 'X', 'X', 'Z', 'X', 'Z']\n\n try: # if the text field is empty, or not a float value, skip it\n rotation_axis = rotation_axes[axis_number - 1]\n val = float(pm.textField('t_a{}'.format(axis_number),\n query=True,\n text=True))\n\n for robot in robots:\n ns = robot.namespace()\n pm.setAttr('{0}|{1}robot_GRP|{1}FK_CTRLS|{1}a{2}FK_CTRL.rotate{3}'.format(robot, ns, axis_number, rotation_axis), val)\n except:\n pass", "def category_axis(self, category_axis):\n\n self.container['category_axis'] = category_axis", "def fix_facetgrid_axis_labels(facet_grid, shared_in_center=False,\n x=True, y=True) -> None:\n # regarding the choice of shared_in_center: WWMDD?\n if shared_in_center:\n # TODO maybe add a axes over / under the FacetGrid axes, with the same\n # shape, and label that one (i think i did this in my gui or one of the\n # plotting fns. 
maybe plot_traces?)\n raise NotImplementedError\n else:\n for ax in facet_grid.axes.flat:\n if not (ax.is_first_col() and ax.is_last_row()):\n if x:\n ax.set_xlabel('')\n if y:\n ax.set_ylabel('')", "def undimensionize(self, names=None, mapper_to_meta=False):\n mapper = self.undimensionizing_mapper(names)\n self.rename_from_mapper(mapper)\n if mapper_to_meta: self._meta['sets']['rename_mapper'] = mapper\n if not names: self.set_dim_comp(False)", "def _update_data_transforms(self, axisOrder='col-major'):\n self._dataTransform = QtGui.QTransform()\n self._inverseDataTransform = QtGui.QTransform()\n if self.axisOrder == 'row-major': # transpose both\n self._dataTransform.scale(1, -1)\n self._dataTransform.rotate(-90)\n self._inverseDataTransform.scale(1, -1)\n self._inverseDataTransform.rotate(-90)", "def reset_name_labels(infr):\n infr.print('reset_name_labels', 1)\n orig_names = infr.get_node_attrs('orig_name_label')\n infr.set_node_attrs('name_label', orig_names)", "def readAxes(self):\n for axisElement in self.root.findall(\".axes/axis\"):\n axis = {}\n axis['name'] = name = axisElement.attrib.get(\"name\")\n axis['tag'] = axisElement.attrib.get(\"tag\")\n axis['minimum'] = float(axisElement.attrib.get(\"minimum\"))\n axis['maximum'] = float(axisElement.attrib.get(\"maximum\"))\n axis['default'] = float(axisElement.attrib.get(\"default\"))\n # we're not using the map for anything.\n axis['map'] = []\n for warpPoint in axisElement.findall(\".map\"):\n inputValue = float(warpPoint.attrib.get(\"input\"))\n outputValue = float(warpPoint.attrib.get(\"output\"))\n axis['map'].append((inputValue, outputValue))\n # there are labelnames in the element\n # but we don't need them for building the fonts.\n self.axes[name] = axis\n self.axesOrder.append(axis['name'])", "def alias(requestContext, seriesList, newName):\n try:\n seriesList.name = newName\n except AttributeError:\n for series in seriesList:\n series.name = newName\n return seriesList", "def swapaxes(a, axis1, axis2):\n # TODO(okuta): check type\n return a.swapaxes(axis1, axis2)", "def _axis_labels(self, data_name: str) -> Tuple[str, str]:\n\n # Single activity attributes (column name must be present in summary dataframe)\n if data_name == 'distance':\n if self.config.distance_unit == 'km':\n return 'distance_2d_km', 'Distance (km)'\n elif self.config.distance_unit == 'mile':\n return 'distance_2d_mile', 'Distance (miles)'\n elif data_name == 'duration':\n return 'duration', 'Duration (minutes)'\n\n # These can be used for either single activities (summary dataframe) or aggregates (time series dataframe)\n elif data_name == 'mean_speed':\n if self.config.distance_unit == 'km':\n return 'mean_kmph', 'Average speed (km/hour)'\n elif self.config.distance_unit == 'mile':\n return 'mean_mph', 'Average speed (miles/hour)'\n elif data_name == 'mean_hr':\n return 'mean_hr', 'Average heart rate (beats/minute)'\n\n # Aggregate attributes (column name must be present in time series dataframe)\n elif data_name == 'total_distance':\n if self.config.distance_unit == 'km':\n return 'total_distance_2d_km', 'Total distance (km)'\n elif self.config.distance_unit == 'mile':\n return 'total_distance_2d_mile', 'Total distance (miles)'\n elif data_name == 'total_duration':\n return 'total_duration', 'Total duration (minutes)'\n elif data_name == 'activity_count':\n return 'activity_count', 'Number of activities'\n\n else:\n raise ValueError(f'Bad value for `data_name`: \"{data_name}\".')", "def duplicate_axes(isl_obj, duplicate_inames, new_inames):\n if 
isinstance(isl_obj, list):\n return [\n duplicate_axes(i, duplicate_inames, new_inames)\n for i in isl_obj]\n\n if not duplicate_inames:\n return isl_obj\n\n def _align_and_intersect(d1, d2):\n d1, d2 = isl.align_two(d1, d2)\n return d1 & d2\n\n old_name_to_new_name = dict(zip(duplicate_inames, new_inames))\n\n dup_isl_obj = isl_obj\n\n for old_name, (dt, pos) in isl_obj.get_var_dict().items():\n dup_isl_obj = dup_isl_obj.set_dim_name(dt, pos,\n old_name_to_new_name.get(old_name,\n old_name))\n\n return _align_and_intersect(dup_isl_obj, isl_obj)", "def fix(self):\n for namespace in pm.listNamespaces():\n for elem in namespace.ls():\n elem.rename(elem.split(\":\")[-1])\n namespace.remove()\n\n self.run()", "def setIndexNames(self):\n self.theta = self.i1\n self.radial = self.i2", "def addExtraAxis(slab,newaxis=None,axis=0,verbose=False):\n\n import cdms2 as cdms\n import MV2 as MV\n\n if newaxis is None:\n newaxis=cdms.createAxis([1,])\n newaxis.units=''\n\n # add new axis to axis list of input <slab>\n axislist=slab.getAxisList()\n axislist.insert(axis,newaxis)\n\n #----------------Reshape----------------\n shape=list(slab.shape)\n shape.insert(axis,len(newaxis))\n slab2=MV.reshape(slab,shape)\n\n #------------Create variable------------\n att_dict=attribute_obj2dict(slab)\n slab2=cdms.createVariable(slab2,axes=axislist,attributes=att_dict,\\\n typecode='f')\n slab2.id=slab.id\n\n if verbose:\n print('\\n# <addExtraAxis>: Originial variable shape:',slab.shape)\n print('# <addExtraAxis>: New variable shape:',slab2.shape)\n\n return slab2", "def names(self, names):\n\n self._names = names", "def getAllAxisUnits(self):\n import copy\n return copy.copy(self.__axis_units__)", "def addAxis(self, tag, name, minimum, maximum, default, warpMap=None):\n axisElement = ET.Element(\"axis\")\n axisElement.attrib['name'] = name\n axisElement.attrib['tag'] = tag\n axisElement.attrib['minimum'] = str(minimum)\n axisElement.attrib['maximum'] = str(maximum)\n axisElement.attrib['default'] = str(default)\n if warpMap is not None:\n for a, b in warpMap:\n warpPt = ET.Element(\"map\")\n warpPt.attrib['input'] = str(a)\n warpPt.attrib['output'] = str(b)\n axisElement.append(warpPt)\n self.root.findall('.axes')[0].append(axisElement)", "def set_axis_tick_width(params):\n # type: (Dict) -> Dict\n for panel_id, p in params['local'].items():\n obj_axis = params['internal']['canvas']['axes'][panel_id]\n for k in ['x', 'y']:\n attr = \"get_{}axis\".format(k)\n ax = getattr(obj_axis, attr)()\n for m in ['major', 'minor']:\n ax.set_tick_params(\n which=m,\n width=p['tick'][m]['width'][k])\n\n return params", "def make_axes(info:PartialAxesInfo,\n cls_name:'Optional[str]'=None)->NamedAxes:\n\n if is_integer(info):\n assert info >= 0, f'naxis({info}) should be non-negative'\n assert cls_name is None, f'unnamed axes({info}) cannot have class name'\n\n return info\n\n if isinstance(info, Iterable):\n info = list(info)\n try:\n # try parsing as Mapping[NamedIndex, int] tuples\n axes = dict(info) # throw TypeError, ValueError\n assert all(map(is_integer, axes.values())), 'mapped indices should be integers'\n\n slots = np.zeros(len(axes), dtype=bool)\n slots[list(axes.values())] = True # throw IndexError\n assert slots.sum() == len(slots), 'bad axis indices' # HINT: all(0 <= axes < naxis)?\n except IndexError:\n raise ValueError('bad axis indices')\n except (TypeError, ValueError):\n # fallback to enumerate indices for axes\n assert all(map(lambda axis: isinstance(axis, NamedIndex), info))\n\n axes = dict(zip(info, 
range(len(info))))\n else:\n # sort name by indices\n axes_ = axes\n axes = {k: v if v >= 0 else len(axes) + v for k, v in axes.items()}\n keys = sorted(axes.keys(), key=axes.get)\n axes = OrderedDict()\n for key in keys:\n axes[key] = axes_[key]\n\n if cls_name is not None:\n axes = namedtuple(cls_name, axes.keys())(**axes)\n\n return axes\n\n raise TypeError(f\"unsupported 'info' type({type(info)})\")", "def xaxis(self,label,units):\r\n if units != \"\": label = label + \" (\" + units + \")\"\r\n self.xbox.set_text(r\"$%s$\" % (label))\r\n pass", "def _create_diagram_axis_info(self, axis, pinfo):\n\n nkey = f\"{axis}colname\"\n colname = pinfo[nkey]\n defs = self._refdefs.info.get(colname, {})\n title = defs.get(\"title\", colname)\n\n fontfmt = {\"family\" : \"Arial, sans-serif\",\n \"size\" : 18,\n \"color\" : \"black\"}\n\n axis = {\"showline\" : True,\n \"showgrid\" : True,\n \"title\" : title,\n \"titlefont\" : fontfmt,\n \"ticks\" : \"outside\",\n \"tickwidth\" : 1,\n \"showticklabels\" : True,\n \"linewidth\" : 1,\n \"linecolor\" : \"black\",\n \"zeroline\" : True,\n \"zerolinewidth\" : 1,\n \"zerolinecolor\" : \"black\"}\n\n if defs.get(\"unit\") == \"nanosecond\":\n axis[\"tickformat\"] = \".3s\"\n axis[\"ticksuffix\"] = \"s\"\n axis[\"hoverformat\"] = \".4s\"\n elif colname == \"Percentile\":\n axis[\"ticksuffix\"] = \"%\"\n\n return axis", "def upAxis(*args, axis: Union[AnyStr, bool]=\"\", rotateView: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass" ]
[ "0.7356323", "0.7326029", "0.6893155", "0.6677015", "0.64332557", "0.6372005", "0.63517636", "0.62905", "0.628417", "0.62775946", "0.6271645", "0.6077482", "0.6047013", "0.6024936", "0.5993588", "0.59712166", "0.5967535", "0.596201", "0.5952742", "0.58923167", "0.589169", "0.5884654", "0.578875", "0.57817596", "0.5780179", "0.5779107", "0.57761216", "0.57437253", "0.5734455", "0.5707204", "0.5703792", "0.56781435", "0.56337297", "0.56219816", "0.5616132", "0.5611061", "0.56026804", "0.558611", "0.5579622", "0.556776", "0.556412", "0.55292183", "0.5517566", "0.5511644", "0.54836136", "0.5480594", "0.5459653", "0.54482764", "0.5446676", "0.54211926", "0.54127645", "0.538706", "0.5370458", "0.5343439", "0.53327024", "0.53163123", "0.5315587", "0.53030705", "0.5300485", "0.5289558", "0.52566266", "0.52388656", "0.52194154", "0.52121794", "0.5208994", "0.5203703", "0.5189778", "0.51700723", "0.51655865", "0.5151354", "0.5145567", "0.5135191", "0.5116793", "0.51126283", "0.51120114", "0.5109633", "0.5105329", "0.5104444", "0.5100494", "0.50953746", "0.50880253", "0.5063333", "0.5059572", "0.5056057", "0.505014", "0.50489455", "0.50468504", "0.50337374", "0.50332844", "0.5029569", "0.5022714", "0.5019628", "0.50166655", "0.5010822", "0.5007343", "0.5006219", "0.4989394", "0.49820545", "0.49815145", "0.4972247" ]
0.75589085
0
Close the partition after it has been conformed. The partition should usually be closed after its `array` method has been called to prevent memory leaks. Closing the partition does one of the following, depending on the values of the partition's `!_original` attribute and on the
def close(self, **kwargs): config = getattr(self, "config", None) if config is None: return if kwargs: config.update(kwargs) original = getattr(self, "_original", None) logger.partitioning("Partition.close: original = {}".format(original)) if not original: originally_on_disk = False original_subarray = None else: originally_on_disk = not original.in_memory original_subarray = original._subarray config = self.config logger.partitioning(" config = {}".format(config)) if config["serial"]: # -------------------------------------------------------- # SERIAL # -------------------------------------------------------- logger.partitioning(" serial") if config["readonly"]: logger.partitioning(" readonly=True") if originally_on_disk: logger.partitioning(" subarray originally on disk") if config.get("to_disk", False): # 1.1.1.1 The original subarray was on disk, # we don't want to keep the current # subarray in memory, and we are happy # to discard any changes that may have # been made to the subarray. logger.partitioning(" 1.1.1.1 revert") self.revert() elif free_memory() <= cf_fm_threshold(): # 1.1.1.2 The original subarray was on disk, # we are happy to keep the current # subarray in memory, but there is not # enough free memory to do so. logger.partitioning( " 1.1.1.2 revert ({} <= {})".format( free_memory(), cf_fm_threshold() ) ) self.revert() else: # 1.1.1.3 The original subarray was on disk # and there is enough memory to keep # the current subarray in memory if config["unique_subarray"] and isinstance( original_subarray, CachedArray ): # The original subarray was a temporary # file which is not referenced by any # other partitions _remove_temporary_files( original_subarray._partition_file ) del self.masked logger.partitioning( " 1.1.1.3 del masked ({} > {})".format( free_memory(), cf_fm_threshold() ) ) else: logger.partitioning(" subarray originally in memory") if config.get("to_disk", False): # 1.1.2.1 Original subarray was in memory and # we don't want to keep the current # subarray in memory logger.partitioning(" 1.1.2.1 to_disk") self.to_disk(reopen=False) elif free_memory() <= cf_fm_threshold(): # 1.1.2.2 Original subarray was in memory and # unique but there is not enough # memory to keep the current subarray logger.partitioning(" 1.1.2.2 to_disk") self.to_disk(reopen=False) else: # 1.1.2.3 Original subarray was in memory and # unique and there is enough memory to # keep the current subarray in memory logger.partitioning(" 1.1.2.3 pass") pass else: # config['readonly'] is False if originally_on_disk: if config.get("to_disk", False): # 1.2.1.1 Original subarray was on disk and # there and we don't want to keep the # array if config["unique_subarray"] and isinstance( original_subarray, CachedArray ): # Original subarray was a temporary file # on disk which is not referenced by any # other partitions _remove_temporary_files( original_subarray._partition_file ) logger.partitioning(" 1.2.1.1 to_disk") self.to_disk(reopen=False) elif free_memory() <= cf_fm_threshold(): # 1.2.1.2 Original subarray was on disk but # there is not enough memory to keep # it if config["unique_subarray"] and isinstance( original_subarray, CachedArray ): # Original subarray was a temporary file # on disk which is not referenced by any # other partitions _remove_temporary_files( original_subarray._partition_file ) logger.partitioning(" 1.2.1.2 to_disk") self.to_disk(reopen=False) else: # 1.2.1.3 Original subarray was on disk and # there is enough memory to keep it logger.partitioning(" 1.2.1.3 pass") del self.masked 
else: if config.get("to_disk", False): # 1.2.2.1 Original subarray was in memory but # we don't want to keep it logger.partitioning(" 1.2.2.1 to_disk") self.to_disk(reopen=False) elif free_memory() <= cf_fm_threshold(): # 1.2.2.2 Original subarray was an in memory # but there is not enough memory to # keep it logger.partitioning(" 1.2.2.2 to_disk") self.to_disk(reopen=False) else: # 1.2.2.3 Original subarray was in memory and # there is enough memory to keep it logger.partitioning(" 1.2.2.3 del masked") del self.masked else: logger.partitioning("Partition.close: parallel") # -------------------------------------------------------- # PARALLEL # -------------------------------------------------------- pass # if hasattr(self, '_original'): # del self._original # print(hasattr(self, 'config')), try: del self.config except AttributeError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n return self.close_array", "def file_close(self):\n if self.on_disk:\n self._subarray.close()", "def close(self):\n self.ix.close()", "def close (self):\n pass\n #TODO: implement more realistic closing semantics", "def close(self):\n self.data.close()", "def close(self):\n self.drill = None", "def _close( self ):\n for sji in self._sji_data:\n sji.close()", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close(self) -> None:", "def close():", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def _close(self):\n self.write_data(self.write_queue)\n self.write_compound(self.write_compound_queue)", "def close(self):\n\t\tself.filep.close()", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def Close(self):", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n raise NotImplementedError", "def close(self):\n ...", "def close(self):\n ...", "def close (self):\n raise NotImplementedError( 'Needs implementation' )", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. 
If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def Close(self):\n raise NotImplementedError('Implement this')", "def _close(self):\n log.Debug('dpbx.close():')", "def close(self) -> None:\n pass", "def close(self):\n \n self.__exit__(None, None, None)\n return", "def close(self) -> None:\n self.pages = []\n for fo, _reader in self.inputs:\n fo.close()\n\n self.inputs = []\n self.output = None", "def close(self):\n raise NotImplemented", "def close(self):\r\n pass", "def Close(self):\n self._RaiseIfNotWritable()\n\n self._storage_file.Close()\n self._storage_file = None", "def close(self):\n self.closed = True", "def close(self) -> None:\r\n pass", "def close(self):\n raise NotImplementedError()", "def close(self):\n raise NotImplementedError()", "def close (self):\n pass", "def close(self):\n raise NotImplementedError(\"Implement this method in child class\")", "def close(self):\n for m in self._mappers:\n m.close()\n self._mappers = []\n self._offsets = []\n self._sizes = []\n self._handler = None", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n pass", "async def end_array(self):", "def close(self) -> None:\n\n raise NotImplementedError", "def close(self):\r\n raise NotImplementedError(\"`close` method is not implemented!\")", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n # This is a NOOP by default" ]
[ "0.6764165", "0.6518303", "0.60208786", "0.5811839", "0.5795013", "0.579095", "0.5784808", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.57648385", "0.5713722", "0.5699953", "0.5699953", "0.5665161", "0.5664806", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.56545746", "0.5634785", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.5615191", "0.56032425", "0.56032425", "0.5597058", "0.5588722", "0.5588722", "0.5582289", "0.55412054", "0.5529768", "0.552122", "0.5511022", "0.549194", "0.54891145", "0.5474664", "0.5472895", "0.5469488", "0.54694533", "0.5460811", "0.5460811", "0.54573655", "0.5454711", "0.5450347", "0.5449983", "0.5449983", "0.5449983", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544837", "0.544616", "0.5440888", "0.5440648", "0.5439066", "0.5439066", "0.54387116" ]
0.70555735
0
Return a deep copy. ``p.copy()`` is equivalent to ``copy.deepcopy(p)``.
def copy(self): new = Partition.__new__(Partition) new.__dict__ = self.__dict__.copy() self._increment_file_counter() return new
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deepcopy(self):\n return copymod.deepcopy(self)", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def deepcopy(self):\n return self.copy()", "def copy(self):\r\n return copy.deepcopy(self)", "def deepcopy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self):\n return copy.deepcopy(self)", "def copy(self, deep=False):\n return _(copy.deepcopy(self._) if deep else copy.copy(self._))", "def __deepcopy__(self, memodict=None):\n return self.copy()", "def copy(self):\n from copy import deepcopy\n return deepcopy(self)", "def clone(self):\n return copy.deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def clone(self):\n return deepcopy(self)", "def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", "def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def copy(self):\n return deepcopy(self)", "def _copy_(self):\n return copy.copy(self)", "def copy (self):\n import copy\n return copy.copy(self)", "def clone(self) -> Any:\n return cp.copy(self)", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\r\n return copy.copy(self)", "def copy(self):\n \n return deepcopy(self)", "def clone(self):\n return self.copy()", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def __copy__(self):\n return self.copy()", "def copy(self):\n return pdict(dict.copy(self))", "def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy", "def deep_copy(self):\n return 
self.__class__(self.inputs, self.outputs, self.middle)", "def clone(self):\n return shallow_clone(self)", "def copy (self, **kwargs):\n out = copy.deepcopy (self)\n out.update (**kwargs)\n return out", "def clone(self):\n memo = dict()\n c = self._clone(memo)\n c._clone_rip(memo)\n return c", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def copy(self, **kwargs):\n\n # Future versions may add new options here\n with KWArgs(kwargs) as k:\n deep = k.optional(\"deep\", True)\n\n if deep:\n return copy.deepcopy(self)\n else:\n return copy.copy(self)", "def __deepcopy__(self, memodict=None):\n return self.__class__(self.m, self.n, deepcopy(self.data))", "def copy(self):\n return self._new_rep(self._func(self.rep))", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n\n return deepcopy(self)", "def copy(self):\n new = self\n return new", "def copy(self):\n return self.mutate().simple_copy()", "def deepcopy(self):\n\t\tlpcopy = LpProblem(name = self.name, sense = self.sense)\n\t\tif lpcopy.objective != None:\n\t\t\tlpcopy.objective = self.objective.copy()\n\t\tlpcopy.constraints = {}\n\t\tfor k,v in self.constraints.iteritems():\n\t\t\tlpcopy.constraints[k] = v.copy()\n\t\tlpcopy.sos1 = self.sos1.copy()\n\t\tlpcopy.sos2 = self.sos2.copy()\n\t\treturn lpcopy", "def __deepcopy__(self, memo):\n return self.copy()", "def _copy(self, p):\n p._.d = self._.d\n p._.n = self._.n\n if self._has(\"p\"):\n p._.p = copy(self._.p)\n if self._has(\"q\"):\n p._.q = copy(self._.q)\n if self._has(\"P\"):\n p._.P = copy(self._.P)\n if self._has(\"Q\"):\n p._.Q = copy(self._.Q)\n if self._has(\"k\"):\n p._.k = self._.k\n if self._has(\"m\"):\n p._.m = self._.m\n if self._has(\"fsd\"):\n p._.fsd = self._.fsd\n if self._has(\"pPolynomial_ordering\"):\n p._.pPolynomial_ordering = self._.pPolynomial_ordering\n if self._has(\"qPolynomial_ordering\"):\n p._.qPolynomial_ordering = self._.qPolynomial_ordering\n if self._has(\"complement\"):\n p._.complement = self._.complement\n p._.fusion_schemes.update(self._.fusion_schemes)\n p._.subschemes.update(self._.subschemes)\n p._.subconstituents = list(self._.subconstituents)\n p._.triple.update(self._.triple)\n p._.triple_solution.update(self._.triple_solution)\n p._.triple_solution_generator.update(self._.triple_solution_generator)\n p._.quadruple.update(self._.quadruple)", "def copy(self, deep=False):\n if deep:\n raise NotImplementedError(\"Deep Copy is not supported\")\n\n return type(self)(self.data)", "def copy(self):\n return self.__class__(**vars(self))", "def copy (self):\n return self.__class__(self.name, self[:])", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def deepcopy(self, memo=None):\n from copy import deepcopy\n return deepcopy(self, memo)", "def __copy__(self):\n return self.__class__(self.m, self.n, self.data)", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def copy(self):\n copy = type(self).__new__(type(self))\n copy._tree_sequence = self._tree_sequence\n 
copy._ll_tree = self._ll_tree.copy()\n copy._make_arrays()\n return copy", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def copy(self):\n cls = type(self)\n new = cls()\n new.default = deepcopy(self.default)\n new.current = deepcopy(self.current)\n new.stepnames = deepcopy(self.stepnames)\n return new", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n copy = self.__class__()\n copy.a = self.a\n copy.b = self.b\n copy.peak = self.peak\n copy.orientation = self.orientation\n copy.i = self.i\n copy.coords = self.coords.copy()\n return copy", "def copy(self) -> \"Param\":\n copied = super().copy()\n copied._stack = OrderedDiot(\n [(key, param.copy()) for key, param in self._stack.items()]\n )\n return copied", "def copy(self):\n return self.__class__(dict(self))" ]
[ "0.7977393", "0.7835692", "0.7835209", "0.7829921", "0.7808746", "0.7806165", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.77548367", "0.7629336", "0.7570929", "0.7535955", "0.75320345", "0.75203437", "0.75203437", "0.75203437", "0.75203437", "0.7498102", "0.74954945", "0.74743545", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.74322134", "0.7424588", "0.7396001", "0.73653597", "0.7358803", "0.73552966", "0.73552966", "0.73552966", "0.73552966", "0.73510087", "0.7347772", "0.73406434", "0.73107153", "0.73107153", "0.73107153", "0.7301712", "0.7294297", "0.7289994", "0.7266033", "0.7254224", "0.72527796", "0.7248667", "0.7238254", "0.7210287", "0.7209984", "0.72056806", "0.7187993", "0.7187993", "0.71772134", "0.71772134", "0.71772134", "0.71601623", "0.71387434", "0.71079034", "0.7101826", "0.70932615", "0.708079", "0.7047189", "0.7042109", "0.703998", "0.703998", "0.703998", "0.703998", "0.7035339", "0.70211947", "0.70145977", "0.69918305", "0.6985467", "0.6981408", "0.6974594", "0.6974594", "0.69734454", "0.69436586", "0.69393694" ]
0.0
-1
Returns the partition's data array. After a partition has been conformed, the partition must be closed (with the `close` method) before another partition is conformed,
def array(self): config = self.config unique_array = config["unique_subarray"] p_axes = self.axes p_flip = self.flip p_part = self.part p_units = self.Units p_shape = self.shape p_location = self.location subarray = self._subarray len_p_axes = len(p_axes) if not self.in_memory: # -------------------------------------------------------- # The subarray is not in memory. # # It could be in a file on disk or implied by a FileArray # object, etc. # -------------------------------------------------------- self._original = self.copy() unique_array = True update = True copy = False if not p_part: indices = Ellipsis else: indices = tuple(p_part) # Read from a file into a numpy array p_data = subarray[indices] # We've just copied p_data from disk, so in place changes # are not possible in_place_changes = False else: # -------------------------------------------------------- # The subarray is in memory # -------------------------------------------------------- update = config["update"] if p_part: p_data = get_subspace(subarray, p_part) elif not unique_array: p_data = subarray.view() else: p_data = subarray copy = config["extra_memory"] # In place changes to p_data might be possible if we're not # copying the data in_place_changes = not copy if not p_data.ndim and isinstance(p_data, (numpy_number, numpy_bool_)): # -------------------------------------------------------- # p_data is a numpy number (like numpy.int64) which does # not support assignment, so convert it to a numpy array. # -------------------------------------------------------- p_data = numpy_array(p_data) # We've just copied p_data, so in place changes are # not possible copy = False in_place_changes = False masked = numpy_ma_isMA(p_data) if masked: # The p_data is a masked array if p_data.mask is numpy_ma_nomask or not numpy_ma_is_masked( p_data ): # There are no missing data points so recast as an # unmasked numpy array p_data = p_data.data masked = False # --- End: if if masked: # Set the hardness of the mask if config["hardmask"]: p_data.harden_mask() else: p_data.soften_mask() # --- End: if self.masked = masked # ------------------------------------------------------------ # Make sure that the data array has the correct units. This # process will deep copy the data array if required (e.g. if # another partition is referencing this numpy array), even if # the units are already correct. 
# ------------------------------------------------------------ func = config.get("func") units = config["units"] if func is None: if not p_units.equals(units) and bool(p_units) is bool(units): func = Units.conform if func is not None: inplace = not copy p_data = func(p_data, p_units, units, inplace) p_units = units if not inplace: # We've just copied p_data, so in place changes are # not possible copy = False in_place_changes = False # --- End: if flip = config.get("flip", None) if flip or p_flip: flip_axes = set(p_flip).symmetric_difference(flip) else: flip_axes = None axes = config["axes"] if p_data.size > 1: # -------------------------------------------------------- # Flip axes # -------------------------------------------------------- if flip_axes: indices = [ ( slice(None, None, -1) if axis in flip_axes else slice(None) ) for axis in p_axes ] p_data = p_data[tuple(indices)] # -------------------------------------------------------- # Transpose axes # -------------------------------------------------------- if p_axes != axes: iaxes = [p_axes.index(axis) for axis in axes if axis in p_axes] if len_p_axes > len(iaxes): for i in range(len_p_axes): if i not in iaxes: # iaxes.append(i) iaxes.insert(i, i) # --- End: if p_data = numpy_transpose(p_data, iaxes) # --- End: if # ------------------------------------------------------------ # Remove excessive/insert missing size 1 axes # ------------------------------------------------------------ if p_shape != p_data.shape: # if len_p_axes != len(p_shape): p_data = p_data.reshape(p_shape) # ------------------------------------------------------------ # Apply the auxiliary mask # ------------------------------------------------------------ auxiliary_mask = config["auxiliary_mask"] if auxiliary_mask: for mask in auxiliary_mask: if mask.any(): if not masked: p_data = p_data.view(numpy_ma_MaskedArray) masked = True p_data.mask = (mask | p_data.mask).array # --- End: for self.masked = True # ------------------------------------------------------------ # Convert the array's data type # ------------------------------------------------------------ p_dtype = p_data.dtype dtype = config.get("dtype", None) if dtype is not None and dtype != p_dtype: try: p_data = p_data.astype(dtype) # Note: returns a copy except ValueError: raise ValueError( "Can't recast partition array from {} to {}".format( p_dtype.name, dtype.name ) ) else: # We've just copied p_data, so in place changes are # not possible copy = False in_place_changes = False # --- End: if # ------------------------------------------------------------ # Copy the array # ----------------------------------------------------------- if copy: if p_dtype.char != "O": if not masked or p_data.ndim > 0: p_data = p_data.copy() else: # This is because numpy.ma.copy doesn't work for # scalar arrays (at the moment, at least) p_data = numpy_ma_masked_all((), p_data.dtype) # We've just copied p_data, so in place changes are # not possible in_place_changes = False else: # whilst netCDF4.netcdftime.datetime is mucking bout, # don't copy!!!! # p_data = _copy(p_data) pass # --- End: if # ------------------------------------------------------------ # Update the partition # ------------------------------------------------------------ if update: self.subarray = p_data # ?? 
DCH CHECK self.Units = p_units self.part = [] self.axes = axes self.flip = flip self.flatten = [] self.shape = p_shape self.location = p_location self._in_place_changes = in_place_changes # ------------------------------------------------------------ # Return the numpy array # ------------------------------------------------------------ return p_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data ( self ):\n return self._data_pntr.ReadAsArray()", "def get_data ( self ):\n return self._data_pntr.ReadAsArray()", "def data_array(self):\n return self._data_array", "def getData(self):\n return self._array", "def get(self):\r\n return self.data_array", "def data(self):\n\t\tself.dworker()\n\t\treturn self.d", "def get_data(self):\n return self.data[self._size:self._size + self._len]", "def data(self) -> np.ndarray:\n return self._data", "def read(self) -> np.ndarray:\n return self[self._head]", "def get_data(self):\n idxs = self.get_indexes(self._start, self._length, self.maxsize)\n return self._data[idxs].copy()", "def get_data(self):\n return [\n self.offset, self.diag, self.orig_freq_diag, self.orig_freq_dims\n ]", "def __array__(self, copy=None):\n return self.data.to_pandas().values", "def tondarray(self):\r\n return self.data;", "def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)", "def data(self):\n self._data: np.ndarray\n return self._data", "def _read_data(self):\n return [np.array([]), np.array([])]", "def all_data(self) -> Optional[np.ndarray]:\n if self._data_store is None:\n return None\n return self._data_store[:self._count, :]", "def getData(self, slice=None):\n\t\traise NotImplementedError", "def data(self) -> List[ndarray]:\n return self._data", "def _read(self):\n return np.copy(self.memory[self.head_pos])", "def get_data(self):\n if self.ser.in_waiting:\n data_string = self.ser.readline().decode().strip()\n if not data_string: return self.data\n self.data = [\n float(element) for element in data_string.split()\n ]\n self.ser.reset_input_buffer()\n return self.data", "def data(self):\n return getXarray(self.__mdsnode__,strict=self.__strict__)", "def __array__(self):\n return np.asarray(self.data)", "def GetData(self):\r\n \r\n return self._data", "def array(self):\n return self.get_array()", "def __array__(self):\n return pa.column(\"dummy\", self.data).to_pandas().values", "def _get_heap_data(self):\n if self._heapsize:\n raw_data = self._get_raw_data().view(np.ubyte)\n heap_end = self._heapoffset + self._heapsize\n return raw_data[self._heapoffset : heap_end]\n else:\n return np.array([], dtype=np.ubyte)", "def array(self) -> ndarray:\n if self._slices: # so this is a sub-parray object\n # index into origin array by saved slices\n ret = self._array.get_by_global_slices(self._current_device_index, self._slices[0])\n for s in self._slices[1:]:\n ret = ret[s]\n return ret\n else: # this is a complete copy\n ret = self._array.get(self._current_device_index)\n\n if isinstance(ret, list): # get a subarray instead\n raise IndexError(\"Current device doesn't have a complete copy of this array\")\n return ret", "def get(self):\n return self._partition", "def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))", "def partitions(self):\n self._get_latest_content()\n return self._data.get('partitions', [])", "def to_fits_array(self):\n return self.data", "def _get_data_chunk(self):\n if self._start_pos < self.max_pos:\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n elif self._current_forc < self._num_forcs - 1:\n # Resest for next FORC\n self._current_forc += 1\n\n 
self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n self._get_dc_offset()\n\n self._start_pos = 0\n self._end_pos = int(min(self.h5_main.shape[0], self._start_pos + self.max_pos))\n self.data = self.h5_main[self._start_pos:self._end_pos, self._current_sho_spec_slice]\n\n else:\n self.data = None\n\n return", "def get_data(self):\n oshape = (ctypes.c_uint * 4)()\n ostride = ctypes.c_uint()\n ret = cxnlib.CXNIOGetData(self.handle,\n oshape, ctypes.byref(ostride))\n return ctypes2numpyT(ret, [x for x in oshape], 'float32', ostride.value)", "def getData(self):\r\n return self._data", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def data(self):\n return self._fastqc_data", "def arr(self):\n return self._arr", "def read_data(self, filename, chunksize = 2048):\n pdt_tipsy = np.dtype([('mass', 'f4'),('pos', 'f4', 3),('vel', 'f4', 3), ('eps', 'f4'), ('phi', 'f4')])\n\n # helper functions\n def convert_to_fof_particle_partition(index, iterator): \n for s in iterator: \n p_arr = np.frombuffer(s, pdt_tipsy)\n new_arr = np.zeros(len(p_arr), dtype=pdt)\n new_arr['pos'] = p_arr['pos'] \n if count: \n npart_acc.add({index: len(new_arr)})\n yield new_arr\n\n def set_particle_IDs_partition(index, iterator): \n p_counts = partition_counts.value\n local_index = 0\n start_index = sum([p_counts[i] for i in range(index)])\n for arr in iterator:\n arr['iOrder'] = range(start_index + local_index, start_index + local_index + len(arr))\n local_index += len(arr)\n yield arr\n \n sc = self.sc\n\n rec_rdd = sc.binaryRecords(filename, pdt_tipsy.itemsize*chunksize)\n nPartitions = rec_rdd.getNumPartitions()\n # set the partition count accumulator\n npart_acc = sc.accumulator({i:0 for i in range(nPartitions)}, dictAdd())\n count=True\n # read the data and count the particles per partition\n rec_rdd = rec_rdd.mapPartitionsWithIndex(convert_to_fof_particle_partition)\n rec_rdd.count()\n count=False\n\n partition_counts = sc.broadcast(npart_acc.value)\n\n rec_rdd = rec_rdd.mapPartitionsWithIndex(set_particle_IDs_partition)\n rec_rdd = (self._partition_rdd(rec_rdd, partition_array).partitionBy(self.nPartitions) \n .map(lambda (_,v): v, preservesPartitioning=True)) \n return rec_rdd", "def get_data(self):\n return self.wordsArray", "def get_data(self):\n return self.read_sample_win()", "def data(self):\n try:\n return self.get_nowait()\n except Empty:\n return None", "def _readData(self):\n # Debug. 
This fn should be called only after checking canRead()\n if not self._canRead():\n raise Exception(\"Trying to read more data than there is.\")\n\n data = self.buffer[:self._expectedByteCount]\n self.buffer = self.buffer[self._expectedByteCount:]\n\n return data", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= 
cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "def getData(self):\n return self._data", "def getData(self):\n return self._data", "def update(self) -> np.ndarray:\r\n # Read chunk from array\r\n signal_slice: np.ndarray = np.array(\r\n self._signal_data[self._pointer : self._pointer + self._chunk]\r\n )\r\n # Move index over\r\n self._pointer += self._chunk\r\n if self._pointer > len(self._signal_data):\r\n # Go back to beginning\r\n self._pointer = 0\r\n self._restart_flag = True\r\n print(\"Restarting stream...\")\r\n\r\n return signal_slice", "def update(self) -> np.ndarray:\r\n # Read chunk from array\r\n signal_slice: np.ndarray = np.array(\r\n self._signal_data[self._pointer : self._pointer + self._chunk]\r\n )\r\n # Move index over\r\n self._pointer += self._chunk\r\n if self._pointer > len(self._signal_data):\r\n # Go back to beginning\r\n self._pointer = 0\r\n self._restart_flag = True\r\n print(\"Restarting stream...\")\r\n\r\n return signal_slice", "def read_data(self):\n raise NotImplementedError", "def _data(self):\n cell = self._get_cell()\n return deepcopy(cell.data)", "def get_data(self):\r\n return self.data.copy()", "def extract_volume(self):\n\n # RDD or array of [(partition, vol)]\n vols = None\n if self.usespark:\n vols = self._retrieve_vol(self.current_spot, None)\n else:\n vols = self._retrieve_vol(self.current_spot, len(self.partitions))\n self.current_spot += len(self.partitions)\n \n return vols", "def getData(self):\n return self.__data", "def data(self):\r\n return self._data", "def data(self):\r\n return self._data", "def data(self):\r\n return self._data", "def data(self):\n if self._data is None:\n self._get_data()\n\n return self._data", "def data(self):\n return self._buf[self._offset : self._offset + self._size]", "def getPartition(self):\n\t\treturn self.partition", "def partition(self, sep):\n return asarray(partition(self, sep))", "def in_memory_data(self):\n return self._in_memory_data", "def read(self) -> np.array:\n return self._stream.read(self._frame_size)", "def getData(self):\n return self.data", 
"def getData(self):\n return self.data", "def get_data(self):\n try:\n data = self._queue.get(block=False)\n except Empty:\n data = None\n return data", "def data(self):\n return self._data", "def getArray( self, par, path, unit = None ):\n\n return self.db.getArrayPar( par, path, unit = unit )", "def data(self) -> List[T]:\n return self._data", "def exposed_get_data(self, chunk_id):\n local_filename = self.chunk_filename(chunk_id)\n with open(local_filename, \"r\") as file:\n data = file.read()\n return data", "def as_list(self) -> List[int]:\n return self.my_partition", "def getPoolData(self):\r\n # type: () -> (list[Data.Data])\r\n output = []\r\n # start from the beginning of the pool area\r\n ea = self.func_ea + self.getSize(withPool=False)\r\n while ea < self.getSize(withPool=True):\r\n # create and append the data item\r\n data = Data.Data(ea)\r\n output.append(data)\r\n # advance ea to the next item\r\n ea += data.getSize()\r\n return output", "def getData(self):\n\n if not self.data:\n raise ValueError('No data file given.')\n\n return self.data.getData()", "def data(self):\n if not hasattr(self, \"_data\"):\n self._data = self._get_data()\n return self._data", "def getNdArray(self):\n futures = self.client.map(_call_getNdArray, self.vecDask, pure=False)\n arrays = self.client.gather(futures)\n return arrays", "def get_partitions(self):\n return self.partitions", "def getData(self, taskId:int):\n return self.pool.getData(taskId)", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def numpy(self):\n return self.data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self):\n return self._data", "def data(self) -> Generator:\n # Not using a consumer group and setting partitions manually so it's a smaller\n # jump to make this deterministic/repeatable with multiple workers later on.\n\n self.connect()\n\n self.approx_position = 0\n for partition_id, start_offset, end_offset in self._partition_ranges():\n # TODO - confirm this can never jump to another partition\n tp = TopicPartition(topic=self.topic, partition=partition_id)\n self.client.assign([tp])\n\n self.items_to_fetch = end_offset - start_offset\n self.client.seek(tp, start_offset)\n\n if self.items_to_fetch <= 0:\n msg = f\"Invalid offsets {start_offset}:{end_offset} for partition {partition_id}\"\n raise ValueError(msg)\n\n for m in self.client:\n self.approx_position += 1\n yield Pinnate(data=m.value)\n\n if end_offset is not None and m.offset >= end_offset:\n break", "def partitions(self):\n return self._partitions" ]
[ "0.66712004", "0.66712004", "0.6514655", "0.62883234", "0.62340677", "0.6196425", "0.6086166", "0.6076796", "0.5936824", "0.59225637", "0.5890117", "0.5851978", "0.5830991", "0.5823379", "0.5809332", "0.57856905", "0.5781631", "0.57773644", "0.57653815", "0.5727279", "0.5719797", "0.5713546", "0.56780607", "0.5648781", "0.5639347", "0.56386054", "0.5624693", "0.5615178", "0.5605869", "0.5559736", "0.5544451", "0.55281126", "0.54828036", "0.5477017", "0.5470059", "0.5456417", "0.54526037", "0.54519075", "0.5449001", "0.544266", "0.544167", "0.5435135", "0.54187924", "0.54090464", "0.54024386", "0.54024386", "0.5398539", "0.5398539", "0.5395827", "0.5395746", "0.53955626", "0.53931326", "0.5384674", "0.5380315", "0.5380315", "0.5380315", "0.53746235", "0.5372373", "0.536975", "0.5366125", "0.53609025", "0.53437394", "0.5339596", "0.5339596", "0.5334339", "0.53260964", "0.53231436", "0.5322855", "0.53211737", "0.53188425", "0.53174394", "0.53168225", "0.5307165", "0.53046256", "0.53039026", "0.5295094", "0.52826726", "0.5278804", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.5278098", "0.52712214", "0.5270703" ]
0.5538118
31
True if the subarray contains datetime objects.
def isdt(self): return self.Units.isreftime and self._subarray.dtype == _dtype_object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_datetime_type(obj: _std_typing.Any) -> bool:\n return obj.dtype == sc.DType.datetime64", "def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)", "def is_datetime(self) -> bool:\n return False", "def are_all_datetimes(values: List[Union[str, int, float]]):\n for value in values:\n if not is_datetime(value):\n return False\n return True", "def has_time(self):\n return isinstance(self._start, datetime.datetime)", "def is_datetime(self):\n answer = self._call('is_datetime')\n return answer.yes", "def is_datetime_type(val):\n return (\n pd.api.types.is_datetime64_any_dtype(val)\n or isinstance(val, pd.Timestamp)\n or isinstance(val, datetime)\n )", "def is_all_dates(self) -> bool:\n return False", "def is_normalized(self) -> bool:\n return is_date_array_normalized(self.asi8, self.tz, reso=self._creso)", "def has_start_stop_acqtamps(self):\n try:\n if not all([isinstance(x, datetime) for x in self.start_acq]):\n raise Exception(\"Invalid value encountered in start_acq\")\n if not all([isinstance(x, datetime) for x in self.stop_acq]):\n raise Exception(\"Invalid value encountered in stop_acq\")\n if not all([len(self) == len(x) for x in [self.start_acq,\\\n self.stop_acq]]):\n raise Exception(\"Lengths of arrays do not match...\")\n return True\n except Exception as e:\n print((repr(e)))\n return False", "def has_t(self):\n return any(map(lambda s: s.is_temporal, self))", "def time_series(self) -> bool:\n return self._time_series", "def check_dataset_dates(self):\n # TODO: graph traverse and date checking\n pass", "def __contains__(self, ts):\n if not isinstance(ts, datetime.datetime):\n return False\n base_key = self.floor_time(key)\n return self.first_timestamp <= base_key <= self.last_timestamp", "def is_date_dtype(df, col_name):\n dtype = df.dtypes[col_name]\n return np.issubdtype(dtype, np.datetime64) or np.issubdtype(dtype, np.timedelta64)", "def check_dates(dates):\n for date in dates:\n if type(date) != datetime.datetime:\n raise TypeError('Input date, %s, not datetime object' % date)", "def test_validate_datetime(self):\n self.datatrue = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/report_2_counts.csv'))\n\n self.datafalse = pd.read_csv(pkg_resources.resource_filename(resource_package, 'tests/testing_data/random_date1.csv'))\n\n self.test1 = utils.validate_datetime(self.datatrue)\n\n self.test2 = utils.validate_datetime(self.datafalse)\n\n self.assertTrue(isinstance(self.test1, pd.DataFrame))\n\n self.assertTrue(np.dtype('datetime64[ns]') in self.test1.dtypes.tolist())\n\n self.assertFalse(np.dtype('datetime64[ns]') in self.test2.dtypes.tolist())", "def has_timestamp(self):\n return (self.data_type() & 0x100 == 0x100) and (self.raw_data_length() >= 8)", "def _checkData(data: Sequence[HistoryElement]):\r\n if not all(x.timeStamp for x in data):\r\n raise ValueError(\"At least one element in data doesn't have a TimeStamp\")", "def could_be_datetime(val, fmt):\n\n if val == None or fmt == None:\n return False\n\n if isinstance(val, datetime):\n return True\n\n if isinstance(val, (str, unicode)):\n if Record.is_empty_str(val) or Record.is_empty_str(fmt):\n return False\n\n try:\n d = datetime.strptime(val, fmt)\n if not isinstance(d, datetime):\n raise ValueError\n else:\n return True\n except Exception as e:\n logging.error(e)\n return False\n\n #otherwise\n return False", "def test_14_digit_datetime_detection(self):\n obj = 
awstats_reader.awstats_datetime('20091130165230')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDateTime))", "def _is_key_value_array(self, data):\n for d in data:\n if not self._is_key_value(d):\n return False\n return True", "def __eq__(self, t):\n if not isinstance(t, DateTime):\n return False\n return (self._micros, self._tz) == (t._micros, t._tz)", "def is_date(dt):\n return isinstance(dt, datetime.date) and not isinstance(dt, datetime.datetime)", "def has_data(self, fit_id, species_id, start=None, stop=None):\n if not (fit_id in self.raw_results and species_id in\\\n self.raw_results[fit_id]):\n return False\n if all([isinstance(x, datetime) for x in [start, stop]]):\n ts = self.raw_results[fit_id][\"start\"]\n if not any([start < x < stop for x in ts]):\n return False\n return True", "def _is_tc_entity_array(self, data):\n for d in data:\n if not self._is_tc_entity(d):\n return False\n return True", "def _check_dates_tareas(self, cr, uid, ids, context=None):\n for leave in self.read(cr, uid, ids, ['date_start_tarea', 'date_end_tarea'], context=context):\n if leave['date_start_tarea'] and leave['date_end_tarea']:\n if leave['date_start_tarea'] > leave['date_end_tarea']:\n return False\n return True", "def check_consistency(object) -> bool:\n time = np.array(list(object.keys()))\n time_diff = time[1:] - time[0:-1]\n return np.all(time_diff == 1)", "def __len__(self):\n return len(self.dates)", "def table_has_any_timestamp_fields(table_object) -> bool:\n mapper = sqlalchemy.inspect(table_object)\n for column in mapper.all_orm_descriptors:\n try:\n if isinstance(column.type, PSQL_TIMESTAMP) or isinstance(column.type, SQLITE_TIMESTAMP):\n return True\n except Exception:\n pass\n return False", "def check_dt_consistency(date_dt):\n\n # https://en.wikipedia.org/wiki/Tz_database\n # https://www.iana.org/time-zones\n \n if date_dt.tzinfo is None:\n return True\n else:\n \n # This check is quite heavy but there is apparently no other way to do it.\n if date_dt.utcoffset() != dt_from_s(s_from_dt(date_dt), tz=date_dt.tzinfo).utcoffset():\n return False\n else:\n return True", "def is_time(self) -> bool:\n return self.times > 1", "def is_noncomplex(obj):\n if type(obj) is time.struct_time:\n return True\n return False", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def NeedsArray(self, type_):\n return self._NameComponents(type_) in self._array_types", "def is_datetime(s: Union[str, int, float]):\n if is_number(s):\n return False\n\n try:\n parse_datetime(s)\n return True\n except Exception:\n return False", "def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = dt[1]\r\n except:\r\n self.dt = 0.0", "def _dateisrange(value):\n if len(re.findall('([0-9])/([0-9])', value)) > 1:\n NotImplemented\n elif len(re.findall('([0-9])/([0-9])', value)) == 1:\n return True\n else:\n return False", "def is_temporal(self, e):\n if e in self.temporal:\n return True", "def is_interval(self):\n return len(self.interval_list) > 0", "def _is_DataArrays(data):\n if isinstance(data, (Dataset, DataArray)):\n return True\n if isinstance(data, Mapping):\n for da in data.values():\n if not isinstance(da, DataArray):\n raise TypeError(\"Please provide List/Mapping of DataArrays\")\n return True\n if isinstance(data, Iterable):\n for da in data:\n if not isinstance(da, DataArray):\n return False\n # raise TypeError(\"Please provide 
List/Mapping of DataArrays\")\n return True\n return False", "def test_date_obj_within_t_delta(self):\n max_hour_count = (None, None, None)\n self.deque.append((self.datetime_obj, self.timestamp))\n result = feature_5(self.deque,\n self.heap,\n self.expected_dict,\n self.top_n,\n max_hour_count,\n self.time_rollover_queue)\n self.assertEqual(len(self.deque), 2)\n self.assertEqual(self.deque[-1], (self.datetime_obj, self.timestamp))\n self.assertEqual(result, max_hour_count)", "def is_array(self):\n return False", "def _test_df_datetime(self, df):\n date_raw = df['DateListed'].iloc[0] # e.g. '2016-01-07 00:00:00'\n first_date_time = datetime.strptime(date_raw, \"%Y-%m-%d %H:%M:%S\")\n assert first_date_time >= datetime(2016, 1, 1, 0, 0) and \\\n first_date_time < datetime(2017, 1, 1, 0, 0)", "def isscalar(array):\n arr = ma.array(array)\n if not hasattr(arr, '__len__') or arr.shape == () or len(arr) == 1:\n return True\n return False", "def _has_unicode_fields(array):\n dtypes = (d[0] for d in array.dtype.fields.values())\n return any(d.kind == \"U\" for d in dtypes)", "def valid_datetime(dt):\n if isinstance(dt.tzinfo, tzinfo) and not datetime_ambiguous(dt):\n return True\n return False", "def check_daterange(sdate, edate, st_dict):\n \n reflist = pd.date_range(start=sdate, end=edate, freq='MS').to_list()\n checklist = [] \n for y in st_dict['years']:\n for m in st_dict[y]['months']:\n checklist.append(pd.to_datetime(y + '-' + m))\n \n if list(set(checklist) & set(reflist)):\n return True", "def is_array(self):\n return len(self.descriptor) > 1", "def is_dateline(vertices):\n vertices = np.asarray(vertices, dtype=\"d\")\n longitudes = vertices[:, 0]\n return np.abs(longitudes.min(axis=0) - longitudes.max(axis=0)) > 180", "def __is_initialized(self):\n if self.start_datetime is None or self.end_datetime is None:\n raise Exception(\"TimeSeriesDataFrame Object is not initialized!\")\n\n return True", "def test_list_of_datetimes(self):\n plot_index = pd.date_range(start=\"2000-1-1\", freq=\"D\", periods=10000)\n freq = 'M'\n\n dates = pd.Series(1, index=plot_index).resample(freq).index\n tl = formatter.TimestampLocator(plot_index, xticks=dates)\n test = tl._process(3, 900)\n\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n correct = tl._process(3, 900)\n tm.assert_almost_equal(test, correct)\n\n freq = 'MS'\n dates = pd.Series(1, index=plot_index).resample(freq).index\n tl = formatter.TimestampLocator(plot_index, xticks=dates)\n test = tl._process(3, 900)\n\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n correct = tl._process(3, 900)\n tm.assert_almost_equal(test, correct)\n\n # straight list of dates\n freq = 'MS'\n dates = pd.Series(1, index=plot_index).resample(freq).index\n dates = list(dates)\n tl = formatter.TimestampLocator(plot_index, xticks=dates)\n test = tl._process(3, 900)\n\n tl = formatter.TimestampLocator(plot_index, freq=freq)\n correct = tl._process(3, 900)\n tm.assert_almost_equal(test, correct)", "def is_multiobjects(x: Any) -> bool:\r\n return (is_listlike(x) or (isinstance(x, np.ndarray)\r\n and x.dtype == \"O\")) and len(x) > 0 and not is_scalar(x[0])", "def has_value(cls, value):\n return bool(isinstance(value, numbers.Number) or isinstance(value, time) or \\\n isinstance(value, datetime) or value)", "def test_dates(self):\n result = export.processExport(houseId=1,\n startDate = datetime.datetime(2013, 01, 06) #5 Days\n )\n\n self.assertEqual(result.shape, (1440, 2))\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 06))\n 
self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 10, 23, 55))\n\n\n #Stop at 00:00 on the 5th\n result = export.processExport(houseId=1,\n endDate = datetime.datetime(2013, 01, 05, 23, 55) #5 Days\n )\n\n self.assertEqual(result.shape, (1440, 2))\n self.assertEqual(result.index[0], datetime.datetime(2013, 01, 01))\n self.assertEqual(result.index[-1], datetime.datetime(2013, 01, 05, 23, 55))", "def validateTiming(obj):\n if obj.start_time:\n if obj.end_time:\n return obj.start_time <= timezone.now() and obj.end_time > timezone.now()\n else:\n return obj.start_time <= timezone.now()\n else:\n if obj.end_time:\n return obj.end_time > timezone.now()\n else:\n return True", "def is_interpretable(self):\n return bool(self.as_date() or self.as_time())", "def validDateTime( dateTime ):\n try:\n datetime.strptime( dateTime, \"%Y-%m-%dT%H:%M:%S.%fZ\" )\n return True\n except ValueError:\n return False", "def test_paid_at(self):\n\n self.assertIsInstance(self.obj.paid_at, datetime)", "def validate(self, test_data):\n if not isinstance(test_data, datetime.datetime):\n raise ValidationError('Invalid type/value.', 'datetime.datetime',\n type(test_data))", "def datetime_has_tz(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None", "def is_arraylike(obj):\n if isinstance(obj, list):\n return True\n elif isinstance(obj, np.ndarray):\n return True\n elif isinstance(obj, pd.Series):\n return True\n elif isinstance(obj, pd.DataFrame):\n return True\n return False", "def test_8_digit_date_detection(self):\n obj = awstats_reader.awstats_datetime('20091130')\n self.assertTrue(isinstance(obj, awstats_reader.AwstatsDate))", "def is_pandas_collection(l):\n if not (hasattr(l, '__iter__') and len(l) > 0):\n # make sure it's iterable\n # don't include empty iterables because\n # that would include some sklearn estimator objects\n return False\n\n if isinstance(l, dict):\n l = l.values()\n\n for i in l:\n if not (isinstance(i, pd.DataFrame) or isinstance(i, pd.Series)):\n return False\n\n return True", "def test_hk_getdata_timeline_array_type(self):\n _, timelines = load_data(self._file)\n assert isinstance(timelines['group0']['t'], np.ndarray)", "def is_rejoinee(self):\n return len(self._start_date) > 1", "def in_cached_file(self):\n return isinstance(self._subarray, CachedArray)", "def test_empty_dates(self):\n\n for func in self.main_functions:\n self.assertFalse(func(20190120, 20130201).size)", "def is_array(a):\n try:\n shape = a.shape\n return len(shape) >= 1\n except AttributeError:\n return False", "def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def test_created_at(self):\n self.assertIsInstance(self.obj.created_at, datetime)", "def __len__(self):\n return len(self._timeseriesData)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def isscalar(x):\n arrayed_x = asarray(x)\n return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'", "def is_dataset_in_t_range(dataset, t_min, t_max):\n for obj in dataset.object_names:\n obj_data = dataset.data[obj]\n t_min_obj = np.min(obj_data['mjd'])\n t_max_obj = np.max(obj_data['mjd'])\n if (t_min_obj < 
t_min) or (t_max_obj > t_max):\n return False\n return True", "def valid_time_range(self, range_indexes):\n ts_id = self.get_time_series().id\n if self.df_class.loc[range_indexes, ts_id].eq(True).all():\n logging.warning(msg_injection_all_anomalies())\n return False\n else:\n return True", "def __isub__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___isub__(self, *args, **kwargs)", "def test_dict_keys_datetime_passthrough(self):\n assert (\n orjson.dumps(\n {datetime.datetime(2000, 1, 1, 2, 3, 4, 123): True},\n option=orjson.OPT_NON_STR_KEYS | orjson.OPT_PASSTHROUGH_DATETIME,\n )\n == b'{\"2000-01-01T02:03:04.000123\":true}'\n )", "def is_timestamp(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n return is_timestamp(schema_obj.metadata)\n elif isinstance(schema_obj, schema.Struct):\n return schema_obj.full_name == 'google.protobuf.Timestamp'\n return False", "def is_array(self, arr):\n return isinstance(arr, np.ndarray)", "def test_created_at(self):\n self.assertIsInstance(self.certificate_history.datetime, datetime.datetime)", "def is_array_type(self, objtype):\n return isinstance(objtype, self.__arrayt) # _ctypes.PyCArrayType", "def valid_visa(visa):\n if dates_difference(visa[\"date\"]) < 730:\n return True\n return False", "def is_interval(self):\n return True", "def test_TimeArray_repr():", "def toc(self,timestamp):\n return self._timestamp > timestamp", "def _check_dates_tarea_start(self, cr, uid, ids, context=None):\n for leave in self.read(cr, uid, ids, ['date_start_tarea', 'date_start_proyecto'], context=context):\n if leave['date_start_tarea'] and leave['date_start_proyecto']:\n if leave['date_start_tarea'] < leave['date_start_proyecto']:\n return False\n return True", "def is_categoric(series: List) -> bool:\n if series.dtype == str or series.dtype == np.object:\n try:\n if (\n int(re.split(r\"[^\\w\\s]\", series[0])[0]) >= 1900\n and len(re.split(r\"[^\\w\\s]\", series[0])) >= 3\n ):\n return False\n else:\n return True\n except:\n if (\n len(series.unique()) / len(series) == 1\n or \"id\" in series.name.lower()\n ):\n return False\n elif (\n True in series.unique().tolist()\n and False in series.unique().tolist()\n ):\n return False\n elif (\n \"True\" in series.unique().tolist()\n and \"False\" in series.unique().tolist()\n ):\n return False\n else:\n return True\n else:\n return False", "def __isZeroEverywhere(self, array):\n epsilon = numpy.finfo( type(array[0]) ).eps\n boolList = numpy.less_equal(numpy.abs(array), epsilon)\n\n for b in boolList:\n if not b:\n return False\n return True", "def _validate_dict_entry(self, dict_entry):\r\n try:\r\n # Type-check all of the type-critical items.\r\n if (\r\n type(dict_entry[\"id\"]) == int and\r\n type(dict_entry[\"date\"]) == datetime.date and\r\n type(dict_entry[\"time\"]) == datetime.time and\r\n type(dict_entry[\"datetime\"]) == datetime.datetime and\r\n type(dict_entry[\"duration\"]) == datetime.timedelta):\r\n return True\r\n else:\r\n return False\r\n # end if\r\n except Exception as err:\r\n _z_exc(\"logentry.py/_validate_dict_entry\", err)\r\n # end try\r", "def one_day(self):\n return self.end.date() == self.date", "def valid_format(self):\n\n # If candidate is None, return true\n if not self.dt:\n print \"dt empty\"\n return True\n\n # Verify if time format is ok and stores in into a time-tuple format\n try:\n stime = datetime.strptime(self.dt, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n return False\n else:\n return True", "def is_full(self) -> bool:\n return self._array[0].all()", "def 
is_data(i):\n keys = ['_id', '_time']\n return all(i != k for k in keys)", "def graph_has_temporal(g):\n return any(any(edge.get(p) == 'time' for p in {'argmax', 'argmin', 'type'}) or 'num' in edge for edge in g.get('edgeSet', []))", "def check_date(created_at, start, end):\n x = get_date(created_at)\n return x <= end and x >= start", "def is_array(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_array)" ]
[ "0.72152364", "0.68011504", "0.67946845", "0.6540459", "0.6300369", "0.62526584", "0.6159265", "0.612599", "0.612436", "0.60484034", "0.60277694", "0.6001856", "0.5956571", "0.5938945", "0.58849597", "0.58664906", "0.58624506", "0.5751764", "0.56482595", "0.56456316", "0.5640433", "0.5638849", "0.5635289", "0.55743665", "0.55646634", "0.5558836", "0.55414283", "0.5526302", "0.5519423", "0.55170965", "0.54992944", "0.5493077", "0.5477467", "0.54586625", "0.5452428", "0.5445292", "0.54435813", "0.54297835", "0.5429741", "0.5426795", "0.5426606", "0.5404101", "0.53979665", "0.5396521", "0.53841734", "0.53830475", "0.53734213", "0.5367579", "0.5356255", "0.5337177", "0.53363216", "0.53201467", "0.53149575", "0.5311156", "0.53082126", "0.530581", "0.5278697", "0.52702135", "0.5264012", "0.524948", "0.5249242", "0.5245368", "0.5243252", "0.52327543", "0.52320415", "0.5206431", "0.52006215", "0.5182226", "0.5177959", "0.5167405", "0.5167405", "0.5167405", "0.5166753", "0.5162158", "0.5162158", "0.5162158", "0.5156749", "0.5152084", "0.51493293", "0.51463586", "0.51457274", "0.5144246", "0.5137541", "0.5130838", "0.512798", "0.51277894", "0.5126735", "0.51212716", "0.51109326", "0.51072645", "0.51022786", "0.5096927", "0.5096412", "0.5095657", "0.50938547", "0.5085799", "0.50855654", "0.5081769", "0.50763565", "0.5069899" ]
0.71551335
1
Close the file containing the subarray, if there is one.
def file_close(self): if self.on_disk: self._subarray.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close_file(self):\n self.root_group.close()", "def __del__(self):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n # If the subarray is unique it will have 2 references to\n # it plus 1 within this method, making 3. If it has more\n # than 3 references to it then it is not unique.\n if getrefcount is not None:\n self._decrement_file_counter()\n if subarray is None or getrefcount(subarray) > 3:\n return\n else:\n # getrefcount has itself been deleted or is in the process\n # of being torn down\n return\n\n _partition_file = getattr(subarray, \"_partition_file\", None)\n if _partition_file is not None:\n # This partition contains a temporary file which is not\n # referenced by any other partition on this process, so if\n # there are no lock files present remove the file from\n # disk.\n _remove_temporary_files(_partition_file)\n\n else:\n try:\n if FileArray is not None and isinstance(subarray, FileArray):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if self.file_counter.get(filename, 999) <= 0:\n # This partition contains a non-temporary file\n # which is not referenced by any other\n # partitions, so close the file.\n subarray.close()\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass\n # --- End: if", "def close(self):\n return self.close_array", "def close_raster_file(self):\n try:\n if self.dataset:\n del self.dataset\n self.dataset = None\n except AttributeError:\n pass", "def close_datafile(fs):\r\n fs.close() # fs is the output from init_datafile\r", "def close_file(self):\r\n self.file.close()", "def close(self):\n self.fileobj.close()", "def close(self):\n self.fileobj.close()", "def close(self):\n\t\tself.filep.close()", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def release(self, path, fh, *args, **pargs):\n with(self.rwlock):\n # If we're closing a FLACCue file...\n if(path in self._open_subtracks):\n # Delete the file handle from the stored list.\n del self._open_subtracks[path]['Positions'][fh]\n # Close the OS reference to the file.\n return os.close(fh)", "def close(self):\n self.fout.close()", "def close_file(self, data_set):\n if hasattr(data_set, '_h5_base_group'):\n data_set._h5_base_group.close()\n # Removes reference to closed file\n del data_set._h5_base_group\n else:\n logging.warning(\n 'Cannot close file, data_set has no open hdf5 file')", "def _close( self ):\n for raster in self._raster_data:\n if raster != []:\n raster.close()", "def close_file(self):\n self.file.close()", "def close_file(self):\n self.file.close()", "def Close(self):\n super(CPIOArchiveFile, self).Close()\n self._file_entries = None", "def test_fileobj_not_closed(self):\n\n f = open(self.data(\"test0.fits\"), \"rb\")\n _ = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n _ = fits.getheader(f)\n assert not f.closed\n\n f.close() # Close it now", "def _close(self):\n self.fh.close()", "def close_csv_file(self):\n if self.file is not None:\n self.file.close()", "def close(self):\n self.file.close()", "def close(self):\n self.file.close()", "def close(self):\n if self.mode == \"w\":\n # Write the content index\n self.cnt.write(self.file)\n\n self.file.close()", "def close_file(file):\n file.close()", "def close(self):\n self.__file.close()", "def close_file_serializer(self):\n if self.file_writer:\n self.file_writer.close()\n self._output_file = None", "def 
close(self):\n self.file_out.close()", "def closeJson(f):\n f.write(']')\n f.close()", "def close(self):\n if not self.file.closed:\n self.file.close()", "def close(self):\n self.__file_object.close()", "def _close( self ):\n for sji in self._sji_data:\n sji.close()", "def close(cls, file):\n file.close()", "def close(self):\n self.f.close()", "def close(self):\n self.f.close()", "def close_subblock(self, lines: Tuple[int, int]) -> None:", "def close(self):\r\n self._fp.close()", "def close(self):\n self._file.close()", "def close(self):\n self._file.close()", "def close(self):\n if self.file is not None:\n self.file.close()\n self.file = None", "def close_file_deserializer(self):\n if self._input_file:\n self._input_file.close()\n self._input_file = None", "def close(self):\n\t\tif self.is_open:\n\t\t\tself.hdf5file.close()\n\t\t\tself.is_open = False", "def close(self):\n self.out_file.close()", "def close_hdf_file(self):\n\t\tself.h5f.close()", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # 
unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "def close(self):\n if self._open:\n self.grp.file.close()\n self._open = False", "def closeButton(self):\n #Close button\n # if there is a shuffle, reorder the samples for the files\n if self.shuffle:\n self.matrix = [self.matrix[i] for i in self.list_numbers]\n \n for i in range(0,len(self.matrix)):\n self.f.write(str(self.matrix[i][0])+\" \"+str(self.matrix[i][1])+\" \"+str(self.matrix[i][2])+\"\\n\")\n self.f.close()\n self.v.quit()", "def close(self):\n if self.record_mode:\n self.fout.close()\n self.record_mode = False", "def close(self):\n if self.record_mode:\n self.fout.close()\n self.record_mode = False", "def test_fileobj_not_closed(self):\n\n f = open(self.data('test0.fits'), 'rb')\n data = fits.getdata(f)\n assert not f.closed\n\n f.seek(0)\n header = fits.getheader(f)\n assert not f.closed", "def test_close(self):\r\n\r\n b = BufferedWriter(self.test_fp, buf_size=2)\r\n b.write(\"1\")\r\n b.close()\r\n content = open(self.test_fp, \"r\").readlines()\r\n self.assertEquals(content, [\"1\"])", "def close(self) -> None:\n self.f.close()", "def close_file_handle(self):\n if self.file_handle and self.output_file:\n self.file_handle.close()", "def close(self):\n self.file.close()\n self.file = None", "def __del__(self):\n self.file.close()", "def close(self):\n self._fp.close()", "def close(self):\r\n if 
self._filename and self._fh:\r\n self._fh.close()\r\n self._fh = None", "def close(self):\n self.data.close()", "def write_file(self):\n print 'Writing '+self.name+' binary...'\n if self.vals is not None:\n if len(self.vals) == self.size:\n stream = self.pack_mem()\n with open(self.name+'.bin','wb') as f:\n f.write(stream)\n print 'File written: '+self.name+'.bin'\n else:\n print 'Error: input array for '+self.name+'is not the right '+\\\n 'size (should be '+str(self.size)+'). Skipping.'\n else:\n print 'No array provided, skipping.'", "def close(self, verify_file=True):\n if self.fp:\n do_close(self.fp)\n self.fp = None", "def Close(self): \n posix.close(self.File)", "def close(self):\n file = self.file\n self.file = None\n self.filename = None\n self.current_line = None\n file.close()", "def close(self):\n self.ix.close()", "def close(self):\n if callable(getattr(self._file, 'close', None)):\n self._iterator.close()\n self._iterator = None\n self._unconsumed = None\n self.closed = True", "def close(self):\n if hasattr(self, '_FITS'):\n if self._FITS is not None:\n self._FITS.close()\n self._FITS = None\n self._filename = None\n self.mode = None\n self.charmode = None\n self.intmode = None\n self.hdu_list = None\n self.hdu_map = None", "def endfile(self) :\n\n\t\tself.fd.close()\n\t\treturn 1", "def close():", "def close_file(self, mds_fh):\n seqid=0 #FIXME: seqid must be !=0\n fh, stateid = self.filehandles[mds_fh]\n ops = [op.putfh(fh)] + [op.close(seqid, stateid)]\n res = self.execute(ops)\n # ignoring return\n del self.filehandles[mds_fh]", "def close(self):\n if self.__mode == 'w':\n self.__buff.insert(0, self.__header())\n self.__buff.append(';\\nEND;\\n')\n self.__fobj.write(''.join(self.__buff))\n return self.__fobj.close()", "def close(self):\n self._outfile.write(struct.pack('>2h', 4, 0x0400))\n if self._close:\n self._outfile.close()", "def close(self):\n self._outfile.write(struct.pack(\">2H\", 4, 0x0400))\n if self._close:\n self._outfile.close()", "def __del__(self):\n self.f.close()", "def close(self):\n if not self._f:\n return\n\n logger.info(\"Closed {} ({})\".format(self.name, self.num))\n\n self._f.close()\n self._f = None", "def close(self):\n if self.write_buffer is not None:\n self.write_buffer.close()", "def close( self ):\r\n self.oodocument.close( 1 )", "def close(self):\r\n self._report_file.close()\r\n # Make sure everything's closed.\r\n for files in self._output_files.values():\r\n for f in files.values():\r\n f.close()", "def close(self):\n self.flush()\n self.file.close()\n self.file = None", "def endFileOutput(self):\n self._outputFile.close()", "def __del__(self):\n self.file_out.close()", "def close(self):\n self.hdfile.close()", "def close(self):\n self.hdfile.close()", "def finish(self):\n self._fout.add_size()\n self._fout.add_padding()\n self._fout.close()", "def close( self ):\n if not self.sji is None:\n self.sji._close()\n if not self.raster is None:\n self.raster._close()", "def _CloseOutputFiles(self):\n self.gfile.close()\n self.efile.close()", "def closeJson(self, data):\n with open(self.file, 'w') as f:\n json.dump(data, f)", "def close(self):\n try:\n self._failed = True\n if self._file is not None:\n self._file.close()\n self._failed = False\n finally:\n self._file = None", "def close( self ):\n \n for file in self._files:\n ir.file_hub.close( file )", "def close(self, _file):\n if isinstance(file, _file):\n _file.close()\n else:\n raise TypeError(\n \"Excepting a FileType not %s \" % (type(_file))\n )", "def close(self, close_file=True):\n 
if self.__ref is not None:\n self.__lib.flush()\n if close_file == True:\n self.__ref.close()\n self.__ref = None\n self.__lib = None", "def save(file, arr, allow_pickle=True, fix_imports=True):\n\n return numpy.save(file, array_create.array(arr, bohrium=False), allow_pickle, fix_imports)", "def close_comprar(self):\n self.comprar.destroy()", "def _add_to_file_counter(self, i):\n # subarray = getattr(self, '_subarray', None)\n subarray = self._subarray\n\n if subarray is None:\n return\n\n try:\n if isinstance(subarray, FileArray) and not isinstance(\n subarray, CachedArray\n ):\n try:\n filename = subarray.get_filename()\n except Exception:\n filename = None\n\n if filename is None:\n return\n\n file_counter = self.file_counter\n # count = file_counter.get(filename, 0)\n # file_counter[filename] = count + i\n # if file_counter[filename] <= 0:\n count = file_counter.get(filename, 0) + i\n if count <= 0:\n # Remove the file from the dictionary if its count has\n # dropped to zero\n file_counter.pop(filename, None)\n else:\n file_counter[filename] = count\n except Exception:\n # If we're here then it is likely that FileArray has been\n # torn down, so just do nothing.\n pass", "def finalize(self):\n if self._file:\n toLog(\"Closing file `{0}`\".format(self._fname), True)\n self._file.close()\n self._file = None", "def quick_save_array(data, file_name, delimiter=',', ):\n data.tofile(file_name, sep=delimiter)", "def _write_array_on_file(self, pa_array):\n pa_batch = pa.RecordBatch.from_struct_array(pa_array)\n self._num_bytes += pa_array.nbytes\n self.pa_writer.write_batch(pa_batch)", "def close(self):\r\n if self._session:\r\n self._session.close()\r\n self._session = None\r\n try:\r\n self._writer.remove_file()\r\n self._reader.remove_file()\r\n except Oct2PyError:\r\n pass", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def Close(self):\n self._RaiseIfNotWritable()\n\n self._storage_file.Close()\n self._storage_file = None", "def __del__(self):\r\n self.filename.close()", "def __del__(self):\n if self.file is None:\n return\n try:\n self.file.close()\n del self.file\n self.file = None\n except:\n getLogger(__name__).warning('Error on file close', exc_info=True)", "def close(self, **kw) -> None:\n super().close(**kw)\n self._file.close()" ]
[ "0.61085176", "0.6024418", "0.59545076", "0.5824396", "0.58042437", "0.5736725", "0.57022864", "0.57022864", "0.56994104", "0.56970567", "0.56860274", "0.564539", "0.56289333", "0.56183493", "0.5591367", "0.5591367", "0.55517685", "0.5509945", "0.5487693", "0.54802656", "0.54625976", "0.54625976", "0.5456624", "0.54523706", "0.5448531", "0.5446381", "0.5442482", "0.54284096", "0.54231614", "0.54186165", "0.5413946", "0.54104435", "0.5409187", "0.5409187", "0.54071975", "0.5406899", "0.53935415", "0.53935415", "0.5379382", "0.53783435", "0.53725004", "0.534277", "0.53378725", "0.53273296", "0.5323528", "0.5314895", "0.52969813", "0.52969813", "0.5270878", "0.5255893", "0.52350175", "0.52156496", "0.5192522", "0.5183164", "0.5158139", "0.5153369", "0.5146133", "0.51337415", "0.5131872", "0.5121358", "0.51112723", "0.51011467", "0.5095343", "0.5090327", "0.5072971", "0.5066568", "0.5065016", "0.50645465", "0.50528437", "0.5050413", "0.5037242", "0.502396", "0.5004375", "0.50040233", "0.49919924", "0.49884698", "0.49803802", "0.49771494", "0.49765706", "0.49765706", "0.49632463", "0.4956913", "0.49437773", "0.49353203", "0.4934011", "0.49181095", "0.4915223", "0.49125916", "0.49105388", "0.49062186", "0.48947278", "0.4894391", "0.4892698", "0.48923147", "0.48912776", "0.48895654", "0.48867685", "0.48850223", "0.48696014", "0.4866721" ]
0.7828047
0
Inspect the object for debugging.
def inspect(self): print(cf_inspect(self))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inspect(obj:Any) -> None:\n\t\tLogging._log(Logging.logLevel, obj)", "def debug(self):\n raise NotImplementedError", "def output_debug_info(self):", "def inspect_obj(self, line=None):\n if not line:\n return\n\n # evaluate the line to get a python object\n python_object = self.shell.ev(line)\n\n # Pretty Print/Format the object\n # Print the output, but don't return anything (otherwise, we'd\n # potentially get a wall of color-coded text.\n formatted_dict = pformat(python_object.__dict__)\n print(highlight(formatted_dict, lexer, formatter).strip())\n pprint(python_object)", "def debug():", "def source_code(obj):\n print(inspect.getsource(obj))", "def NeedsDebugInfo(self):\n return True", "def print_debug(obj):\n\n if not DEBUG:\n return False\n\n if hasattr(obj, 'lower'):\n # string/unicode... just print it.\n print('Debug: {}'.format(obj))\n elif isinstance(obj, (list, tuple)):\n # list/tuple, format it...\n header = 'Debug: '\n spaces = (' ' * len(header))\n if obj:\n print('{}{}'.format(header, obj[0]))\n if len(obj) > 1:\n otherlines = '\\n{}'.format(spaces).join(obj[1:])\n print('\\n{}{}'.format(spaces, otherlines))\n else:\n # different type of object\n print('Debug: {!r}'.format(obj))\n\n return True", "def debug(self):\n return self._debug", "def debug(self):\n try:\n super(FaucetTopoTestBase, self).debug()\n except Exception:\n pprint.pprint(self.host_information)\n raise", "def print_object_details(obj: object) -> None:\n print_section(obj, 'Type', print_type)\n print_section(obj, 'Documentation', print_documentation)\n print_section(obj, 'Attributes', print_attributes)\n print_section(obj, 'Methods', print_methods)\n print_section_delimiter()", "def __repr__(self):\n return repr(self.__dict__['_obj'])", "def _inspect(self,meth,oname,**kw):\n \n oname = oname.strip()\n found,obj,ns,ds,ismagic = self._ofind(oname)\n if found:\n pmethod = getattr(self.shell.inspector,meth)\n if meth == 'pdoc':\n formatter = ismagic and self.format_screen or None\n pmethod(obj,oname,formatter)\n elif meth == 'pinfo':\n formatter = ismagic and self.format_screen or None\n pmethod(obj,oname,ns,formatter,ismagic=ismagic,**kw)\n else:\n pmethod(obj,oname)\n else:\n print 'Object `%s` not found.' 
% oname\n return 'not found' # so callers can take other action", "def debug(self, *args, **kwargs):", "def NeedsDebugInfo(self):\n return False", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def DebugInfo( self, request_data ):\n pass", "def debug(self):\n print(self.memory)\n print('r0 = %s, ip = %s' % (self.r0, self.ip))", "def inspect_object(obj):\n raise TryNext", "def dump_objects():\n pass", "def debug(self):\n return Debug(self)", "def do_inspect(self, arg):\n self._do_inspect(arg, with_source=False)", "def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)", "def print(self):\r\n print(\"[DEBUG] STACK: \", self.__memory.__repr__())", "def compute_debug(self):", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def debug() -> bool:", "def debug(self):\n #breakpoint() # infinite loop\n print(self.ttl)", "def debug(self) -> bool:\n return self._data[ATTR_DEBUG]", "def debug_print(self):\n print self.title\n print self.storyline\n print self.poster_image_url\n print self.trailer_youtube_url\n print \"------\"", "def test_debug(self) -> Debug:\n return self._test_debug", "def Print(self):\n print(self.__dict__)", "def info(self):\n\t\timport inspect\n\t\n\t\tmessage = \"All variables available for star ID %i\" % self.ID\t\t\n\t\tprint message\n\t\tprint '-'*len(message)\n\t\tattributes = inspect.getmembers(self, lambda a:not(inspect.isroutine(a)))\n\t\tfor a in attributes:\n\t\t\tif (a[0].startswith('__') and a[0].endswith('__')): continue\n\t\t\tprint a[0], \"=\", a[1]", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def _inspect_mode(self):\n return self._project._inspect_mode", "def _print_attribute(self):\n print(vars(self))", "def debug_string(self):\n\n raise NotImplementedError", "def debug(node):\n print \"%r\" % node", "def print_dict(self):\n print(self.__dict__)", "def debug(self, *args: Any, **kwargs) -> None:\n ...", "def __debug_print__(self):\n print(self.question_data)", "def __repr__(self):\n return self.pretty_print(self.__dict__)", "def dump(self):\n self.logger.debug(self)", "def dump_objects(self):\n #print 'Object Count: ', self.object_store.len()\n \n for item in self.object_store:\n print 'Object Name: ', item.__dict__['Name'], ' LocalID: ', item.__dict__['LocalID']", "def debug():\n return exported_res_dict", "def __str__(self):\n return stringify(\n Inspect(\n self,\n help=True,\n methods=True,\n private=True,\n dunder=False,\n sort=True,\n all=False,\n ),\n maxlen=-1,\n )", "def debug(self) -> str:\n\n return Widget.debug(self)", "def show_debug_msg(self) -> None:\n if self.debug_mode:\n for point in self.points:\n print(point.debug_info())", "def 
debug_allowed(request_object):\n \n return utilities.debug_allowed(request_object)", "def print_debug(self):\n print()\n print(\"Variable names ({} total):\".format(len(self.variable_names)))\n print()\n for variable in self.variable_names:\n print(variable)\n print()\n\n print(\"Clauses:\")\n print()\n for clause in self.abstract_clauses:\n print(clause)", "def dump():\n\t\treturn self.__dict__;", "def _debugGenerator(self, generatorName, obj):\n\n debug.println(debug.LEVEL_FINER,\n \"GENERATOR: %s\" % generatorName)\n debug.println(debug.LEVEL_FINER,\n \" obj = %s\" % obj.name)\n debug.println(debug.LEVEL_FINER,\n \" role = %s\" % obj.getRoleName())", "def info(self):", "def info(self):", "def info(self):\n print self.id, self.type, self.xyz.get_xyz", "def pprint(self):\n print(self.pprint_str())", "def _debug_pyomo_print(self, m):\n print('/' + '='*80)\n print('DEBUGG model pieces:')\n print(' -> objective:')\n print(' ', m.obj.pprint())\n print(' -> variables:')\n for var in m.component_objects(pyo.Var):\n print(' ', var.pprint())\n print(' -> constraints:')\n for constr in m.component_objects(pyo.Constraint):\n print(' ', constr.pprint())\n print('\\\\' + '='*80)\n print('')", "def debug(self, message):\r\n pass", "def details(self):\n pass", "def dump(self):\n return", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug(self, input):\n # Pass the debug information that you may think is important for your\n # evaluators\n debug_info = 'debug info'\n return debug_info", "def debug():\n # written before I knew about the pdb module\n caller = currentframe().f_back\n method_name = caller.f_code.co_name\n line_no = getframeinfo(caller).lineno\n print(method_name + \": line \" + str(line_no))\n code.interact(local=dict(globals(), **caller.f_locals))", "def print_attributes1(obj):\n\tfor attr in vars(obj):\n\t\tprint(attr, getattr(obj, attr))", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def printer(obj, ident=''):\n import inspect\n print ident + obj.__class__.__name__.upper()\n ident += ' '\n lists = []\n for name in dir(obj):\n elem = getattr(obj, name)\n if isinstance(elem, list) and name != u'decoded_content':\n lists.append(elem)\n elif not inspect.ismethod(elem):\n if not name.startswith('__'):\n if name == u'data' and elem:\n print ident + u'data = '\n printer(elem, ident + ' ')\n else:\n print ident + u'%s\\t= %s' % (name, getattr(obj, name))\n for l in lists:\n for i in l:\n printer(i, ident + ' ')", "def _print_properties(self):\n return NotImplemented", "def __debug(msg):\n\n pass", "def debug_info_struct(fdt):\n # Traverse node tree in depth first\n depth = 0\n path = b''\n root = fdt.get_root_node()\n debug_node(fdt, root, depth, path)", "def __debug(self):\n\t\tprint \"Dumping Object Chat\"\n\t\tprint self.userA.username +' + '+ self.userB.username", "def _debug_stack(self):\n debug(\"current stack: %s\" % self.calc.stack)", "def print_attributes2(obj):\n\tfor attr, val in 
vars(obj).items():\n\t\tprint(attr, val)", "def debug(self):\n return Config.DEBUG", "def dump(self) -> None:\n ...", "def __repr__(self):\n\n return repr(self.__dict__)", "def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def info(obj=None):\n if obj is None:\n print (\"Python keywords:\")\n import keyword\n for kwname in keyword.kwlist:\n print (\" \", kwname)\n print(\"Built in objects:\")\n for bi_object_name in sorted(__builtins__.keys()):\n bi_object = __builtins__[bi_object_name]\n if callable(bi_object):\n if type(bi_object) is types.ClassType:\n print(\" {} (class)\".format(bi_object.__name__))\n elif type(bi_object) is types.FunctionType:\n print(\" {} (function)\".format(bi_object.__name__))\n elif hasattr(obj, \"__doc__\") and obj.__doc__ is not None:\n print (\"Documentation for %s :\\n\" % (obj.__name__))\n print (obj.__doc__)\n elif type(obj) is types.ModuleType:\n pprint(dir(obj))\n elif type(obj) is types.ClassType:\n pprint(dir(obj))\n elif type(obj) is types.InstanceType:\n pprint(dir(obj))\n pprint(dir(obj.__class__))\n return \"\"", "def magic_pinfo(self, parameter_s=''):\n \n # look for the given object in all namespaces\n qmark1,oname,qmark2 = re.match('(\\?*)([^?]*)(\\??)',parameter_s).groups()\n # detail_level: 0 -> obj? , 1 -> obj??\n detail_level = 0\n if qmark1 or qmark2:\n detail_level = 1\n self._inspect('pinfo',oname,detail_level=detail_level)", "def __str__(self):\r\n\r\n for att in self.__dict__:\r\n print(\"%s: %r\" % (att, getattr(self, att)))\r\n\r\n return \"Planet Population class object attributes\"", "def do_show(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n print(obj)", "def debug(self):\r\n for test in self:\r\n test.debug()", "def dump_methods_and_datatypes(self):\n\n print(self._client)", "def __str__(self):\n \n for att in self.__dict__:\n print('%s: %r' % (att, getattr(self, att)))\n \n return 'Background Sources class object attributes'", "def debug(self):\n return self.settings['debug']", "def print_objects(self):\n print(\"Spaces: {}\".format([s.name for s in self.spaces]))\n print(\"Characters: {}\".format([c.name for c in self.characters]))\n print(\"Items: {}\".format([i.name for i in self.items]))", "def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"", "def debugprint(debugobject, debugstring):\n if CMDLINEARGS.debug:\n print \"===== \" + debugstring + \" =====\"\n pprint.pprint(debugobject)\n print \"===== \" + debugstring + \" =====\"\n print \"\"", "def _showProperty(self):\n pass", "def repr_(object_):\n return repr(object_)", "def print_performance_info(self):\n pass", "def visualizar(self):\n print(self.stack)", "def inspect(self):\n\n input_array = []\n for mech in list(self.originMechanisms.mechanisms):\n input_array.append(mech.value)\n input_array = np.array(input_array)\n\n recurrent_init_array = []\n for mech in list(self.recurrentInitMechanisms.mechanisms):\n recurrent_init_array.append(mech.value)\n recurrent_init_array = np.array(recurrent_init_array)\n\n output_state_names = []\n output_value_array = []\n for mech 
in list(self.terminalMechanisms.mechanisms):\n output_value_array.append(mech.outputValue)\n for name in mech.outputStates:\n output_state_names.append(name)\n output_value_array = np.array(output_value_array)\n\n from PsyNeuLink.Components.Projections.ControlProjection import ControlProjection\n from PsyNeuLink.Components.Projections.LearningProjection import LearningProjection\n learning_projections = []\n controlled_parameters = []\n for mech in list(self.mechanisms):\n for parameter_state in mech.parameterStates:\n try:\n for projection in parameter_state.receivesFromProjections:\n if isinstance(projection, ControlProjection):\n controlled_parameters.append(parameter_state)\n except AttributeError:\n pass\n for output_state in mech.outputStates:\n try:\n for projection in output_state.sendsToProjections:\n for parameter_state in projection.paramaterStates:\n for sender in parameter_state.receivesFromProjections:\n if isinstance(sender, LearningProjection):\n learning_projections.append(projection)\n except AttributeError:\n pass\n\n inspect_dict = {\n PROCESSES: self.processes,\n MECHANISMS: self.mechanisms,\n ORIGIN_MECHANISMS: self.originMechanisms.mechanisms,\n INPUT_ARRAY: input_array,\n RECURRENT_MECHANISMS: self.recurrentInitMechanisms,\n RECURRENT_INIT_ARRAY: recurrent_init_array,\n TERMINAL_MECHANISMS: self.terminalMechanisms.mechanisms,\n OUTPUT_STATE_NAMES: output_state_names,\n OUTPUT_VALUE_ARRAY: output_value_array,\n NUM_PHASES_PER_TRIAL: self.numPhases,\n MONITORING_MECHANISMS: self.monitoringMechanisms,\n TARGET_MECHANISMS: self.targetMechanisms,\n LEARNING_PROJECTION_RECEIVERS: learning_projections,\n CONTROL_MECHANISMS: self.controlMechanism,\n CONTROL_PROJECTION_RECEIVERS: controlled_parameters,\n }\n\n return inspect_dict", "def do_display_object(self, address):\n address = self.ParseAddressExpr(address)\n if self.reader.IsAlignedAddress(address):\n address = address + 1\n elif not self.heap.IsTaggedObjectAddress(address):\n print(\"Address doesn't look like a valid pointer!\")\n return\n heap_object = self.padawan.SenseObject(address)\n if heap_object:\n heap_object.Print(Printer())\n else:\n print(\"Address cannot be interpreted as object!\")", "def debug():\n # print(\" ++ debugging 'pyre.filesystem'\")\n # attach {Extent} as the metaclass of Node and Filesystem so we can verify that all\n # instances of these classes are properly garbage collected\n from ..patterns.Extent import Extent\n\n global _metaclass_Node\n _metaclass_Node = Extent\n # all done\n return", "def show_class_details(name, f):\n print '%s:' % name\n print '\\tobject:', f\n print '\\t__name__:', \n try:\n print f.__name__\n except AttributeError:\n print '(no __name__)'\n print '\\t__doc__', repr(f.__doc__)\n return", "def dump(self):\n outputs = [\"Code object : %s\" % self.name]\n outputs.append(\" Type : %s\" % self.object_type)\n for source_line in self.source:\n # Each line is a (line_number, code) pair\n outputs.append('%d: %s' % source_line)\n return \"\".join(outputs)", "def __str__(self):\n return str(self.__dict__['_obj'])", "def debug(self):\r\n print(\"_,.-^ DEBUG ^-.,_\")\r\n print(\"state = %s %s\"%(self.state, self.error))\r\n print(\"\".join(self.memory))\r\n print ((self.pointer*\" \")+\"^\")\r\n print(\"PROGRAM\")\r\n print(\" {:16}{:7}{:7}{:7}{:16}\".format(\"State\", \"symbol\", \"write\", \"move\", \"new_state\"))\r\n for row in self.program:\r\n if row.state == self.state and row.symbol == self.memory[self.pointer]:\r\n print(\">\", end=\"\")\r\n else:\r\n print(\" \", 
end=\"\")\r\n print(row)" ]
[ "0.7372906", "0.72778416", "0.6883075", "0.6746992", "0.6737449", "0.6555419", "0.6541405", "0.65274495", "0.6431801", "0.64068395", "0.6402232", "0.640157", "0.6398631", "0.63798773", "0.6376369", "0.6375551", "0.63642555", "0.63500595", "0.6315535", "0.6302645", "0.62807316", "0.6275674", "0.62132704", "0.618943", "0.6182986", "0.6180341", "0.61715746", "0.6112123", "0.6110644", "0.6097784", "0.60940063", "0.6092642", "0.60903", "0.6054216", "0.60380286", "0.6020348", "0.5998989", "0.59823835", "0.59593666", "0.5959058", "0.594984", "0.5948973", "0.59344953", "0.5915146", "0.5907818", "0.5907362", "0.5894905", "0.5893206", "0.5857184", "0.58565533", "0.5852448", "0.5839369", "0.5834062", "0.583272", "0.583272", "0.5808855", "0.57997626", "0.5795582", "0.57859826", "0.5776516", "0.57371455", "0.5736569", "0.5736569", "0.57317114", "0.5720329", "0.5709732", "0.5709732", "0.5707328", "0.57054234", "0.5702372", "0.57020366", "0.5690056", "0.56893", "0.5686834", "0.5685593", "0.5681018", "0.5676573", "0.5669005", "0.56560576", "0.56527", "0.5635127", "0.5633602", "0.56335855", "0.56276417", "0.56270635", "0.56117773", "0.56024456", "0.5600739", "0.5600739", "0.5596689", "0.5595998", "0.55835223", "0.55768037", "0.5572136", "0.55677575", "0.55615526", "0.55614966", "0.5560985", "0.5558589", "0.5535195" ]
0.6848801
3
Return an iterator over indices of the master array which are spanned by the data array.
def master_ndindex(self):  # itermaster_indices(self):
    return itertools_product(
        *[range(*r) for r in self.location]
    )  # TODO check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_subset_inds(self, adata_parent):\r\n subset_inds = np.ones(len(adata_parent), dtype=bool)\r\n for condition, values in self.subset_cond.items():\r\n subset_inds *= adata_parent.obs[condition].isin(values)\r\n return subset_inds", "def enumerate(self):\n # go through the container and tile in sync\n for index, value in zip(self.tile, self.data):\n # hand the {index} and the corresponding value to the caller\n yield index, value\n # all done\n return", "def indices(self):\n return range(len(self))", "def mainIndices(self):\n return self.i1, self.i2", "def __iter__(self):\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size == 0:\n continue\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError('the length of the indice should be equal to the size')\n np.random.shuffle(indice)\n num_extra = int(np.ceil(size / self.samples_per_gpu)\n ) * self.samples_per_gpu - len(indice)\n indice = np.concatenate([indice, indice[:num_extra]])\n indices.append(indice)\n indices = np.concatenate(indices)\n indices = [\n indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n for i in np.random.permutation(\n range(len(indices) // self.samples_per_gpu))\n ]\n indices = np.concatenate(indices)\n indices = torch.from_numpy(indices).long()\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples\")\n return iter(indices)", "def indices(self):\n return tuple([slice(*r) for r in self.location])", "def indices(self) -> np.ndarray:\n return self.impl.indices", "def get_index_array(self):\n return self.region_pairs", "def __iter__(self):\n\n starts = range(0, self.data.shape[self.axis], self.chunksize)\n\n for t in zip_longest(starts, starts[1:], fillvalue=None):\n yield self.data[self._slice(*t)]", "def __iter__(self):\n while True:\n if self.batches is None:\n for indexed_sentence in self.indexed_sentences:\n yield indexed_sentence\n else:\n for batch in self.batches:\n yield batch[:-1, :], batch[1:, :] # Return batch and target indices\n\n if not self.repeat:\n return", "def get_data_idx(self)->list:\n return self.__data_idx", "def get_indices(self):\r\n return self._indices", "def getind(self,start,end,blk):\n\n if blk is None:\n # Return all blocks\n blk = np.arange(self.ind[start].size)\n\n ind=np.array([])\n for k,val in enumerate(blk):\n ind=np.append(ind,np.arange(self.ind[start][val],self.ind[end][val]))\n return ind.astype(int)", "def inidices(self):\n return [Index(name, index) for name, index in self._data['indices'].iteritems()]", "def index_iterator((x_min, x_max, y_min, y_max)):\n for row in xrange(y_min, y_max):\n for col in xrange(x_min, x_max):\n yield (row, col)", "def _get_split_indices(self):\n\n cumsum = np.cumsum(\n np.concatenate((np.array([0], dtype=np.int8), self.split_sizes)))\n \n fold_inds = np.array(\n [(cumsum[n], cumsum[n + 1]) for n in range(self.n_splits)])\n\n return fold_inds", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n index = [np.random.randint(0, len(dataset)) for _ in range(1)]\n\n return index", "def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n if not len(indices) == self.total_size:\n raise ValueError('the 
length of the indices should be equal to total_size')\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples in subsample\")\n\n return iter(indices)", "def _get_chunk_indexer(self, array):\n if self.data.num_chunks == 1:\n return np.broadcast_to(0, len(array))\n return np.digitize(array, self.offsets[1:])", "def get_intra_sample_indices(self):\n intra_sample_indices = []\n for group_index in range(self.num_groups):\n num_images_in_group = self.num_images_per_group[group_index]\n if self.intra_group_option == \"forward\":\n for i in range(num_images_in_group):\n for j in range(i):\n # j < i\n intra_sample_indices.append(\n ((group_index, j), (group_index, i))\n )\n elif self.intra_group_option == \"backward\":\n for i in range(num_images_in_group):\n for j in range(i):\n # i > j\n intra_sample_indices.append(\n ((group_index, i), (group_index, j))\n )\n elif self.intra_group_option == \"unconstrained\":\n for i in range(num_images_in_group):\n for j in range(i):\n # j < i, i > j\n intra_sample_indices.append(\n ((group_index, j), (group_index, i))\n )\n intra_sample_indices.append(\n ((group_index, i), (group_index, j))\n )\n else:\n raise ValueError(\n \"Unknown intra_group_option, must be forward/backward/unconstrained\"\n )\n return intra_sample_indices", "def create_repeated_indexes(data):\n from numpy import arange\n\n index_range = arange(0, len(data))\n return (index_range for i in index_range)", "def get_indices_entire_sequence(data: pd.Dataframe, window_size: int, step_size: int) -> list:\n stop_position = len(data)-1 # 1- because of 0 indexing\n\n # Start the first sub-sequence at index position 0\n subseq_first_idx = 0\n\n subseq_last_idx = subseq_first_idx + window_size\n\n indices = []\n\n while subseq_last_idx <= stop_position:\n indices.append((subseq_first_idx, subseq_last_idx))\n subseq_first_idx += step_size\n subseq_last_idx += step_size\n return indices", "def batch_indices(self):\n b = self.batch_size\n return [np.arange(i*b, i*b+b) for i in range(self.num_batches)]", "def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])", "def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices", "def getLandmarkindices(self):\n return self.subsetindices", "def __iter__(self):\n batch = []\n for i_batch in range(self.episode_num):\n classes = torch.randperm(len(self.idx_list))[: self.way_num]\n for c in classes:\n idxes = self.idx_list[c.item()]\n pos = torch.randperm(idxes.size(0))[: self.image_num]\n batch.append(idxes[pos])\n if len(batch) == self.episode_size * self.way_num:\n batch = torch.stack(batch).reshape(-1)\n yield batch\n batch = []", "def _iterCoordsets(self):\n\n for i in range(self._n_csets):\n yield self._coords[i]", "def iterCoordsets(self):\n\n for i in range(self._n_csets):\n yield self._coords[i].copy()", "def __iter__(self):\n worker_info = torch.utils.data.get_worker_info()\n num_batches = len(self.coords_batcher)\n if worker_info is None:\n # In single-processing mode\n start, end = 0, num_batches\n else:\n worker_id = worker_info.id\n num_workers = worker_info.num_workers\n shard_size = int(np.ceil(num_batches / num_workers))\n start = shard_size * worker_id\n end = min(start + shard_size, num_batches)\n return (self.get_batch(i) for i in range(start, end))", "def index(self):\n return list(self._innercontainer)", "def 
get_indexes(self, dataset):\n\n indexs = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexs", "def indices_of_split(self, split_name='train'):\n return self.indices_of('split', split_name)", "def iter_sites(self):\n for i in range(self.num_sites):\n indices = list()\n for dim in self.site_idxs:\n indices.append(i % dim)\n i = i // dim\n yield tuple(indices)", "def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]", "def _get_index_iterator(indexes, length):\n return combinations(indexes, length)", "def _iter_indices(self, frame, y):\n pass", "def getIndices(self):\r\n return self._indices", "def indices(self):\n return self.index.indices", "def SectionIndicesConnectedToSoma(self):\n indices = []\n index = 0\n for each_section in self._section_list:\n if each_section.ParentId() == -1:\n indices.append(index)\n index += 1\n return indices", "def get_idxvals(self):\n input_rows = list()\n input_cols = list()\n for key in self.index:\n input_rows.append(key[0])\n input_cols.append(key[1])\n\n return list(OrderedSet(input_rows)), list(OrderedSet(input_cols))", "def _compute_data_offsets(store: Store,\n datas: Tuple[DataSegment, ...],\n module_instance: ModuleInstance) -> Iterable[numpy.uint32]:\n for data_segment in datas:\n frame = Frame(\n module=module_instance,\n locals=[],\n instructions=InstructionSequence(data_segment.offset),\n arity=1,\n )\n config = Configuration(store=store)\n config.push_frame(frame)\n result = config.execute()\n if len(result) != 1:\n raise Exception(\"Invariant: data segment offset returned empty result\")\n offset = numpy.uint32(cast(int, result[0]))\n\n memory_address = module_instance.memory_addrs[data_segment.memory_idx]\n memory_instance = store.mems[memory_address]\n\n if offset + len(data_segment.init) > len(memory_instance.data):\n raise Unlinkable(\n f\"Computed data segment offset exceeds memory size: {offset} \"\n f\"+ {len(data_segment.init)} > {len(memory_instance.data)}\"\n )\n yield offset", "def getAllIndex(ldata, fldata):\n\treturn list(map(lambda e : fldata.index(e), ldata))", "def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n for i, size in enumerate(self.group_sizes):\n if size > 0:\n indice = np.where(self.flag == i)[0]\n if not len(indice) == size:\n raise ValueError(\"the length of the indice should be equal to size\")\n indice = indice[list(torch.randperm(int(size),\n generator=g))].tolist()\n extra = int(\n math.ceil(\n size * 1.0 / self.samples_per_gpu / self.num_replicas)\n ) * self.samples_per_gpu * self.num_replicas - len(indice)\n indice += indice[:extra]\n indices += indice\n\n if not len(indices) == self.total_size:\n raise ValueError(\"the length of the indices should be equal to total_size\")\n\n indices = [\n indices[j] for i in list(\n torch.randperm(\n len(indices) // self.samples_per_gpu, generator=g))\n for j in range(i * self.samples_per_gpu, (i + 1) * self.samples_per_gpu)\n ]\n\n # subsample\n offset = self.num_samples * self.rank\n indices = indices[offset:offset + self.num_samples]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samplers in subsample\")\n\n return iter(indices)", "def next(self):\n # Keeps under lock only the mechanism which advances\n # the indexing of each batch.\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it 
can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)", "def indices(self):\n if self._indices is None:\n i = []\n\n # TODO: this is not right for multi-column keys\n # TODO: new style indexes\n\n global_name = '^DD(%s,0,\"IX\",\"0\")' % self.fileid\n prefix = '^DD(%s,0,\"IX\",' % self.fileid\n while 1:\n global_name = M.mexec('set s0=$query(%s)' % global_name, M.INOUT(\"\"))[0]\n if not global_name or not global_name.startswith(prefix):\n break\n suffix = global_name[len(prefix):-1]\n parts = suffix.split(\",\")\n idx_name = parts[0][1:-1]\n idx_table = parts[1]\n idx_columns = parts[2:]\n index = Index(idx_name, idx_table, idx_columns)\n i.append(index)\n\n # A second list, gives indices for a field\n columns = {}\n for idx in i:\n for c in idx.columns:\n columns[c] = 1\n\n # Now trawl the listed columns in the data dictionary, and load their\n # cross references.\n cr_names = {}\n for c in columns.keys():\n idx_root = M.Globals[\"^DD\"][self.fileid][c][1]\n if not idx_root[0].exists():\n continue\n for cr_id, val in idx_root.keys_with_decendants():\n if float(cr_id) > 0:\n cr_header = idx_root[cr_id][0].value\n parts = cr_header.split(\"^\")\n if len(parts) == 2 and parts[1]: # if more than 2 parts, assume MUMPs trigger\n f = cr_names.get(parts[1], list())\n f.append(c)\n cr_names[parts[1]] = f\n\n # Now, just delete items from the index list if they are not in cr_names\n self._indices = []\n for index in i:\n cr = cr_names.get(index.name)\n if cr:\n # verify columns - lots of errors in real systems\n if len(cr) == len(index.columns):\n invalid = False\n for c in cr:\n if c not in index.columns:\n invalid = True\n continue\n if not invalid:\n self._indices.append(index)\n\n return self._indices", "def get_indexes(self, dataset):\n\n for i in range(self.max_iters):\n index = random.randint(0, len(dataset))\n gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n if len(gt_bboxes_i) != 0:\n break\n\n return index", "def get_indexes(self, items: Iterable[_T]) -> List[int]:\n return [self.get_index(item) for item in items]", "def get_indexes(self, variable, *args):\n\n return [get_subset_idxs(data, min, max)\n for data, (min, max) in args]", "def getLandmarkindices(self):\n return self.subsetnodes_indices", "def getIndexes(self, val):\n # Find where this value is listed. \n valNdx = (self.values == val).nonzero()[0]\n \n # If this value is not actually in those listed, then we \n # must return empty indexes\n if len(valNdx) == 0:\n start = 0\n end = 0\n else:\n # The index into counts, etc. for this value. \n valNdx = valNdx[0]\n start = self.start[valNdx]\n end = self.end[valNdx]\n \n # Create a tuple of index arrays, one for each index of the original array. 
\n ndx = ()\n for i in range(self.nDims):\n ndx += (self.indexes[start:end, i], )\n return ndx", "def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))", "def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes", "def get_indexes(self, dataset: BaseDataset) -> int:\n\n indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n return indexes", "def next(self):\n with self.lock:\n index_array = next(self.index_generator)\n # The transformation of images is not under thread lock\n # so it can be done in parallel\n return self._get_batches_of_transformed_samples(index_array)", "def iter_leaf_idx(self):\n def leaf_idx(tree, total):\n total[0] += 1\n for elem in tree[1:]:\n if isinstance(elem, Tree):\n for elem2 in leaf_idx(elem, total):\n yield total[0]\n else:\n yield total[0]\n total[0] += 1\n return leaf_idx(self, [0])", "def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j", "def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices", "def test_get_indices_several_existing_items(self):\r\n control_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593']\r\n exp_control_indices = [0, 1, 2, 3, 4]\r\n\r\n fast_ids = ['PC.607', 'PC.634', 'PC.635', 'PC.636']\r\n exp_fast_indices = [5, 6, 7, 8]\r\n\r\n obs_control = _get_indices(self.dist_matrix_header, control_ids)\r\n self.assertEqual(obs_control, exp_control_indices)\r\n\r\n obs_fast = _get_indices(self.dist_matrix_header, fast_ids)\r\n self.assertEqual(obs_fast, exp_fast_indices)", "def create_jackknife_indexes(data):\n from numpy import arange, delete\n\n index_range = arange(0, len(data))\n return (delete(index_range, i) for i in index_range)", "def __iter__(self):\n for i in range(len(self.ks)):\n yield self.get_neighs([i]), self.get_sp_rel_pos([i]),\\\n [self.ks[i]], self.iss", "def get_inter_sample_indices(self):\n inter_sample_indices = []\n for group_index1 in range(self.num_groups):\n for group_index2 in range(self.num_groups):\n num_images_in_group1 = self.num_images_per_group[group_index1]\n num_images_in_group2 = self.num_images_per_group[group_index2]\n for image_index1 in range(num_images_in_group1):\n for image_index2 in range(num_images_in_group2):\n inter_sample_indices.append(\n ((group_index1, image_index1), (group_index2, image_index2))\n )\n return inter_sample_indices", "def getContents(self):\r\n cont=[]\r\n for i in range (len(self._indices)):\r\n cont.append(self._dataset.getPoint(self._indices[i]))\r\n return cont", "def _notstaticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = []\n for k in range(len(self.sp_relative_pos)):\n sp_relative_pos += [[self.sp_relative_pos[k][i] for i in inds]]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def __iter__(self):\n batch = []\n for idx in self._sampler:\n batch.append(idx)\n if len(batch) == self._batch_size:\n batch = sum(batch, [])\n yield batch\n batch = []\n if len(batch) > 0:\n batch = sum(batch, [])\n yield batch", "def getNeighbours(seg,meta,inversedIndex):\n return np.unique(np.fromiter( (inversedIndex[x] for x in np.concatenate([meta.loc[seg]['ins'],meta.loc[seg]['outs']])),dtype=np.int))", "def _calculate_chunk_offsets(self):\n offset = 0\n offsets = []\n for chunk in self.data.iterchunks():\n offsets.append(offset)\n 
offset += len(chunk)\n return np.array(offsets)", "def read_ix(self, indexes: np.ndarray) -> Iterator[XData]:\n index_list = np.array_split(\n indexes, range(self.batchsize, indexes.shape[0], self.batchsize)\n )\n return self._run(index_list)", "def iteritems(self):\n for name, data_containers in self._name_index.items():\n for data_container in data_containers:\n yield data_container", "def __iter__(self):\n for i in range(self.m):\n for j in range(self.n):\n yield self[i, j]", "def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst", "def _get_indices_from_payload(self):\n for _, value in self.s_namespaces.items():\n for index in value['indexes'].items():\n yield index", "def find_offsets(haystack, needle):\n\toffs = -1\n\twhile True:\n\t\toffs = haystack.find(needle, offs+1)\n\t\tif offs == -1:\n\t\t\tbreak\n\t\telse:\n\t\t\tyield offs", "def index(self):\n return copy.deepcopy(self._innercontainer)", "def byte_offsets(self) -> Iterator[int]:\n\n start_index = self.pixels_offset\n end_index = self.pixels_offset + self.pixel_size_bytes\n scanline_bytes = self.pixel_size_bytes // self.height\n\n for scanline in range(start_index, end_index, scanline_bytes):\n yield from range(scanline, scanline + self.width * 3)", "def batch_iter(data, batch_size, shuffle=False):\n batch_num = math.ceil(len(data) / batch_size)\n index_array = list(range(len(data)))\n\n if shuffle:\n np.random.shuffle(index_array)\n\n for i in range(batch_num):\n indices = index_array[i * batch_size: (i + 1) * batch_size]\n examples = [data[idx] for idx in indices]\n\n examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)\n src_sents = [e[0] for e in examples]\n tgt_sents = [e[1] for e in examples]\n\n yield src_sents, tgt_sents", "def batch_iter(data, batch_size, shuffle=False):\n batch_num = math.ceil(len(data) / batch_size)\n index_array = list(range(len(data)))\n\n if shuffle:\n np.random.shuffle(index_array)\n\n for i in range(batch_num):\n indices = index_array[i * batch_size: (i + 1) * batch_size]\n examples = [data[idx] for idx in indices]\n\n examples = sorted(examples, key=lambda e: len(e[0]), reverse=True)\n src_sents = [e[0] for e in examples]\n tgt_sents = [e[1] for e in examples]\n\n yield src_sents, tgt_sents", "def __iter__(self):\n for i in self.loopindices:\n pid = self.frametracks.particle.values[i]\n yield pid, self.neighbors(pid)", "def __getitem__(self, index):\n #print(\"%d / %d\" %(index, np.floor(len(self.list_IDs) / self.batch_size)))\n indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]\n\n # Find list of IDs\n list_IDs_temp = [self.list_IDs[k] for k in indexes]\n\n # Generate data\n X, y = self.__data_generation(list_IDs_temp)\n\n return X, y", "def indices(self):\n return self._kbounded_partitions", "def _staticneighs_get_corestored_by_inds_slice(self, inds):\n inds = [inds] if type(inds) == int else inds\n idxs = self.idxs\n if self.sp_relative_pos is not None:\n sp_relative_pos = [self.sp_relative_pos[i] for i in inds]\n else:\n sp_relative_pos = None\n return idxs, sp_relative_pos", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss 
= first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def get_indices_input_target(num_obs, input_len, step_size, forecast_horizon, target_len):\n input_len = round(input_len) # just a precaution\n start_position = 0\n stop_position = num_obs - 1\n\n inpseq_first_idx = start_position\n inpseq_last_idx = inpseq_first_idx + input_len\n target_first_idx = inpseq_last_idx + forecast_horizon\n target_last_idx = target_first_idx + target_len\n print(\"target_last_idx = {}\".format(target_last_idx))\n print(\"stop_position = {}\".format(stop_position))\n indices = []\n while target_last_idx <= stop_position:\n indices.append((inpseq_first_idx, inpseq_last_idx, target_first_idx, target_last_idx))\n inpseq_first_idx += step_size\n inpseq_last_idx += step_size\n target_first_idx += inpseq_last_idx + forecast_horizon\n target_last_idx += target_first_idx + target_len\n return indices", "def test_should_only_get_specific_indices(self):\n print(\"Testing that get_region returns correct amount of data\")\n\n test_many = get_region_data(self.wmo_boxes, self.float_name, self.config,\n self.index, self.pres)\n\n self.assertTrue(test_many[0].shape[1] == self.index.__len__())\n\n test_one = get_region_data(self.wmo_boxes, self.float_name, self.config,\n [50], self.pres)\n\n self.assertTrue(test_one[0].shape[1] == 1)", "def iter_positions(self):\n for loc in self.iter_locations():\n yield loc.position", "def _get_child_indices(self, current_index: int) -> List[int]:\n multiplier = current_index * 2\n left_index = multiplier + 1\n right_index = multiplier + 2\n\n return [left_index, right_index]", "def _get_next_minibatch_inds(self):\n img_next = self._cursor[0].next()\n msk_next = self._cursor[1].next()\n if img_next and msk_next:\n pass\n else:\n print 'BlobFetcher to begin because of cursor point to end.'\n self._cursor = [self._txn[0].cursor(), self._txn[1].cursor()]\n self._cursor[0].next()\n self._cursor[1].next()", "def _get_indices_from_iss(self, iss):\n iss = [iss] if type(iss) not in [np.ndarray, list] else iss\n if self.iss is not None:\n inds = []\n for i in iss:\n inds.append(list(self.iss).index(i))\n# else:\n# inds = iss\n return inds", "def get_idx_dataset(self, split):\r\n def generator():\r\n while True:\r\n batch_idx = self.get_batch_idx(split)\r\n yield tf.constant(batch_idx, dtype=tf.int32)\r\n return tf.data.Dataset.from_generator(\r\n generator,\r\n output_types=tf.int32,\r\n output_shapes=[None])", "def getscanind(self,blk=None,zarange=[0,90]):\n ind=self.getind('ss','se',arr(blk))\n ind=ind[(self.za[ind]>=zarange[0]) & (self.za[ind]<=zarange[1])]\n return ind", "def getidx(self, ind: Union[str, int]) -> List[Any]:\n output = []\n for data in self.data:\n output.append(data[ind])\n return output", "def get_batch(self, idxs):\r\n return self.data[(self.start + idxs) % self.maxlen]", "def get_data(self):\n idxs = 
self.get_indexes(self._start, self._length, self.maxsize)\n return self._data[idxs].copy()", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def get_cached_indices(self, start=None, end=None):\n params = {}\n indices = [\n y[\"sample_identifier\"]\n for y in self.mongo_database.cache.find(\n params, {\"_id\": 0, \"sample_identifier\": 1}\n )[start:end]\n ]\n return np.unique(indices).tolist()", "def __iter__(self):\n\n # Open the data reader\n self.data.open()\n\n starts = np.arange(self.start, self.stop, self.chunksize)\n for a, b in zip_longest(starts, starts[1:], fillvalue=self.stop):\n yield self.data.read(a, b, **self.kwargs)", "def __iter__(self):\n for i in range(len(self.data)):\n yield self.data[i]", "def index(self):\n return self.data.index.values", "def _iter_sims(self):\n for idx, lineset in enumerate(self.linesets[:-1]):\n for lineset2 in self.linesets[idx + 1 :]:\n yield from self._find_common(lineset, lineset2)", "def _neuron_location(self, m, n):\n for i in range(m):\n for j in range(n):\n yield np.array([i, j])" ]
[ "0.61020994", "0.6072502", "0.60690475", "0.60687184", "0.60543483", "0.60275173", "0.5924552", "0.5918545", "0.59088135", "0.58821535", "0.58567846", "0.58429295", "0.5828838", "0.58083194", "0.5794418", "0.5793355", "0.57828283", "0.57748425", "0.5756645", "0.57550335", "0.5752208", "0.5744148", "0.57440686", "0.5740753", "0.5721396", "0.5708537", "0.5681665", "0.5668879", "0.566354", "0.5635181", "0.5628518", "0.5624042", "0.5622284", "0.56018215", "0.5578469", "0.5578117", "0.5565186", "0.55578977", "0.55574375", "0.55430126", "0.5518523", "0.5515029", "0.55139816", "0.55010974", "0.5495244", "0.5489729", "0.5484076", "0.5473604", "0.54710835", "0.5469012", "0.5457938", "0.5456938", "0.5447555", "0.54415137", "0.54287857", "0.5424696", "0.54225737", "0.5422566", "0.5420563", "0.5418336", "0.5418161", "0.54104763", "0.54052794", "0.54046303", "0.53832614", "0.5380916", "0.5380549", "0.53744274", "0.5372589", "0.5363425", "0.53574026", "0.53566873", "0.53384525", "0.53252476", "0.53204423", "0.5319284", "0.5319284", "0.53137565", "0.53118867", "0.53116673", "0.53102565", "0.5307818", "0.5307543", "0.530584", "0.53044134", "0.5303639", "0.53010774", "0.5301048", "0.52996683", "0.52966356", "0.52903336", "0.5284297", "0.5282697", "0.52732414", "0.5264803", "0.5264675", "0.52633446", "0.5261429", "0.52567405", "0.5251312" ]
0.6835397
0
Update the `!part` attribute inplace for new indices of the master array.
def new_part(self, indices, master_axis_to_position, master_flip):
    shape = self.shape

    if indices == [slice(0, stop, 1) for stop in shape]:
        return

    # ------------------------------------------------------------
    # If a dimension runs in the wrong direction then change its
    # index to account for this.
    #
    # For example, if a dimension with the wrong direction has
    # size 10 and its index is slice(3,8,2) then after the
    # direction is set correctly, the index needs to changed to
    # slice(6,0,-2):
    #
    # >>> a = [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
    # >>> a[slice(3, 8, 2)]
    # [6, 4, 2]
    # >>> a.reverse()
    # >>> print(a)
    # >>> a = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # >>> a[slice(6, 0, -2)]
    # [6, 4, 2]
    # ------------------------------------------------------------
    if self._subarray.size > 1:
        indices = indices[:]

        p_flip = self.flip

        for axis, i in master_axis_to_position.items():
            if (axis not in p_flip and axis not in master_flip) or (
                axis in p_flip and axis in master_flip
            ):
                # This axis runs in the correct direction
                continue

            # Still here? Then this axis runs in the wrong
            # direction.

            # Reset the direction
            p_flip = p_flip[:]
            if axis in self.flip:
                p_flip.remove(axis)
            else:
                p_flip.append(axis)

            # Modify the index to account for the changed
            # direction
            size = shape[i]
            if isinstance(indices[i], slice):
                start, stop, step = indices[i].indices(size)
                # Note that step is assumed to be always +ve here
                div, mod = divmod(stop - start - 1, step)
                start = size - 1 - start
                stop = start - div * step - 1
                if stop < 0:
                    stop = None

                indices[i] = slice(start, stop, -step)
            else:
                size -= 1
                indices[i] = [size - j for j in indices[i]]
        # --- End: for

        self.flip = p_flip
    # --- End: if

    slice_None = slice(None)

    # Reorder the new indices
    indices = [
        (
            indices[master_axis_to_position[axis]]
            if axis in master_axis_to_position
            else slice_None
        )
        for axis in self.axes
    ]

    part = self.part

    if not part:
        self.part = indices
        return

    # Still here? update an existing part
    p_part = []
    for part_index, index, size in zip(
        part, indices, self._subarray.shape
    ):
        if index == slice_None:
            p_part.append(part_index)
            continue

        if isinstance(part_index, slice):
            if isinstance(index, slice):
                start, stop, step = part_index.indices(size)

                size1, mod = divmod(stop - start - 1, step)

                start1, stop1, step1 = index.indices(size1 + 1)

                size2, mod = divmod(stop1 - start1, step1)

                if mod != 0:
                    size2 += 1

                start += start1 * step
                step *= step1
                stop = start + (size2 - 1) * step

                if step > 0:
                    stop += 1
                else:
                    stop -= 1

                if stop < 0:
                    stop = None

                p_part.append(slice(start, stop, step))

                continue
            else:
                new_part = list(range(*part_index.indices(size)))
                new_part = [new_part[i] for i in index]
        else:
            if isinstance(index, slice):
                new_part = part_index[index]
            else:
                new_part = [part_index[i] for i in index]
        # --- End: if

        # Still here? Then the new element of p_part is a list of
        # integers, so let's see if we can convert it to a slice
        # before appending it.
        new_part0 = new_part[0]

        if len(new_part) == 1:
            # Convert a single element list to a slice object
            new_part = slice(new_part0, new_part0 + 1, 1)
        else:
            step = new_part[1] - new_part0
            if step:
                if step > 0:
                    start, stop = new_part0, new_part[-1] + 1
                else:
                    start, stop = new_part0, new_part[-1] - 1

                if new_part == list(range(start, stop, step)):
                    if stop < 0:
                        stop = None

                    new_part = slice(start, stop, step)
        # --- End: if

        p_part.append(new_part)
    # --- End: for

    self.part = p_part
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, patch):\n internalSlices = self._get_internal_slices(patch.slices)\n self.array[internalSlices] = patch.array", "def _idx_changed(self, idx):\n self.refresh_memory()", "def _loadpart(self, part):\n new_partidx = util.Partname(part.partname).idx\n for idx, seq_part in enumerate(self._values):\n partidx = util.Partname(seq_part.partname).idx\n if partidx > new_partidx:\n self._values.insert(idx, part)\n return\n self._values.append(part)", "def _idxs_postformat_array(self):\n self.idxs = np.array(self.idxs)", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def part_ids(self, part_ids):\n\n self._part_ids = part_ids", "def update_chunk(self):\n for key, value in self.piece_coordinates.items():\n # Why is the key a numpy.int type ???\n self.chunk[value] = key", "def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )", "def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind", "def reindex_subcomponent_taxa(self):\n ti_mutable = self.taxon_set._is_mutable\n self.taxon_set._is_mutable = True\n new_map = CharacterDataMap()\n for taxon, seq in self.taxon_seq_map.items():\n taxon = self.taxon_set.require_taxon(label=taxon.label)\n new_map[taxon] = seq\n self.taxon_set._is_mutable = ti_mutable\n self.taxon_seq_map = new_map", "def set(self, idx_in, vals, fill=False):\n o = np.broadcast_arrays(vals, *idx_in)\n vals = np.ravel(o[0])\n\n # TODO: Determine whether new vs. existing indices are being\n # addressed, in the latter case we only need to update data\n # array\n\n vals = np.array(vals, ndmin=1)\n idx_flat_in, msk_in = self._to_flat_index(idx_in)\n vals = np.asanyarray(vals, dtype=self.data.dtype)\n idx, data = merge_sparse_arrays(\n idx_flat_in, vals, self.idx, self.data, fill=fill\n )\n\n # Remove elements equal to fill value\n msk = data != self._fill_value\n idx = idx[msk]\n data = data[msk]\n self._idx = idx\n self._data = data\n # idx, msk = find_in_array(idx_flat_in, self.idx)\n # self._data[idx[msk]] = vals[msk]", "def _setActiveChildWellIndices(self,activeWellIndices):\n if activeWellIndices is None:\n activeWellIndices=list(range(0,len(self.childWellIndices())))\n\n if len(activeWellIndices) > 0:\n includedIndices=set()\n for localdataidx in activeWellIndices:\n if localdataidx < 0 or localdataidx >= len(self.childWellIndices()):\n raise RuntimeError(\"local index \"+str(localdataidx)+\" out of range\")\n if localdataidx in includedIndices:\n raise RuntimeError(\"local index \"+str(localdataidx)+\" given multiple times\")\n includedIndices.add(localdataidx)\n\n self._activeWellIndices=activeWellIndices", "def __setitem__(self, key, value):\n mixed_positions, vindex_positions = _advanced_indexer_subspaces(key)\n self._array[key] = np.moveaxis(value, vindex_positions, mixed_positions)", "def reindex(self):\n self._index = {w: i for i, w in enumerate(self._words)}\n self.n, self.d = self._vecs.shape\n assert self.n == len(self._words) == len(self._index)\n self._neighbors = None", "def update(self, idx, add):\n idx += 1\n while idx < len(self.array):\n self.array[idx] += add\n idx += idx & -idx #Adding the last bit", "def propagateDirty(self, slot, subindex, roi):\n totalIndex = (self._subSlots.index(slot),) + subindex\n self.operator.propagateDirty(self, totalIndex, roi)", "def set_index_ub(self, param, 
length):\n if tik.Dprofile().get_product_name() in (MINI, CLOUD, HISI_ES):\n sum_mask_ub = self.instance.Tensor(self.dtype, (16,),\n name=\"sum_mask_ub\",\n scope=tik.scope_ubuf)\n work_tensor_ub = self.instance.Tensor(self.dtype, (16,),\n name=\"work_tensor_ub\",\n scope=tik.scope_ubuf)\n self.instance.vec_reduce_add(self.mask, sum_mask_ub, param['reduce_mask_ub'], work_tensor_ub, 1, 8)\n\n mask_scalar = self.instance.Scalar(\"uint16\", name=\"mask_scalar\")\n mask_scalar.set_as(sum_mask_ub[0])\n with self.instance.if_scope(mask_scalar != 0):\n with self.instance.if_scope(param['count'] < PRE_NMS_TOPN):\n with self.instance.for_range(0, length) as mask_index:\n param['index_offset'].set_as(param['index_offset'] + 1)\n with self.instance.if_scope(param['count'] < PRE_NMS_TOPN):\n mask_scalar.set_as(param['reduce_mask_ub'][mask_index])\n\n # 1 fp16 == 15360 uint16\n with self.instance.if_scope(mask_scalar == 15360):\n param['index_ub'][param['count']].set_as(\n param['index_offset'])\n param['count'].set_as(param['count'] + 1)\n with self.instance.else_scope():\n param['index_offset'].set_as(param['index_offset'] + length)", "def update_rec(self):\n import copy\n \n self.leftrec, self.rightrec = copy.copy(self.rec), copy.copy(self.rec)\n self.leftrec[2*self.dim + 1], self.rightrec[2*self.dim] = self.node.dimension[self.dim], self.node.dimension[self.dim]", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def reindex(self):", "def reindex(self):", "def __setitem__(self, index, value):\n if isinstance(index, types.SliceType):\n keys = self._main._sequence[index]\n if len(keys) != len(value):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(keys)))\n # FIXME: efficiency? 
Would be better to calculate the indexes\n # directly from the slice object\n # NOTE: the new keys can collide with existing keys (or even\n # contain duplicates) - these will overwrite\n for key, val in zip(keys, value):\n self._main[key] = val\n else:\n self._main[self._main._sequence[index]] = value", "def preCommitFixup(self):\n log_method_call(self, self.name)\n if not self.exists or not self.disklabelSupported:\n return\n\n # find the correct partition on the original parted.Disk since the\n # name/number we're now using may no longer match\n _disklabel = self.disk.originalFormat\n\n if self.isExtended:\n # getPartitionBySector doesn't work on extended partitions\n _partition = _disklabel.extendedPartition\n log.debug(\"extended lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n else:\n # lookup the partition by sector to avoid the renumbering\n # nonsense entirely\n _sector = self.partedPartition.geometry.start\n _partition = _disklabel.partedDisk.getPartitionBySector(_sector)\n log.debug(\"sector-based lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n\n self.partedPartition = _partition", "def untie_everything(self):\r\n self.tied_indices = []", "def get_local_indices(self, part, ctx):\n return self.map_to_global(\n F.arange(0, self.local_size(part), ctx=ctx), part\n )", "def setActiveChildWellIndices(self,activeWellIndices):\n if not self.isReplicateGroup():\n raise RuntimeError(self.fullId()+\": cannot change active child well indices, this is not a replicate group.\")\n self._setActiveChildWellIndices(activeWellIndices)\n self.clsParent.modified=True", "def setReference(self, updatedIndices):\n # self.colors[:] = [self.colors[i] for i in updatedIndices]\n self.cellData[:] = [self.cellData[i] for i in updatedIndices]", "def set_position(self, idx, pos):\n if self.EMULATOR_MODE:\n return\n if idx >= self.nleaflets or idx < 0:\n raise IndexError('index specified is out of bounds')\n self._fserial.write(self.MAGIC_BYTES + bytes([idx]) + pos.to_bytes(2, byteorder='big', signed=False) )\n self._fserial.reset_input_buffer()", "def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def undo_scan(self, sub_array_id: int):", "def reindex(self):\n if self.channels is None:\n return\n\n self.data = None\n\n keep_indices = self.channels.new_indices_in_old()\n self.channels.reindex()\n\n if self.parms is not None:\n self.parms = self.integration.get_dependents(\n self.get_config_name())\n\n channel_attributes = self.channel_dependent_attributes\n\n for attribute, value in self.__dict__.items():\n if attribute not in channel_attributes:\n continue\n if not isinstance(value, np.ndarray):\n continue\n setattr(self, attribute, value[keep_indices])", "def __setitem__(self, index, item):\n if isinstance(index, types.SliceType):\n # NOTE: item must be an iterable (list of tuples)\n self._main[index] = OrderedDict(item)\n else:\n # FIXME: Does this raise a sensible error?\n orig = self._main.keys[index]\n key, value = item\n if self._main.strict and key in self and (key != orig):\n raise ValueError('slice assignment must be from '\n 'unique keys')\n # delete the current one\n del 
self._main[self._main._sequence[index]]\n self._main.insert(index, key, value)", "def __setitem__(self, index, name):\n if isinstance(index, types.SliceType):\n # FIXME: efficiency?\n # check length is the same\n indexes = range(len(self._main._sequence))[index]\n if len(indexes) != len(name):\n raise ValueError('attempt to assign sequence of size %s '\n 'to slice of size %s' % (len(name), len(indexes)))\n # check they are the same keys\n # FIXME: Use set\n old_keys = self._main._sequence[index]\n new_keys = list(name)\n old_keys.sort()\n new_keys.sort()\n if old_keys != new_keys:\n raise KeyError('Keylist is not the same as current keylist.')\n orig_vals = [self._main[k] for k in name]\n del self._main[index]\n vals = zip(indexes, name, orig_vals)\n vals.sort()\n for i, k, v in vals:\n if self._main.strict and k in self._main:\n raise ValueError('slice assignment must be from '\n 'unique keys')\n self._main.insert(i, k, v)\n else:\n raise ValueError('Cannot assign to keys')", "def update_dimensions(self):\n self.chunk = numpy.full((self.current_height, self.current_width), fill_value=Constants.VALUE_INITIALIZER,\n dtype=\"int16\")", "def _cte_postformat(self):\n# if type(self.idxs) == list:\n# self.idxs = np.array(self.idxs)\n if self.sp_relative_pos is not None:\n if type(self.sp_relative_pos) == list:\n self.sp_relative_pos = np.array(self.sp_relative_pos)", "def _set_post_op_offset(self, spec: DTensorSpec, old_offset: int) -> None:\n dtensor_shape = spec.shape\n\n from torch.distributed._tensor.ops.utils import prod\n\n numel = prod(dtensor_shape)\n # pytorch: offset must be multiple of 4\n # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp\n numel = (numel + 3) // 4 * 4\n self.set_offset(\"parallel-rng\", old_offset + numel)", "def _UpdateCoords(self, new_coords):\n for i in range(self.mol.n_atoms):\n for j in range(const.NUMDIM):\n self.mol.atoms[i].coords[j] = new_coords[i][j]", "def insert_parts(self, parts):\r\n self.board.insert_parts(parts)\r\n self.set_changed(parts)", "def _reset_stored(self):\n ## Main information\n self.idxs = None\n self.sp_relative_pos = None\n self._setted = False\n self.ks = None\n self.iss = [0]", "def _adjust(self, offset, size, *keep_refs):\n for basic_block in self._cfg.values():\n for instr in basic_block:\n instr.adjust(offset, size, instr in keep_refs)", "def _configure_auxiliary_mask(self, auxiliary_mask):\n indices = self.indices\n\n new = [\n mask[\n tuple(\n [\n (slice(None) if n == 1 else index)\n for n, index in zip(mask.shape, indices)\n ]\n )\n ]\n for mask in auxiliary_mask\n ]\n\n # # If the partition is to be parallelised then get rid of mask\n # # components which are all False so the mask component does\n # # not get copied to the child process\n # if not config['serial']:\n # new = [mask for mask in new if not mask.any()]\n\n self.config[\"auxiliary_mask\"] = new", "def update_parts():\n syt.log_info(\"$$$ Get Rebrickable Part info\")\n part_list = [x[0] for x in reapi.pull_all_pieces()] # ['piece_id', 'descr', 'category')\n part_list.pop(0) # Remove the header\n secondary_parts.add_parts_to_database(part_list, type=\"re\")\n # Todo: need to create a scraper for rebrickable piece num information\n syt.log_info(\"%%% Rebrickable Part info added to parts table\")", "def remove_parts(self, parts):\r\n self.board.remove_parts(parts)\r\n self.set_changed(parts)", "def _array_only_set_rel_pos(self, rel_pos):\n ## Preformatting\n rel_pos = np.array(rel_pos)\n if len(rel_pos.shape) == 1:\n rel_pos = rel_pos.reshape((len(rel_pos), 1))\n n_iss 
= len(self.iss)\n sp_relative_pos = np.array([rel_pos for i in range(n_iss)])\n ## Not staticneighs\n if not self.staticneighs:\n n_k = len(self.idxs)\n sp_relative_pos = np.array([sp_relative_pos for i in range(n_k)])\n self.sp_relative_pos = sp_relative_pos", "def edit(self):\n wmap = 512\n if not self.mapd:\n data = self.data[24:]\n mapd = self.mapd = []\n for index in xrange(0,3*wmap*wmap,3):\n mapd.append(data[index:index+3])\n self.setChanged()", "def local_inplace_setsubtensor(node):\r\n if isinstance(node.op, IncSubtensor) and not node.op.inplace:\r\n new_op = node.op.__class__(\r\n node.op.idx_list, inplace=True,\r\n set_instead_of_inc=node.op.set_instead_of_inc,\r\n destroyhandler_tolerate_aliased=node.op.destroyhandler_tolerate_aliased)\r\n new_node = new_op(*node.inputs)\r\n return [new_node]\r\n return False", "def update_array(old_array, indices, val):\n\n # Sanity check.\n if (not isinstance(old_array, list)):\n old_array = []\n\n # 1-d array?\n if (len(indices) == 1):\n \n # Do we need to extend the length of the list to include the indices?\n index = int(indices[0])\n if (index >= len(old_array)):\n old_array.extend([0] * (index - len(old_array) + 1))\n old_array[index] = val\n\n # 2-d array?\n elif (len(indices) == 2):\n\n # Do we need to extend the length of the list to include the indices?\n index = int(indices[0])\n index1 = int(indices[1])\n if (index >= len(old_array)):\n # NOTE: Don't do 'old_array.extend([[]] * (index - len(old_array) + 1))' here.\n # The [] added with extend refers to the same list so any modification\n # to 1 sublist shows up in all of them.\n for _ in range(0, (index - len(old_array) + 1)):\n old_array.append([])\n if (index1 >= len(old_array[index])):\n old_array[index].extend([0] * (index1 - len(old_array[index]) + 1))\n old_array[index][index1] = val\n \n # Done.\n return old_array", "def set_offset(self, offset):\r\n for b in self.buf:\r\n b.set_offset(offset)", "def _update_block(self, block_idx):\n my_coord_indices = self.block_idxs[block_idx]\n for i,j in enumerate(my_coord_indices):\n soln = self._partial_min_solution(j)\n with self.block_locks[block_idx]:\n self.blocks[block_idx][i] = soln", "def reintegrate_state(self, state_chunks):\n num_chunks = len(state_chunks.keys())\n\n # Get the bounds\n bounds = self.chunk_bounds(num_chunks)\n\n # Now reset the master state vector\n for cnum, bnds in bounds.items():\n self.state[dict(location=slice(bnds[0],bnds[1]))] = state_chunks[cnum].state", "def __setitem__(self, name, item):\n if self.is_derived_array(name) and not self.auto_propagate_off:\n raise RuntimeError(\"Derived array is not writable\")\n\n if isinstance(name, tuple) or isinstance(name, list):\n index = name[1]\n name = name[0]\n else:\n index = None\n\n self._assert_not_family_array(name)\n\n if isinstance(item, array.SimArray):\n ax = item\n else:\n ax = np.asanyarray(item).view(array.SimArray)\n\n if name not in list(self.keys()):\n # Array needs to be created. 
We do this through the\n # private _create_array method, so that if we are operating\n # within a particle-specific subview we automatically create\n # a particle-specific array\n try:\n ndim = len(ax[0])\n except TypeError:\n ndim = 1\n except IndexError:\n ndim = ax.shape[-1] if len(ax.shape) > 1 else 1\n\n # The dtype will be the same as an existing family array if\n # one exists, or the dtype of the source array we are copying\n dtype = self._get_preferred_dtype(name)\n if dtype is None:\n dtype = getattr(item, 'dtype', None)\n\n self._create_array(name, ndim, dtype=dtype)\n\n # Copy in contents if the contents isn't actually pointing to\n # the same data (which will be the case following operations like\n # += etc, since these call __setitem__).\n self._set_array(name, ax, index)", "def originalData(self): \n self.__exampleIndices = array(list(range(0, self.__numExamples)))", "def update_part(session=None, data=None):\n data_dict = format_and_check_update_part_request(data)\n if data_dict is None:\n return False\n\n with mc.MCSessionWrapper(session=session) as session:\n for dkey, dval in data_dict.items():\n hpn_to_change = dval[0][0]\n rev_to_change = dval[0][1]\n part_rec = session.query(Parts).filter(\n (func.upper(Parts.hpn) == hpn_to_change.upper())\n & (func.upper(Parts.hpn_rev) == rev_to_change.upper())\n )\n num_part = part_rec.count()\n if num_part == 0:\n part = Parts()\n elif num_part == 1:\n part = part_rec.first()\n set_an_attrib = False\n for d in dval:\n try:\n getattr(part, d[2])\n setattr(part, d[2], d[3])\n set_an_attrib = True\n except AttributeError:\n print(d[2], \"does not exist as a field\")\n continue\n if set_an_attrib:\n session.add(part)\n session.commit()\n cm_utils.log(\"cm_partconnect part update\", data_dict=data_dict)\n\n return True", "def __setitem__(self, key, val):\n if isinstance(key, types.SliceType):\n if not isinstance(val, OrderedDict):\n # FIXME: allow a list of tuples?\n raise TypeError('slice assignment requires an OrderedDict')\n keys = self._sequence[key]\n # NOTE: Could use ``range(*key.indices(len(self._sequence)))``\n indexes = range(len(self._sequence))[key]\n if key.step is None:\n # NOTE: new slice may not be the same size as the one being\n # overwritten !\n # NOTE: What is the algorithm for an impossible slice?\n # e.g. 
d[5:3]\n pos = key.start or 0\n del self[key]\n newkeys = val.keys()\n for k in newkeys:\n if k in self:\n if self.strict:\n raise ValueError('slice assignment must be from '\n 'unique keys')\n else:\n # NOTE: This removes duplicate keys *first*\n # so start position might have changed?\n del self[k]\n self._sequence = (self._sequence[:pos] + newkeys +\n self._sequence[pos:])\n dict.update(self, val)\n else:\n # extended slice - length of new slice must be the same\n # as the one being replaced\n if len(keys) != len(val):\n raise ValueError('attempt to assign sequence of size %s '\n 'to extended slice of size %s' % (len(val), len(keys)))\n # FIXME: efficiency?\n del self[key]\n item_list = zip(indexes, val.items())\n # smallest indexes first - higher indexes not guaranteed to\n # exist\n item_list.sort()\n for pos, (newkey, newval) in item_list:\n if self.strict and newkey in self:\n raise ValueError('slice assignment must be from unique'\n ' keys')\n self.insert(pos, newkey, newval)\n else:\n if key not in self:\n self._sequence.append(key)\n dict.__setitem__(self, key, val)", "def delete(self, idx):\n self.arr[idx] = self.arr[self.current-1]\n self.current -= 1", "def updatePartnerIdices(self):\n ## In order to access the partners by their indices the current\n # indices must be updated in each time step.\n plant_indices = np.array(range(0, self.no_plants))\n for i in self._partner_names:\n if not i:\n self._partner_indices.append([])\n else:\n h = []\n for j in i:\n a = plant_indices[np.where(self._plant_names == j)][0]\n h.append(a)\n self._partner_indices.append(h)", "def in_place_offset(self, offset):\n self.p += offset * self.cross_z.normalized()", "def reindex(self):\n super().reindex()\n self._depths, self._heights = None, None\n for p in self.positions():\n self._compute_depth(p)\n self._compute_height(p)", "def _update_farness_map(self,ind):", "def _child_reindex(self, old_id, model):\n assert old_id in self._models # detect logic errors\n new_id = model.get_id()\n assert new_id != None\n assert new_id not in self._models\n del self._models[old_id]\n self._models[new_id] = model", "def _set_view_slice(self, indices):\n\n vertices = self._mesh_vertices\n faces = self._mesh_triangles\n\n if len(faces) == 0:\n self._node.set_data(vertices=None, faces=None)\n else:\n self._node.set_data(vertices=vertices[:, ::-1], faces=faces,\n color=self.color)\n\n self._need_visual_update = True\n self._update()", "def update_E(self):\n self.grid.E[0, :, :, :] = self.grid.E[-1, :, :, :]", "def update_E(self):\n self.grid.E[:, 0, :, :] = self.grid.E[:, -1, :, :]", "def update_E(self):\n self.grid.E[:, :, 0, :] = self.grid.E[:, :, -1, :]", "def __setitem__(self, inds, value):\n i, j = inds\n self.array[i][j] = value", "def set_member_indexing(self, var_idx: int) -> None:\n if len(self._carry_register_) < var_idx or len(self._carry_register_) == 0:\n return # TODO: Throw an error\n if self._carry_variables_[self._carry_register_[var_idx]] not in [List, list, Dict, dict, str]:\n return # TODO: Throw an error\n\n # Done checking\n self.indexing = self._get_available_var_()\n self.__prefix__ += self.indexing + \" in \" + self._carry_register_[var_idx]\n self.__prefix__ += \":\"", "def test_partial_updates(self):\r\n m1 = TestSetModel.create(int_set={1, 2, 3, 4})\r\n\r\n m1.int_set.add(5)\r\n m1.int_set.remove(1)\r\n assert m1.int_set == {2, 3, 4, 5}\r\n\r\n m1.save()\r\n\r\n m2 = TestSetModel.get(partition=m1.partition)\r\n assert m2.int_set == {2, 3, 4, 5}", "def __getitem__(self, i):\n new_data = 
super().__getitem__(i)\n if isinstance(i, slice):\n new_data = self.__class__(new_data)\n new_data.global_settings = copy.copy(self.global_settings)\n return new_data", "def beam_update(self, idx, positions, beam_size):\n for e in self._all:\n a, br, d = e.size()\n sentStates = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sentStates.data.copy_(\n sentStates.data.index_select(1, positions))", "def offset_index(self, offset):\n if self.has_index:\n self.index += offset", "def test00(self):\n a = np.arange(1, 111)\n b = bcolz.carray(a, chunklen=10)\n sl = [3, 1]\n b[sl] = (10, 20)\n a[sl] = (10, 20)\n # print \"b[%s] -> %r\" % (sl, b)\n assert_array_equal(b[:], a, \"fancy indexing does not work correctly\")", "def _dirty(self, name):\n\n name = self._array_name_1D_to_ND(name) or name\n if name=='pos':\n for v in self.ancestor._persistent_objects.values():\n if 'kdtree' in v:\n del v['kdtree']\n\n if not self.auto_propagate_off:\n for d_ar in self._dependency_tracker.get_dependents(name):\n if d_ar in self or self.has_family_key(d_ar):\n if self.is_derived_array(d_ar):\n del self[d_ar]\n self._dirty(d_ar)", "def set_custom_data(self, index_from, data):\r\n self.unif[index_from:(index_from + len(data))] = data", "def map_to_global(self, idxs, part_id):\n return F.zerocopy_from_dgl_ndarray(\n _CAPI_DGLNDArrayPartitionMapToGlobal(\n self._partition, F.zerocopy_to_dgl_ndarray(idxs), part_id\n )\n )", "def _array_array_set_rel_pos(self, rel_pos):\n# self.staticneighs = True\n if self.staticneighs:\n self.sp_relative_pos = np.array(rel_pos)\n else:\n len_ks = 1 if self.ks is None else len(self.ks)\n self.sp_relative_pos = np.array([rel_pos for k in range(len_ks)])", "def set_changed(self, parts):\r\n self.command_manager.set_changed(parts)", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def move(self, coord, mark):\n self.arr[coord] = mark", "def update_state(self):\n self.reset_state()\n for piece in self.pieces:\n coordinates = piece.get_block_positions()\n for coor in coordinates:\n x, y = coor\n self.state[y][x] = piece", "def expand(self):\n self.vertices[-1, :] = self.expanded", "def partition_indexed(part_index):\n return module_globals[\n partition_tmpl % (name, part_index)\n ]", "def _array_array_array_set_rel_pos(self, rel_pos):\n if self.staticneighs:\n self.sp_relative_pos = rel_pos[0]\n else:\n self.sp_relative_pos = rel_pos", "def __setitem__(self, index, item):\n # type: (int, Any) -> None\n items = self._refs(item) if isinstance(index, slice) else self.ref(item)\n return list.__setitem__(self, index, items)", "def _set_neighs_array_lvl1(self, key):\n #sh = key.shape\n ## If only array of neighs\n if self.staticneighs:\n self.idxs = np.array([key for i in range(len(self.iss))])\n else:\n self.ks = range(1) if self.ks is None else self.ks\n len_ks = len(self.ks)\n self.idxs = np.array([[key for i in range(len(self.iss))]\n for i in range(len_ks)])\n self._setted = True", "def _reinit_indexes(self):\n print('Reinitializing indexes...')\n for identity in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity]['index'] = 0\n 
print('Indexes reinitialized!')", "def _remove_element(cls, d, idx):\n d[idx, 2] = 0\n return d", "def update_partition(self, event) -> None:\n self.min_width = 150 * len(self.partition)\n self.request_update()", "def __setstate__(self, state):\n if \"_version\" not in state.keys(): # Compatibility mode\n self._array = state['_array']\n self._version = 0.1 # promote to the latest version\n x = state['_x']\n y = state['_y']\n z = state['_z']\n dx = state['_dx']\n dy = state['_dy']\n dz = state['_dz']\n if x is not None and y is not None and z is not None:\n self[x:x+dx, y:y+dy, z:z+dz]\n elif x is not None and y is not None:\n self[x:x+dx, y:y+dy]\n elif x is not None:\n self[x:x+dx]\n else:\n self._view = self._array\n self._view_item = None\n else:\n self.__dict__ = state\n if self._view_item is not None:\n self.__getitem__(self._view_item)\n else:\n self._view = self._array", "def update(self, instance, validated_data):\n for attr, value in list(validated_data.items()):\n if attr == 'part_details':\n for a, v in list(value.items()):\n setattr(instance.part_details, attr, value)\n else:\n setattr(instance, attr, value)\n instance.save()\n return instance", "def _set_pre_op_offset(self, spec: DTensorSpec) -> None:\n dtensor_shape = spec.shape\n mesh = spec.mesh\n dim_map = spec.dim_map\n\n # Compute shard coordinate:\n # The coordinate on each tensor dim is a tuple (idx, range)\n # If a DTensor is partitioned on its dim i into n shards, and the current rank\n # holds the j-th, then its shard coordinate will be (idx=j, range=n) on dim i\n coordinate = mesh.get_coordinate()\n assert coordinate is not None\n shard_coord = [\n coordinate[mesh_dim] if mesh_dim >= 0 else 0 for mesh_dim in dim_map\n ]\n shard_size = [\n mesh.size(mesh_dim) if mesh_dim >= 0 else 1 for mesh_dim in dim_map\n ]\n\n # compute shard linear index\n shard_linear_idx = self._calc_shard_linear_idx(shard_coord, shard_size)\n\n # compute starting offset using the first shard's size\n local_size_on_rank_0 = list(dtensor_shape)\n for idx, placement in enumerate(spec.placements):\n if isinstance(placement, Shard):\n mesh_dim_size = mesh.size(idx)\n shard_dim = placement.dim\n local_size_on_rank_0[shard_dim] = placement._local_shard_size_on_dim(\n dtensor_shape[shard_dim],\n mesh_dim_size,\n 0,\n return_offset=False,\n )[0]\n\n from torch.distributed._tensor.ops.utils import prod\n\n local_size = prod(local_size_on_rank_0)\n\n # get current RNG offset\n current_offset = self.get_offset(\"parallel-rng\")\n\n # pytorch: offset must be multiple of 4\n # source: aten/src/ATen/cuda/CUDAGeneratorImpl.cpp\n offset_incr = (shard_linear_idx * local_size + 3) // 4 * 4\n self.set_offset(\"parallel-rng\", current_offset + offset_incr)", "def set_offset(self, trace, offset):\n trace = self.traces_to_idx(trace)\n self._offset[trace] = offset\n # Inform descendants of change\n self.set_changed()", "def reindex(self, using=None):\n self.clear(using=using)\n self.update(using=using)", "def __setitem__(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n self.data[nidx] = value", "def set_part(self, connection_part):\n self.part = connection_part", "def __setitem__(self, item, value):\n index = self.reindex(item)\n self.parent.__setitem__(index, value)", "def reset_s(self):\n self.s = np.copy(self.f_uniq) # (current) solution, selected column", "def reindex(self):\n raise NotImplementedError()", "def _restore_global_position(x, root_pos, root_idx=None):\n x = x + 
root_pos\n if root_idx is not None:\n x = np.insert(x, root_idx, root_pos.squeeze(1), axis=1)\n return x", "def update_from_indexes(self, data, **kw):\n for i in data:\n self.update_from_index(i, **kw)", "def __setitem__(self, idx, val):\n self.rows[idx[0]][idx[1]] = val" ]
[ "0.5455975", "0.5450583", "0.5443926", "0.53721666", "0.5359862", "0.5275372", "0.52543634", "0.52315736", "0.5228334", "0.51149035", "0.50902224", "0.50782204", "0.5062653", "0.50265366", "0.50074023", "0.49909484", "0.49905938", "0.49756426", "0.4970326", "0.496184", "0.496184", "0.49535567", "0.4943507", "0.49215022", "0.49166226", "0.49110836", "0.49097493", "0.4902477", "0.4890401", "0.48781407", "0.48481715", "0.48267615", "0.48146898", "0.4808053", "0.4803822", "0.47912985", "0.4790657", "0.4761948", "0.47523195", "0.4748287", "0.47477698", "0.4742629", "0.47417226", "0.47413313", "0.47361755", "0.47345686", "0.47320303", "0.47253257", "0.47216284", "0.47195795", "0.47145712", "0.4712462", "0.471057", "0.4703475", "0.46985236", "0.46983114", "0.46902126", "0.46876916", "0.46836734", "0.4682486", "0.4681186", "0.46764952", "0.46704277", "0.46687114", "0.4668263", "0.46472108", "0.4644216", "0.46413085", "0.46374607", "0.46330908", "0.46295178", "0.46290186", "0.46264744", "0.462511", "0.46247083", "0.460113", "0.45928112", "0.45908034", "0.45895377", "0.4584424", "0.45834067", "0.45831272", "0.457653", "0.45688072", "0.45678675", "0.4567391", "0.45571727", "0.45562705", "0.4555775", "0.45550337", "0.45533147", "0.45525464", "0.4552538", "0.45442653", "0.4540668", "0.45402852", "0.4537582", "0.45278376", "0.45230293", "0.45187727" ]
0.5784396
0
The extra memory required to access the array.
def extra_memory(self):
    if not self.in_memory:
        # --------------------------------------------------------
        # The subarray is on disk so getting the partition's data
        # array will require extra memory
        # --------------------------------------------------------
        extra_memory = True
    else:
        # --------------------------------------------------------
        # The subarray is already in memory
        # --------------------------------------------------------
        config = self.config

        p_part = self.part
        if p_part:
            extra_memory = True
        elif not config["unique_subarray"]:
            extra_memory = True
        else:
            p_data = self._subarray

            if not numpy_ma_isMA(p_data):
                # The p_data is not a masked array
                extra_memory = isinstance(p_data.base, numpy_ndarray)
            else:
                # The p_data is a masked array
                memory_overlap = isinstance(
                    p_data.data.base, numpy_ndarray
                )
                if not (
                    p_data.mask is numpy_ma_nomask
                    or not numpy_ma_is_masked(p_data)
                ):
                    # There is at least one missing data point
                    memory_overlap |= isinstance(
                        p_data.mask.base, numpy_ndarray
                    )

                extra_memory = memory_overlap
            # --- End: if

            p_dtype = p_data.dtype

            if not extra_memory:
                if config["func"] is not None:
                    extra_memory = True
                else:
                    p_units = self.Units
                    units = config["units"]
                    if (
                        not p_units.equals(units)
                        and bool(p_units) is bool(units)
                        and not (
                            p_data.flags["C_CONTIGUOUS"]
                            and p_dtype.kind == "f"
                        )
                    ):
                        extra_memory = True

            # ------------------------------------------------------------
            # Extra memory is required if the dtype needs changing
            # ------------------------------------------------------------
            if not extra_memory:
                dtype = config["dtype"]
                if dtype is not None and dtype != p_data.dtype:
                    extra_memory = True
    # --- End: if

    # ------------------------------------------------------------
    # Amount of extra memory (in bytes) required to access the
    # array
    # ------------------------------------------------------------
    return self.nbytes if extra_memory else 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allocated_memory(self):\n return self._allocated_memory", "def memory(self):\r\n return self._memory", "def __len__(self):\n\t\treturn len(self.memory)", "def __len__(self):\r\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def __len__(self):\n return len(self.memory)", "def allocatememory(self):\n pass", "def get_array_size(self):\r\n return conf.lib.clang_getArraySize(self)", "def MAXMEM(self):", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory():\n return tracemalloc.take_snapshot()", "def getMemory(self):\n return self.memory", "def array_size(self):\n return self._array_size", "def MEM (self,n):", "def get_memory(self):\n return self.loss_memory", "def memory(self) -> Optional[Any]:\n return pulumi.get(self, \"memory\")", "def memory(self) -> Optional[Any]:\n return pulumi.get(self, \"memory\")", "def deviceMemory(self):\n return 1", "def memory_get_usage():\n raise NotImplementedError()", "def read_memory(self):\n if self.mem_empty == True:\n if self.mem_idx == 0:\n m_x = np.zeros(self.n)\n m_d = 0\n else:\n m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0)\n m_d = np.mean(self.mem_d[:self.mem_idx])\n else:\n m_x = np.mean(self.mem_x, axis=0)\n m_d = np.mean(np.delete(self.mem_d, self.mem_idx))\n self.mem_idx += 1\n if self.mem_idx > len(self.mem_x)-1:\n self.mem_idx = 0\n self.mem_empty = False\n return m_d, m_x", "def getArrayLength(self):\r\n return self.arraySize", "def _read(self):\n return np.copy(self.memory[self.head_pos])", "def get_used_mem(self):\n return self.used_mem", "def memory_used(self) -> int:\r\n return self._memory_used", "def memory_real_in_bytes(self):\n cdef INT64_t total_memory = 0\n\n # col\n total_memory += self.__nnz * sizeof(INT64_t)\n # ind\n total_memory += (self.__nrow + 1) * sizeof(INT64_t)\n # val\n total_memory += self.__nnz * sizeof(FLOAT32_t)\n\n return total_memory", "def getSize(self):\n return self.bf.memory()", "def __len__(self):\n return len(self.array)", "def __len__(self):\n return len(self.array)", "def size(self):\n return self._N", "def BytesOfStorage(self):\n return (self.NumBits() + 7) / 8", "def getSize(self) -> int:\n return len(self.mem)", "def estimated_lookup_memory(self):\n return 60 * len(self.docvecs.offset2doctag) + 140 * len(self.docvecs.doctags)", "def memory_utilization(self) -> float:\r\n return self._memory_utilization", "def total_memory(self):\n return self._total_memory", "def 
__len__(self):\n\n return len(self.data) * 8", "def __len__(self) -> int:\n return self.disp_size ** 2", "def _get_raw_data(self):\n raw_data_bytes = self.nbytes + self._heapsize\n base = self\n while hasattr(base, \"base\") and base.base is not None:\n base = base.base\n # Variable-length-arrays: should take into account the case of\n # empty arrays\n if hasattr(base, \"_heapoffset\"):\n if hasattr(base, \"nbytes\") and base.nbytes > raw_data_bytes:\n return base\n # non variable-length-arrays\n else:\n if hasattr(base, \"nbytes\") and base.nbytes >= raw_data_bytes:\n return base", "def update_size(self):\n return 3 + self.memory_unit_size", "def memory():\n sin = psutil.virtual_memory()\n return round((sin.total / sin.used) / 100, 3)", "def allocated_storage(self):\n return self._allocated_storage", "def memUsedGpu(self):\n return None # amount not known", "def total_actual_memory(self):\n return self._total_actual_memory", "def get_total_memory_size(self):\n return self.drt_manager.get_total_memory_size()", "def in_memory_data(self):\n return self._in_memory_data", "def get_size(self):", "def _N(self):\n return len(self._array)", "def __len__(self):\n return self.current_size", "def __len__(self):\n\t\treturn self._size", "def get_total_memory_size(self):\n memory = 0\n for i in range(4):\n for j in range(4):\n memory += self.system.operator[i, j].memory\n return memory", "def get_free_mem(self):\n return self.free_mem", "def get_mem(self) -> list:\n return self.__mem", "def size(self):\r\n return self.size.data", "def size(self):\n return len(self.arr)", "def __len__(self):\r\n return 100000", "def size(self):\n return self.ptr", "def getallocatedblocks(): # real signature unknown; restored from __doc__\n return 0", "def __len__(self):\r\n return self._size", "def __len__(self):\r\n return self._size", "def display_memory(self) -> None:\n return self.__memory", "def size(self):\n return self.__size", "def get_total_mem(self):\n return self.total_mem", "def mem_per_proc(self):\n return self._mem_per_proc", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def get_space_used():\n fs.get_space_used()", "def array(self):", "def size(self):\r\n # Anthony stage 2\r\n return number_size(self.n) - 1", "def in_memory(self):\n return hasattr(self._subarray, \"__array_interface__\")", "def size(self):\r\n return self._size", "def _nbytes(self, deep: bool = False) -> int:\n # for implementations with no useful getsizeof (PyPy)\n objsize = 24\n\n level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)\n label_nbytes = sum(i.nbytes for i in self.codes)\n names_nbytes = sum(getsizeof(i, objsize) for i in self.names)\n result = level_nbytes + label_nbytes + names_nbytes\n\n # include our engine hashtable\n result += self._engine.sizeof(deep=deep)\n return result", "def get_memory(self):\n return (self.K.get_value(), self.V.get_value(), self.A.get_value())", "def size(self) -> int:", "def __len__(self):\n return self._arr.shape[1]", "def allocate(self):\n raise NotImplementedError", "def size(self):\n return self._size", "def master_mem_overhead(self):\n return self._master_mem_overhead", "def get_size(self):\n ...", "def __len__(self):\n return self._size", "def __len__(self):\n return self._size" ]
[ "0.6821264", "0.6811845", "0.66922146", "0.66735834", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.6612898", "0.65476745", "0.6454627", "0.6434698", "0.64256907", "0.64256907", "0.64256907", "0.64256907", "0.64256907", "0.64256907", "0.64256907", "0.6421089", "0.6355993", "0.6355911", "0.63380694", "0.6234925", "0.6234925", "0.6210194", "0.61880624", "0.61694604", "0.6157968", "0.6144876", "0.61090446", "0.610004", "0.60976934", "0.60961306", "0.60599285", "0.60599285", "0.6057573", "0.60571706", "0.6045886", "0.6040636", "0.60312796", "0.6009837", "0.6006144", "0.59940344", "0.5988107", "0.59719926", "0.59624505", "0.59426904", "0.59420973", "0.59121144", "0.5909753", "0.5905127", "0.5891123", "0.5885706", "0.5857103", "0.58505625", "0.5841439", "0.5815311", "0.58117014", "0.58079964", "0.5805348", "0.5803843", "0.5794774", "0.5793059", "0.5782983", "0.5782983", "0.57791406", "0.57715523", "0.5770453", "0.5761753", "0.57322794", "0.57259846", "0.5725839", "0.57202536", "0.5716401", "0.5711541", "0.5709581", "0.57084954", "0.57082736", "0.5704399", "0.5701274", "0.56990856", "0.56973356", "0.5694242", "0.5694148", "0.5694148" ]
0.73394907
0
Open the partition prior to getting its array.
def open(self, config):
    unique_subarray = getrefcount(self._subarray) <= 2

    config = config.copy()
    config["unique_subarray"] = unique_subarray

    self.config = config

    if config.get("auxiliary_mask"):
        self._configure_auxiliary_mask(config["auxiliary_mask"])

    self.config["extra_memory"] = self.extra_memory()

    self._in_place_changes = True
    self.masked = True

    if hasattr(self, "output"):
        del self.output

    return config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open(self):\n return self.open_array", "def partition_book(self):\n ...", "def to_disk(self, reopen=True):\n # try:\n tfa = CachedArray(self.array)\n # except Exception:\n # return False\n\n fd, _lock_file = mkstemp(\n prefix=tfa._partition_file + \"_\", dir=tfa._partition_dir\n )\n close(fd)\n\n self.subarray = tfa\n _temporary_files[tfa._partition_file] = (\n tfa._partition_dir,\n _lock_file,\n set(),\n )\n\n if reopen:\n # Re-open the partition\n self.open(self.config)\n\n return True", "def close(self, **kwargs):\n config = getattr(self, \"config\", None)\n\n if config is None:\n return\n\n if kwargs:\n config.update(kwargs)\n\n original = getattr(self, \"_original\", None)\n logger.partitioning(\"Partition.close: original = {}\".format(original))\n\n if not original:\n originally_on_disk = False\n original_subarray = None\n else:\n originally_on_disk = not original.in_memory\n original_subarray = original._subarray\n\n config = self.config\n logger.partitioning(\" config = {}\".format(config))\n\n if config[\"serial\"]:\n # --------------------------------------------------------\n # SERIAL\n # --------------------------------------------------------\n logger.partitioning(\" serial\")\n\n if config[\"readonly\"]:\n logger.partitioning(\" readonly=True\")\n\n if originally_on_disk:\n logger.partitioning(\" subarray originally on disk\")\n\n if config.get(\"to_disk\", False):\n # 1.1.1.1 The original subarray was on disk,\n # we don't want to keep the current\n # subarray in memory, and we are happy\n # to discard any changes that may have\n # been made to the subarray.\n logger.partitioning(\" 1.1.1.1 revert\")\n self.revert()\n elif free_memory() <= cf_fm_threshold():\n # 1.1.1.2 The original subarray was on disk,\n # we are happy to keep the current\n # subarray in memory, but there is not\n # enough free memory to do so.\n logger.partitioning(\n \" 1.1.1.2 revert ({} <= {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n self.revert()\n else:\n # 1.1.1.3 The original subarray was on disk\n # and there is enough memory to keep\n # the current subarray in memory\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # The original subarray was a temporary\n # file which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n del self.masked\n logger.partitioning(\n \" 1.1.1.3 del masked ({} > {})\".format(\n free_memory(), cf_fm_threshold()\n )\n )\n\n else:\n logger.partitioning(\" subarray originally in memory\")\n if config.get(\"to_disk\", False):\n # 1.1.2.1 Original subarray was in memory and\n # we don't want to keep the current\n # subarray in memory\n logger.partitioning(\" 1.1.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.1.2.2 Original subarray was in memory and\n # unique but there is not enough\n # memory to keep the current subarray\n logger.partitioning(\" 1.1.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.1.2.3 Original subarray was in memory and\n # unique and there is enough memory to\n # keep the current subarray in memory\n logger.partitioning(\" 1.1.2.3 pass\")\n pass\n else:\n # config['readonly'] is False\n if originally_on_disk:\n if config.get(\"to_disk\", False):\n # 1.2.1.1 Original subarray was on disk and\n # there and we don't want to keep the\n # array\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on 
disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.1.2 Original subarray was on disk but\n # there is not enough memory to keep\n # it\n if config[\"unique_subarray\"] and isinstance(\n original_subarray, CachedArray\n ):\n # Original subarray was a temporary file\n # on disk which is not referenced by any\n # other partitions\n _remove_temporary_files(\n original_subarray._partition_file\n )\n\n logger.partitioning(\" 1.2.1.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.1.3 Original subarray was on disk and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.1.3 pass\")\n del self.masked\n else:\n if config.get(\"to_disk\", False):\n # 1.2.2.1 Original subarray was in memory but\n # we don't want to keep it\n logger.partitioning(\" 1.2.2.1 to_disk\")\n self.to_disk(reopen=False)\n elif free_memory() <= cf_fm_threshold():\n # 1.2.2.2 Original subarray was an in memory\n # but there is not enough memory to\n # keep it\n logger.partitioning(\" 1.2.2.2 to_disk\")\n self.to_disk(reopen=False)\n else:\n # 1.2.2.3 Original subarray was in memory and\n # there is enough memory to keep it\n logger.partitioning(\" 1.2.2.3 del masked\")\n del self.masked\n else:\n logger.partitioning(\"Partition.close: parallel\")\n # --------------------------------------------------------\n # PARALLEL\n # --------------------------------------------------------\n pass\n\n # if hasattr(self, '_original'):\n # del self._original\n\n # print(hasattr(self, 'config')),\n try:\n del self.config\n except AttributeError:\n pass", "async def begin_array(self):", "def _load_disk(self):", "def _load_disk(self):", "def open_position(self, position: int):\n # get row, column, and path to the well\n row_name = self.positions[position]['row']\n col_name = self.positions[position]['col']\n well_path = os.path.join(os.path.join(self.root_path, row_name), col_name)\n\n # check to see if this well exists (row/column)\n if os.path.exists(well_path):\n pos_name = self.positions[position]['name']\n pos_path = os.path.join(well_path, pos_name)\n\n # check to see if the position exists\n if os.path.exists(pos_path):\n\n if self.verbose: print(f'Opening subgroup {row_name}/{col_name}/{pos_name}')\n\n # update trackers to note the current status of the writer\n self.current_pos_group = self.store[row_name][col_name][pos_name]\n self.current_well_group = self.store[row_name][col_name]\n self.current_position = position\n\n else:\n raise FileNotFoundError(f'Could not find zarr position subgroup at {row_name}/{col_name}/{pos_name}\\\n Check spelling or create position subgroup with create_position')\n else:\n raise FileNotFoundError(f'Could not find zarr position subgroup at {row_name}/{col_name}/\\\n Check spelling or create column/position subgroup with create_position')", "def lazy_read_file(self):\n store = zarr.DirectoryStore(self.fpath)\n z_array = zarr.open(store=store, mode='r')\n self.da_input = da.from_array(z_array)\n self.data = self.da_input\n self.data_dim = self.data.shape\n self.chunk_size = z_array.chunks", "def _load_disk(self):\r\n pass", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def a_subarray_in_the_idle_state():", "def provide_partition_info(self):\n self.partition_info = True", "def getPartition(self):\n\t\treturn self.partition", "def 
initialize(self):\r\n self.bucket_array.initialize()", "def set_xp_partition(self,xp_partition):\n # setup partition: set x grid\n self.xp_partition = xp_partition\n self.xp_partition.setup_x_grid(xx=[0,self.L])\n # local copies of x and p grids\n self.x=self.xp_partition.x\n self.p=self.xp_partition.p\n # allocate fmci_XP array\n self.fmci_XP=np.zeros((self.xp_partition.nx,self.xp_partition.np))", "def partition(self, arr, low, high, pivot):\n i_less = low\n for i in range(low, high):\n if arr[i] <= pivot: # note the equal\n arr[i], arr[i_less] = arr[i_less], arr[i]\n i_less += 1 # do not forget this\n arr[i_less], arr[high] = arr[high], arr[i_less] # put the pivot into the mid position\n return i_less", "def open(self) -> None:\n if not self.__opened:\n if self.path is None:\n self.path = HID.enumerate_devices(self.vendor_id)[0]\n self.device.open_path(self.path)\n self.device.set_nonblocking(True)\n self.__opened = True", "def next(self):\n partition_start = self._partitions[self._current_index]\n partition_end = partition_start + self._partition_length\n self._current_index += 1\n return self._dataframe[partition_start:partition_end]", "def get_partitioning(self):\n raise Exception(\"Unimplemented\")", "def allocate(self,partition, num_farms, activity, runinfo_dp):\n if self.load():\n nf = num_farms\n got = 0\n used = Online.PVSS.StringVector()\n farms = Online.PVSS.StringVector()\n dpv = Online.PVSS.DataPointVector()\n for i in xrange(len(self.inUse.data)):\n f = self.inUse.data[i]\n n = self.subfarms.data[i]\n if len(f)==0 and got<nf:\n dpv.push_back(self.dp2(self.name+'_'+n,'UsedBy'))\n dpv.back().data = partition\n dpv.push_back(self.dp2(self.name+'_'+n,'RunInfo'))\n dpv.back().data = runinfo_dp\n dpv.push_back(self.dp2(self.name+'_'+n,'Activity'))\n dpv.back().data = activity\n used.push_back(partition)\n farms.push_back(n)\n got = got + 1\n else:\n used.push_back(f)\n if got==nf:\n if len(runinfo_dp)>0:\n #dpv.push_back(self.dp2(runinfo_dp,'general.partName'))\n #dpv.back().data = partition\n #dpv.push_back(self.dp2(runinfo_dp,'HLTFarm.nSubFarms'))\n #dpv.back().data = nf\n dpv.push_back(self.dp2(runinfo_dp,'HLTFarm.subFarms'))\n dpv.back().data = farms\n self.inUse.data = used\n self.writer.add(dpv)\n self.writer.add(self.inUse)\n if self.writer.execute():\n return 'SUCCESS'\n self.error('Failed to update allocation information for partition '+\\\n partition+' in farm system:'+self.name)\n return None\n return self.error('Not enough free subfarms availible for partition '+partition+\\\n ' in farm system:'+self.name)\n return self.error('Failed to load information for partition '+partition+\\\n ' in farm system:'+self.name)", "def one_basis(self):\n return self._kbounded_partitions([])", "def test_partitioner_iter(self):\n partitioner = self.tx_client.SetPartitioner(\"xyzzy\", \"iddqd\")\n self.assertEqual(list(partitioner), [1])", "def _lock_partition(self, partition, shared=False):\n pass", "def allocate(self):\n raise NotImplementedError", "def testRead(self):\n self._TestRead(self._tsk_partition_path_spec)", "def testRead(self):\n self._TestRead(self._tsk_partition_path_spec)", "def partition(seq):\n\n return 0", "def __init__(self):\n self.arr = []\n self.size = 0", "def load_chunk(self, start): # TODO parallelize this whole process\n self.X = queue.Queue()\n n = 0 # number of loaded batches\n print('stop loading : %s' % self.stop_loading())\n print('start + n : %s' % str(start + n))\n while (not self.stop_loading()) and (start + n) < self.size:\n print('load')\n 
self.X.put(np.load(self.data_filenames[start+n]))\n n += 1\n print('return chunk')\n return n", "def set_partition(self, begin=0, end=0):\r\n self.partition = (begin, end)", "def get_free_partition_index(dev):\n try:\n lines = _check_output(\n args=[\n 'parted',\n '--machine',\n '--',\n dev,\n 'print',\n ],\n )\n except subprocess.CalledProcessError as e:\n print 'cannot read partition index; assume it isn\\'t present\\n (Error: %s)' % e\n return 1\n\n if not lines:\n raise Error('parted failed to output anything')\n if ('CHS;' not in lines and\n 'CYL;' not in lines and\n 'BYT;' not in lines):\n raise Error('parted output expected to contain one of ' +\n 'CHH; CYL; or BYT; : ' + lines)\n if dev not in lines:\n raise Error('parted output expected to contain ' + dev + ': ' + lines)\n _, partitions = lines.split(dev)\n partition_numbers = extract_parted_partition_numbers(partitions)\n if partition_numbers:\n return max(partition_numbers) + 1\n else:\n return 1", "def _read(self):\n return np.copy(self.memory[self.head_pos])", "def __init__(self, partition, test=False, local_test_data_dir=_LOCAL_TEST_DATA_DIR):\n assert sum(partition) == 100, 'The sum of the partition list must be 100: {}'.format(partition)\n self._partition = partition\n self._test = test\n # Split the files up according to the self._partition list.\n self._partitioned_filenames = []\n filenames = data_filenames(shuffle=False, test=self._test,\n local_test_data_dir=local_test_data_dir)\n part_start = 0\n for i, part_size in enumerate(self._partition):\n part_end = part_start + int(len(filenames) * 0.01 * part_size)\n assert part_end - part_start > 0, 'The number of files in partition {} is zero.'.format(i)\n self._partitioned_filenames.append(filenames[part_start:part_end])", "def get_partition(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_GetPartition', self.handle, nIndex))", "def add_partition(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmDevHd_AddPartition', self.handle))", "def _preload_range(self):\n queue = self._read_queue\n size = self._buffer_size\n start = self._seek\n end = int(start + size * self._max_buffers)\n workers_submit = self._workers.submit\n indexes = tuple(range(start, end, size))\n\n for seek in tuple(queue):\n if seek not in indexes:\n del queue[seek]\n\n read_range = self._read_range\n for seek in indexes:\n if seek not in queue:\n queue[seek] = workers_submit(read_range, seek, seek + size)", "def resume_reading(self):\n raise NotImplementedError", "def get_partition():\n if selection is None:\n warning(\"You need to pick something first.\")\n return\n if not selection.obj_type in ['partition']:\n warning(\"You need to partition the selection first.\")\n return\n res = askItems([['property',[1]]],\n caption='Partition property')\n if res:\n prop = res['property']\n getPartition(selection,prop)\n highlightPartitions(selection)", "def rpartition(self, x):\n pass", "def get_index(self):\n return self.disk.partitions.index(self)", "def num_partitions(self): # -> None:\n ...", "def push_front(self, param):\n if self.size == self.capacity:\n self.resize(2 * self.size)\n for _ in range(self.arr):\n pass", "def _partition(array, order, pivot, lower, upper):\n # The pivot element is made the last not to interfere with other swaping \n # operations\n array[pivot], array[upper] = array[upper], array[pivot]\n\n # Last index of smaller/greater (asc/desc) elements\n idx = lower\n\n for j in range(lower, upper):\n if (order == Order.ASC and array[j] < array[upper] or # 
ASCENDING\n order == Order.DESC and array[j] > array[upper]): # DESCENDING\n # Moves the current element at the end of the sub-array with\n # the smaller/grater (asc/desc) elements\n array[idx], array[j] = array[j], array[idx]\n idx += 1\n\n # Moves pivot as separator between greater and smaller numbers\n array[idx], array[upper] = array[upper], array[idx]\n\n return idx", "def _open_dataset(self):\n\n try:\n\n datasets = [\n _open_dataset(\n record[self.path_column_name],\n record[self.variable_column_name],\n xarray_open_kwargs=self.xarray_open_kwargs,\n preprocess=self.preprocess,\n expand_dims={\n agg.attribute_name: [record[agg.attribute_name]]\n for agg in self.aggregations\n if agg.type.value == 'join_new'\n },\n requested_variables=self.requested_variables,\n additional_attrs=record.to_dict(),\n )\n for _, record in self.df.iterrows()\n ]\n\n datasets = dask.compute(*datasets)\n if len(datasets) == 1:\n self._ds = datasets[0]\n else:\n datasets = sorted(\n datasets,\n key=lambda ds: tuple(\n f'{INTAKE_ESM_ATTRS_PREFIX}/{agg.attribute_name}'\n for agg in self.aggregations\n ),\n )\n with dask.config.set(\n {'scheduler': 'single-threaded', 'array.slicing.split_large_chunks': True}\n ): # Use single-threaded scheduler\n datasets = [\n ds.set_coords(set(ds.variables) - set(ds.attrs[INTAKE_ESM_VARS_KEY]))\n for ds in datasets\n ]\n self._ds = xr.combine_by_coords(\n datasets, **self.xarray_combine_by_coords_kwargs\n )\n\n self._ds.attrs[INTAKE_ESM_DATASET_KEY] = self.key\n\n except Exception as exc:\n raise ESMDataSourceError(\n f\"\"\"Failed to load dataset with key='{self.key}'\n You can use `cat['{self.key}'].df` to inspect the assets/files for this key.\n \"\"\"\n ) from exc", "def test_partition(self):\n mat = self.mat\n self.assertSequenceEqual(\n [mat.m, mat.n, mat.shape[2], mat.shape[3], mat.dtype],\n [self.m, self.n, self.p, self.q, self.dtype]\n )\n if not mat.is_active:\n self.assertSequenceEqual(\n [mat.mloc, mat.mstart, mat.mend, mat.nloc, mat.nstart, mat.nend],\n [0, 0, 0, 0, 0, 0]\n )\n else:\n pass", "def get_partition(self):\n return self._partition", "def grow(self):\n while self.splittable_nodes:\n self.split_next()", "def get(self):\n return self._partition", "def __init__(self, my_partition: List[int]):\n self.my_partition = my_partition\n self.my_partition.sort(reverse=True)\n if self.my_partition[-1]==0:\n first_zero = self.my_partition.index(0)\n self.my_partitition = self.my_partition[0:first_zero]\n self.my_n = sum(self.my_partition)", "def test_open_fill(self):", "def __init__(self, arr):\n self._arr = arr\n self._n = len(arr) # no. 
sub-arrays \n self._row = 0 # initialize cursor at position (0,0)\n self._col = 0", "def newpart(self, device, primary, ncyls, swap=False):\n # This is a simple partitioning tool, which only supports\n # adding partitions sequentially, with all primary partitions\n # being before the extended partition, so once a logical\n # partition has been added, it is not possible to add further\n # primary ones.\n di = DiskInfo(device)\n pmax = 0 # Record highest partition number\n lim = -1 # Used for seeking last used cylinder\n exp = 0 # Number of extended partition\n ex0, ex1 = 0, -1 # Extended partition start and end\n log0, log1 = 0, -1 # Start and end of area used by logical partitions\n for p in di.parts:\n pn = int(p[0][len(device):])\n scyl, ecyl = p[1:3]\n if pn <= 4:\n if exp:\n run_error(_(\"Not supported: primary partition (%s%d)\\n\"\n \"has higher partition number than extended \"\n \"partition\") % (device, pn))\n return \"\"\n if scyl <= lim:\n run_error(_(\"Partitions must be ordered on the device.\\n\"\n \"%s%d is out of order.\") % (device, pn))\n return \"\"\n if p[3] in (\"5\", \"f\"):\n # extended\n exp = pn\n ex0, ex1 = scyl, ecyl\n continue\n pmax = pn\n lim = ecyl\n\n startcyl = lim + 1\n endcyl = lim + ncyls\n if endcyl >= di.drvcyls:\n run_error(_(\"Too little space at end of drive for new partition\"))\n return \"\"\n if exp and (pmax <= 4):\n # Remove the extended partition, which is empty anyway\n if not self.rmpart(device, exp):\n return \"\"\n pmax = exp - 1\n if primary:\n if pmax >= 4:\n run_error(_(\"Cannot add primary partition to %s\") % device)\n return \"\"\n t = \"primary\"\n else:\n t = \"logical\"\n if pmax > 4:\n # resize extended partition\n if not self.xcheck(\"resizepart\", device, str(exp),\n str(ex0), str(endcyl),\n onfail=_(\"Couldn't resize extended partition %s%d\")\n % (device, exp)):\n return False\n else:\n # create extended partition\n if not self.xcheck(\"newpart\", device,\n str(startcyl), str(endcyl), \"extended\",\n onfail=_(\"Couldn't create extended partition on %s\")\n % device):\n return False\n if pmax < 4:\n pmax = 4\n\n if self.xcheck(\"newpart\", device, str(startcyl), str(endcyl),\n t, \"linux-swap\" if swap else \"ext2\"):\n return \"%s%d\" % (device, pmax + 1)\n else:\n run_error(_(\"Couldn't add new partition to %s\") % device)\n return \"\"", "def get_open_disk_space(self):\n count = 0\n for i in range(self.size):\n if self.disk_mem[i]==\".\":\n count += 1\n return count", "def update(self) -> np.ndarray:\r\n # Read chunk from array\r\n signal_slice: np.ndarray = np.array(\r\n self._signal_data[self._pointer : self._pointer + self._chunk]\r\n )\r\n # Move index over\r\n self._pointer += self._chunk\r\n if self._pointer > len(self._signal_data):\r\n # Go back to beginning\r\n self._pointer = 0\r\n self._restart_flag = True\r\n print(\"Restarting stream...\")\r\n\r\n return signal_slice", "def update(self) -> np.ndarray:\r\n # Read chunk from array\r\n signal_slice: np.ndarray = np.array(\r\n self._signal_data[self._pointer : self._pointer + self._chunk]\r\n )\r\n # Move index over\r\n self._pointer += self._chunk\r\n if self._pointer > len(self._signal_data):\r\n # Go back to beginning\r\n self._pointer = 0\r\n self._restart_flag = True\r\n print(\"Restarting stream...\")\r\n\r\n return signal_slice", "def partition_session(self):\n if self.user['drive']['name'] is not None:\n\n # Set root size\n if self.user['root_freespace'] is True:\n self.user['root_size'] = 'freespace'\n\n # Set partition parameters\n self.user['partitions'] 
= {'name': ['boot', 'root'],\n 'size': [self.user['boot_size'],\n self.user['root_size']],\n 'filesystem': ['fat32', 'ext4'],\n 'mountpoint': ['/mnt/boot', '/mnt'],\n 'mountorder': [1, 0]}\n\n # Set swap size and filesystem\n if 'Swap' in self.user['optional_partitions']:\n self.user['partitions']['size'].insert(1, self.user['swap_size'])\n self.user['partitions']['filesystem'].insert(1, 'swap')\n\n # Set home size and filesystem\n if 'Home' in self.user['optional_partitions']:\n if self.user['home_freespace'] is True:\n self.user['home_size'] = 'freespace'\n self.user['partitions']['size'].append(self.user['home_size'])\n self.user['partitions']['filesystem'].append('ext4')\n\n # Custom partitions\n else:\n\n # Set partition parameters\n self.user['partitions'] = {\n 'name': ['boot', 'root'],\n 'drive_id': [self.user['boot_id'].split()[0],\n self.user['root_id'].split()[0]],\n 'mountpoint': ['/mnt/boot', '/mnt'],\n 'mountorder': [1, 0]}\n\n # Set swap drive ID\n if self.user['swap_id'] is not None:\n self.user['partitions']['drive_id'].insert(\n 1, self.user['swap_id'].split()[0])\n\n # Set home drive ID\n if self.user['home_id'] is not None:\n self.user['partitions']['drive_id'].append(\n self.user['home_id'].split()[0])\n\n # Set swap parameters\n if ('Swap' in self.user['optional_partitions']) or \\\n (self.user['swap_id'] is not None):\n self.user['partitions']['name'].insert(1, 'swap')\n self.user['partitions']['mountpoint'].insert(1, 'swap')\n self.user['partitions']['mountorder'].insert(1, 2)\n\n # Set home parameters\n if 'Home' in self.user['optional_partitions'] or \\\n (self.user['home_id'] is not None):\n self.user['partitions']['name'].append('home')\n self.user['partitions']['mountpoint'].append('/mnt/home')\n self.user['partitions']['mountorder'].append(3)", "def num_partitions(self): # -> int:\n ...", "def partition1(self, partition1):\n\n self._partition1 = partition1", "def smallest_partition():\n try:\n usb_partitions = sort_partitions()\n smallest = usb_partitions[0]\n except IndexError:\n print(\"Not enough USB devices available\")\n exit(1)\n else:\n return str(smallest[0])", "def GetPartitioningArray(self):\n return _hypre.HypreParVector_GetPartitioningArray(self)", "def partition(cls, key):\n return cls.partition_indexed(\n cls.hash_ring.select_bucket(key),\n )", "def init_disk(self):\n self.put(self.SIZE/2-1, self.SIZE/2-1,\n Disk(self.SIZE/2-1, self.SIZE/2-1, 255, self, self.DISKSIZE))\n self.put(self.SIZE/2-1, self.SIZE/2,\n Disk(self.SIZE/2-1, self.SIZE/2, 0, self, self.DISKSIZE))\n self.put(self.SIZE/2, self.SIZE/2-1,\n Disk(self.SIZE/2, self.SIZE/2-1, 0, self, self.DISKSIZE))\n self.put(self.SIZE/2, self.SIZE/2,\n Disk(self.SIZE/2, self.SIZE/2, 255, self, self.DISKSIZE))", "def test_partition(self):\n # one swap at the end\n list = [5, 6, 7, 8, 9, 2]\n partition(list, 0, 5)\n # assert list == [2, 6, 7, 8, 9, 5] # should be improved in future", "def grow(self):\n # expansion - get all fanins of this gate, except for ones already in\n next_frontier = set()\n added = 0\n remove = set()\n for g in self.frontier:\n new_fin = len((self.ckt[g].fins - self.members)) - 1\n if (new_fin + self.w) < self.max_w:\n print \"Adding\", g, \"to partition\"\n # add this to the partition\n self.members.add(g)\n next_frontier |= self.ckt[g].fins - self.members\n self.w += new_fin + 1\n else:\n remove.add(g)\n self.frontier = next_frontier\n if len(self.frontier) == 0:\n return None\n else:\n return True", "def choose_partition():\n # Ask the user wether the partitions should be 
taken from the original partitions, or from the home-made partitions\n file_name = selector([\"The original partition given by the instructor\", \"The homemade partition file\"], [\"ORIGINAL\", \"HOMEMADE\"])\n\n # Open the corresponding file\n if file_name == \"1\" or file_name == \"ORIGINAL\":\n file = open(\"./assets/partitions.txt\", \"r\")\n elif file_name == \"2\" or file_name == \"HOMEMADE\":\n file = open(\"./assets/homemade_partitions.txt\", \"r\")\n\n skip_lines(-1)\n\n # Print all song's names in the partitions\n lines = file.readlines()\n file.close()\n for i in range(0, len(lines), 2):\n print(lines[i][:-1])\n\n # Ask the user to choose for a song\n song_index = choose_number(len(lines) / 2)\n\n # Get the corresponding song's partition and convert notes to Note instances\n partition = lines[song_index * 2 - 1][:-1].replace(' ', '')\n raw_notes = get_notes_from_line(partition)\n parsed_notes = [Note(note) for note in raw_notes]\n return parsed_notes", "def _download_input_chunk(self):\n #FIXME choose the mip level based on the chunk key\n volume = Precomputed(self._storage)\n self._data = volume[self._xmin:self._xmax,\n self._ymin:self._ymax,\n self._zmin:self._zmax]", "def _get_partitions():\n partitions = []\n\n try:\n with files.FileReader('/proc/partitions') as f:\n lines = f.readlines()[2:]\n for line in lines:\n _, _, _, name = line.split()\n if name[-1].isdigit():\n partitions.append(name)\n # This will catch access denied and file not found errors, which is expected\n # on non-Linux/limited access systems. All other errors will raise as normal.\n except files.Error:\n pass\n\n return partitions", "def on_add_clicked(self,button):\n\t\tself.list_partitions.add_partition()", "def open(self, dataset: DatasetDB):\n self._es.open_index(dataset_records_index(dataset.id))", "def partition(self, sep):\n return asarray(partition(self, sep))", "def open(self):\n return xr.open_dataset(self)", "def open(self, mode):\n if not self._open:\n self.grp = zarr.open_group(self.store_path, mode)[self.group_path]\n self._open = True", "def get_partition(self, partition_spec):\n return self.partitions[partition_spec]", "def __init__(self, width, partition_points):\n self.width = width\n self.a = Signal(width, reset_less=True)\n self.b = Signal(width, reset_less=True)\n self.partition_points = PartitionPoints(partition_points)\n self.mwidth = len(self.partition_points)+1\n self.output = Signal(self.mwidth, reset_less=True)\n if not self.partition_points.fits_in_width(width):\n raise ValueError(\"partition_points doesn't fit in width\")", "def __init__(self):\n super().__init__()\n self.__size = 0\n self.__capacity = self.INITIAL_CAPACITY\n self.__array = self.__get_new_array(self.__capacity)\n\n self.__getitem_status = self.GETITEM_NIL\n self.__insert_status = self.GETITEM_NIL\n self.__delete_status = self.DELETE_NIL", "def open(self, chunks: dict = None):\n return open_nwmdataset(self,chunks=chunks)", "def __missing__(self, key):\n # deferred import so arrayjob can be used without load_memmap_offset\n from cgp.utils.load_memmap_offset import open_memmap\n return open_memmap(\"%s/%s.npy\" % (self.pardir, key), **self.kwargs)", "def indices(self):\n return self._kbounded_partitions", "def partition(array, first, last):\n # partition up until final value\n pivot = array[last]\n i = first - 1\n\n for count in range(first, last):\n # split array\n if array[count] < pivot:\n i += 1\n # assign array positions\n array[i],array[count] = array[count],array[i]\n # reassign\n array[i+1],array[last] = 
array[last],array[i+1]\n return (i+1)", "def take_snapshot(self):\r\n self.snapshot = self.name, self.size, copy.copy(self.cells)\r\n self.bucket_array.take_snapshot()", "def num_partitions(self): # -> Unknown:\n ...", "def set_partition(self, partition=0):\n if not isinstance(partition, int):\n raise TypeError('partition must be an integer')\n if partition <= 0:\n raise ValueError('partition must be positive')\n if self.connected:\n self.producer.send(\"PART:\"+str(partition))", "def do_stage_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_stage_partition: part: %s\", part)", "def addPartition(self,partitionData):\n self.PCAs[partitionData.id] = partitionData\n self.pcaStatemachineLock[partitionData.id] = threading.Lock()\n self.StateMachineForPca[partitionData.id] = Statemachine(self.StateMachineFile,\"Unconfigured\")\n self.isPCAinTransition[partitionData.id] = False\n self.pcaSequenceNumber[partitionData.id] = 0", "def partition(self):\n return self.tag(\"partition\")", "def _load_array(self, array_name, fam=None):\n raise OSError(\"No lazy-loading implemented\")", "def prevSplit(self):\n pass", "def on_splitter_size_allocate(self, splitter, allocation):\n\t\tself.emit('splitter-position-changed')", "def __init__(self, growth_factor=2):\n self._length = 0 # Number of elements in array\n self._capacity = 1 # Capacity of array before expanding\n self._arr = self._create_array(self._capacity) # Compact array of pointers\n self._growth_factor = max(2, growth_factor) # Factor to grow array when capacity reached", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def get_partition(self, partid):\n #TODO(zhengda) add implementation later.", "def get_partitions(self):\n return self.partitions", "def bin_open(img: np.ndarray, disk_size: int) -> np.ndarray:\n selem = disk(disk_size)\n res = binary_opening(img, selem)\n return res", "def _setPartedPartition(self, partition):\n log_method_call(self, self.name)\n\n if partition is not None and not isinstance(partition, parted.Partition):\n raise ValueError(\"partition must be None or a parted.Partition instance\")\n\n log.debug(\"device %s new partedPartition %s\", self.name, partition)\n self._partedPartition = partition\n self.updateName()", "def __init__(self, period):\n\n self.open = StreamArray(period)\n self.high = StreamArray(period)\n self.low = StreamArray(period)\n self.close = StreamArray(period)", "def _loadpart(self, part):\n new_partidx = util.Partname(part.partname).idx\n for idx, seq_part in enumerate(self._values):\n partidx = util.Partname(seq_part.partname).idx\n if partidx > new_partidx:\n self._values.insert(idx, part)\n return\n self._values.append(part)", "def __init__(self, k: int):\n self.front = 0\n self.rear = 0\n self.capacity = k + 1\n self.arr = [0 for _ in range(self.capacity)]", "def initialize(self):\n\t\ttable = NetworkTables.getTable('SmartDashboard')\n\t\tinitial_pos = table.getData('initial_pos')\n\t\t#XXX Doing this will start the path following immediately. 
May want this\n\t\t# in the execute or something\n\t\tpath = self.dt.which_path(initial_pos)", "def __init__(self, nrows):\n\n # create the data array\n dummy_list = []\n\n # set the row number\n self._nrows = nrows\n\n # append the reuqested number of None\n # for index in range(nrows):\n # self._data.append(None)\n\n # append the reuqested number of None\n # perhaps a bit faster than the above lines\n self._data = list(map(dummy_list.append, range(nrows)))\n\n # set the row number\n self._nrows = nrows", "def partition(data, num_partitions=None, by=None, **kwargs):\n return Component(\n \"Partition\",\n arguments={\n 'data': Component.of(data),\n 'num_partitions': Component.of(num_partitions),\n 'by': Component.of(by)\n },\n options={\n \n },\n constraints=kwargs)" ]
[ "0.5840582", "0.5428586", "0.5294434", "0.52601844", "0.51751643", "0.50962955", "0.50962955", "0.509415", "0.5024122", "0.5011268", "0.49543542", "0.4883474", "0.48718578", "0.48603144", "0.48522878", "0.48517695", "0.4839195", "0.48225853", "0.48027864", "0.48000965", "0.4790247", "0.47798887", "0.4720501", "0.47029617", "0.46842965", "0.4682525", "0.4682525", "0.46814677", "0.46763003", "0.46511462", "0.4649878", "0.46495765", "0.46389672", "0.4622136", "0.46061173", "0.45910683", "0.4584389", "0.45805055", "0.45802802", "0.45794904", "0.45771983", "0.45748523", "0.45747092", "0.457036", "0.45691523", "0.45593444", "0.45571756", "0.4557058", "0.45556217", "0.45504922", "0.45442882", "0.4525282", "0.45250976", "0.45197967", "0.45153186", "0.45153186", "0.45109043", "0.45079452", "0.4506449", "0.45022196", "0.44995156", "0.44910493", "0.44774902", "0.44759083", "0.44756046", "0.44744152", "0.44724086", "0.4470217", "0.44698712", "0.44659707", "0.4462444", "0.44618163", "0.44603375", "0.44594052", "0.44553822", "0.44536054", "0.44517532", "0.44502246", "0.44423303", "0.4438988", "0.44290558", "0.44272983", "0.44218904", "0.4417824", "0.4414225", "0.44139224", "0.44068182", "0.4406671", "0.44063008", "0.4403438", "0.44031662", "0.44031662", "0.4402446", "0.44017848", "0.43955085", "0.43933836", "0.439055", "0.43902653", "0.4387042", "0.43869588", "0.43843147" ]
0.0
-1