Dataset schema (column name, type, observed size range):

query             stringlengths    9 .. 9.05k
document          stringlengths    10 .. 222k
metadata          dict
negatives         listlengths      30 .. 30
negative_scores   listlengths      30 .. 30
document_score    stringlengths    4 .. 10
document_rank     stringclasses    2 values
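The rows below are retrieval triplets: a natural-language query (typically a docstring), the positive code document it describes, thirty mined negatives with their scores, plus the positive's own score and rank. A minimal sketch of loading and inspecting one such row with the Hugging Face datasets library; the dataset path is a placeholder, not the real identifier:

# Minimal loading sketch; "user/code-retrieval-triplets" is a hypothetical path.
from datasets import load_dataset

ds = load_dataset("user/code-retrieval-triplets", split="train")
row = ds[0]

print(row["query"])                # docstring-style description
print(row["document"][:200])       # positive code snippet (truncated)
print(len(row["negatives"]))       # 30 hard negatives per row
print(row["negative_scores"][:5])  # scores for the first few negatives
print(row["document_score"], row["document_rank"])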
Delete the upload ticket (to be used once get_last_uploaded_byte() == total file size). Makes a DELETE request to the given URI, removing the upload ticket and setting the upload status to "processing".
def delete_upload_ticket(self, complete_uri):
    url = self.config['apiroot'] + complete_uri
    log.info("Requesting %s" % url)
    r = HTTPClient().fetch(url, method="DELETE", headers=self.standard_headers,
                           validate_cert=not self.config['dev'])
    log.info("Upload completed: status code: %d" % r.code)
    if r.code == 201:
        _id = r.headers['location'].split('/')[-1]
        return _id
    raise ValueError("Upload completion unsuccessful")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, _uri):\n print(\"Deleting '%s'\"%(_uri))\n response = self.__httpsRequest('DELETE', _uri, '')", "def delete(self):\n try:\n flash_message = request.json[\"flash_message\"]\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, current_user.net_id, request.json[\"folder_name\"])\n request_submitted = path.exists(\"{0}request.submitted\".format(folder_path))\n request_processed = path.exists(\"{0}request.processed\".format(folder_path))\n request_returned = path.exists(\"{0}request.returned\".format(folder_path))\n request_voided = path.exists(\"{0}request.voided\".format(folder_path))\n filename = \"{0}{1}\".format(folder_path, request.json.get(\"file\", None))\n\n if request.json['delete_request'] and not request_submitted and not request_processed and not request_returned and not request_voided:\n rmtree(folder_path, True)\n\n if flash_message:\n flash(\"Request deleted successfully.\", \"success\")\n\n return jsonify({\"success\": True, \"type\": \"success\", \"message\": \"Request deleted successfully.\"})\n elif request.json['delete_request']:\n if flash_message:\n flash(\"This request can no longer be deleted.\", \"error\")\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"This request can no longer be deleted.\"})\n\n if filename and path.exists(filename):\n remove(filename)\n\n return jsonify({\"type\": \"success\", \"message\": \"File successfully deleted.\"})\n except Exception as e:\n print(e)\n pass\n\n return jsonify({\"type\": \"error\", \"message\": \"Invalid request format or origin.\"})", "def delete_upload(arn=None):\n pass", "def delete_file(self, upload_id: str, file_path: str, token: str) \\\n -> Upload:\n data, _, _ = self.json('delete', f'/{upload_id}/{file_path}', token)\n return self._parse_upload_status(data)", "def delete(cls, uri):\n return cls._perform_request(uri, 'DELETE')", "def remote_abortUpload(self, upload_id):\n self.transfers_register.deallocate_upload_slot(upload_id)\n\n # Also remove the file if the upload is already completed\n completed = settings.completed_root.child(upload_id)\n if completed.exists():\n completed.remove()", "def delete_file(self, filepath):\n self.ftp.delete(filepath)", "def do_DELETE(self): # pylint: disable=missing-docstring\n self._set_headers(204)\n (resource, id) = self.parse_url(self.path)\n\n if resource == \"comments\":\n delete_comment(id)\n elif resource == \"posts\":\n delete_post(id)\n elif resource == \"categories\":\n delete_category(id)\n elif resource == \"posttags\":\n remove_tag(id)\n \n self.wfile.write(\"\".encode())", "def delete(self):\n request_data = request.get_json(force=True)\n current_path = self.get_current_path()\n file_name = request_data.get('file_name')\n\n if not file_name:\n abort(400, message=\"File name must not be empty!\")\n\n full_path = os.path.join(current_path, file_name)\n\n if not os.path.exists(full_path):\n abort(400, message=\"File was not found in current path!\")\n\n if not os.path.isfile(full_path):\n abort(400, message=\"File name is not a file!\")\n\n if not self.is_allowed(full_path):\n abort(403, message=\"You are not allowed to this path\")\n\n os.remove(full_path)\n\n return {\"message\": \"OK\"}", "def delete(self, *route, **req_data):\n # Read the file ID from the request, with safety.\n try:\n file_id = UUID(req_data['file_id']).hex\n except ValueError:\n return Response(status='400 Bad Request')\n\n # Retrieve and delete the file.\n stored_files = StoredFile.collection()\n to_delete = 
stored_files.first(id=file_id)\n\n log_activity('%s deleted file %s'%(\n context.user.link, to_delete.filename\n ))\n\n stored_files.delete(to_delete)\n get_bucket().delete(to_delete.data_id)\n\n return Response(status='200 OK')", "def rm(self, uri):\n path = osaka.utils.get_uri_path(uri)\n try:\n osaka.utils.LOGGER.debug(\"Removing {0} as a file\".format(uri))\n self.webdav.delete(path)\n except Exception as e:\n osaka.utils.LOGGER.debug(\n \"Removing {0} as a directory, file encountered error {1}\".format(uri, e)\n )\n self.webdav.rmdir(path)", "def delete(self, filename):\n pass", "def _delete(self, uri, headers=None):\n if self.openam_url[-1:] == '/':\n openam_path = self.openam_url + uri\n else:\n openam_path = self.openam_url + \"/\" + uri\n\n try:\n data = requests.delete(openam_path, headers=headers, timeout=self.timeout, verify=self.verify)\n except requests.exceptions.RequestException as e:\n data = {'error': e}\n return data", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def delete(self,filename):\n\n try:\n self.ftp.delete(filename)\n except:\n print('Error deleting remote file:%s'%filename)\n return 1\n\n return 0", "def _cleanup_uploads(self):\n logger.debug(\"Performing blob upload cleanup\")\n\n while True:\n # Find all blob uploads older than the threshold (typically a week) and delete them.\n with UseThenDisconnect(app.config):\n stale_upload = model.get_stale_blob_upload(DELETION_DATE_THRESHOLD)\n if stale_upload is None:\n logger.debug(\"No additional stale blob uploads found\")\n return\n\n # Remove the stale upload from storage.\n logger.debug(\"Removing stale blob upload %s\", stale_upload.uuid)\n assert stale_upload.created <= (datetime.utcnow() - DELETION_DATE_THRESHOLD)\n\n try:\n storage.cancel_chunked_upload(\n [stale_upload.location_name], stale_upload.uuid, stale_upload.storage_metadata\n )\n except Exception as ex:\n logger.debug(\n \"Got error when trying to cancel chunked upload %s: %s\",\n stale_upload.uuid,\n ex.message,\n )\n\n # Delete the stale upload's row.\n with UseThenDisconnect(app.config):\n model.delete_blob_upload(stale_upload)\n\n logger.debug(\"Removed stale blob upload %s\", stale_upload.uuid)", "def delete(request):\n wfsxml = request.POST.get('wfsxml', False) # FOR GEOSERVER\n uuid = request.POST.get('uuid', False)\n # MAKE GEOSERVER WFS TRANSACTION\n error = post_to_geoserver(wfsxml, GeoPostBase.wfsURL)\n # ALL GOOD\n if error:\n return server_error(error)\n # IF WFS TRANSACTION ERROR\n else:\n pass\n # Delete photo from bucket\n delete_from_bucket(uuid, GeoPostBase.imageBucket)\n return HttpResponseRedirect(reverse('geopost_home'))", "async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def delete(self, filename, threadID):\n self.lock.acquire()\n removed_file = self.files_on_disk.pop(filename)\n\n # remove file from the directory\n if not os.path.exists(\".storage/\" + filename):\n self.lock.release()\n return \"ERROR: NO SUCH FILE\\n\"\n else:\n os.remove(\".storage/\" + filename)\n i = 0\n j = 0\n while(i<self.size and j<removed_file.num_blocks):\n if(self.disk_mem[i]==removed_file.letter):\n self.disk_mem[i] = \".\"\n j+=1\n i += 1\n print(\"[thread %d] Deleted %s file '%c' (deallocated %d blocks)\" %\n (threadID, removed_file.name, removed_file.letter, removed_file.num_blocks))\n self.show(threadID)\n self.lock.release()\n return \"ACK\\n\"", "def delete(self, host, file):", "def _delete(self, 
url):\n url = urljoin(self.base_url, url)\n try:\n r = self._make_request(**dict(\n method='DELETE',\n url=url,\n auth=self.auth,\n timeout=self.timeout,\n hooks=self.request_hooks,\n headers=self.request_headers\n ))\n except requests.exceptions.RequestException as e:\n raise e\n else:\n if r.status_code >= 400:\n _raise_response_error(r)\n if r.status_code == 204:\n return\n return r.json()", "def do_delete_request(self, uri, headers, timeout_ms):\n return self._do_request('DELETE', uri, headers, None, timeout_ms, None)", "def __deleteLock(self, url):\n response = self._adapter.deleteRequest(url, self._baseHeader)\n return response", "def delete(self, relative_url, accepted_status_codes):\n return self._send_request(\n relative_url, accepted_status_codes, self._session.delete\n )", "def cancel_upload(self):\r\n self.bucket.cancel_multipart_upload(self.key_name, self.id)", "def terminate(self):\n logger.debug(\"terminating\")\n self._upload = None\n self._buf.close()", "def delete(self, uri, body=None, headers=None, auth=False):\n return self.send_request('DELETE', uri, body, headers, auth)", "def delete_file(self, path):\n raise HTTPError(\n 501,\n \"Narrative deletion not implemented here. Deletion is handled elsewhere.\",\n )", "def base_delete(url_path):\n response = requests.delete(url=settings.URL_API + url_path)\n return response" ]
[ "0.67701095", "0.64577615", "0.6345733", "0.61198354", "0.6112959", "0.6029433", "0.59412366", "0.5909342", "0.5896206", "0.5832445", "0.5818534", "0.57876617", "0.5784361", "0.5783744", "0.5763343", "0.57614774", "0.5744645", "0.57401735", "0.5735063", "0.57311624", "0.572804", "0.57015604", "0.56957394", "0.5661615", "0.564834", "0.5642531", "0.5635026", "0.5588969", "0.55822235", "0.5565004" ]
0.7901534
0
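For orientation, the positive document in the row above finalises an upload by issuing a DELETE against the API root and reading the new resource id from the Location header. A rough stand-alone equivalent using the requests library; the URL, URI and headers are placeholder assumptions, not values taken from the dataset:

import requests

# Placeholder values standing in for self.config['apiroot'] and standard_headers.
api_root = "https://api.example.com"
complete_uri = "/me/videos/upload/12345"
headers = {"Authorization": "Bearer <token>"}

r = requests.delete(api_root + complete_uri, headers=headers, verify=True)
if r.status_code == 201:
    new_id = r.headers["Location"].split("/")[-1]
    print("Upload completed, resource id:", new_id)
else:
    raise ValueError("Upload completion unsuccessful")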
Return the world state / HC state co-occurrence matrix.
def get_co_occ_mat(s_hc_ml, n_s_real, n_s_hc):
    co_occs = np.zeros((n_s_hc, n_s_real))
    for idx, n in s_hc_ml.items():
        co_occs[idx] = n
    return co_occs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CTMCtoStormpy(h):\n\tstate_labelling = _buildStateLabeling(h)\n\ttransition_matrix = deepcopy(h.matrix)\n\te = array([h.e(s) for s in range(h.nb_states)])\n\ttransition_matrix /= e[:,newaxis]\n\ttransition_matrix = st.build_sparse_matrix(transition_matrix)\n\tcomponents = st.SparseModelComponents(transition_matrix=transition_matrix,\n\t\t\t\t\t\t\t\t\t\t state_labeling=state_labelling,\n\t\t\t\t\t\t\t\t\t\t rate_transitions=True)\n\tcomponents.exit_rates = e\n\tctmc = st.storage.SparseCtmc(components)\n\treturn ctmc", "def _get_obs(self):\n # return np.concatenate((self.world.state[:6], self.world.state[7:13]))\n return np.concatenate((self.world.state, np.zeros(7)))\n # return self.world.state", "def get_cov_matrix_states(self):\n cov = numpy.diag(numpy.zeros(self.get_num_variables()))\n i = 0\n for v in self.variables:\n cov[i,i] = v.get_covariance()\n i += 1\n return cov", "def _excitonic_coft_all(self,SS,AG):\n \n # SystemBathInteraction\n sbi = AG.get_SystemBathInteraction()\n # CorrelationFunctionMatrix\n cfm = sbi.CC\n \n c0 = AG.monomers[0].get_egcf((0,1))\n Nt = len(c0)\n \n Nst = AG.HamOp.dim\n ct = numpy.zeros((Nst,Nt),dtype=numpy.complex128)\n\n # electronic states corresponding to single excited states\n import time\n timecount = 0\n elst = numpy.where(AG.which_band == 1)[0]\n start = time.time()\n for el1 in elst:\n for el2 in elst:\n coft = cfm.get_coft(el1-1,el2-1)\n start2 = time.time()\n for kk in AG.vibindices[el1]:\n for ll in AG.vibindices[el2]:\n ct[:,:] += numpy.dot(\n numpy.expand_dims((SS[kk,:]**2)*(SS[ll,:]**2),axis=1),\n numpy.expand_dims(coft,axis=0))\n stop2 = time.time()\n timecount += stop2 - start2\n stop = time.time()\n print(stop-start,stop-start - timecount)\n return ct", "def c_():\r\n c = np.array([[0, 0], [0, 100], [100, 100], [100, 80], [20, 80],\r\n [20, 20], [100, 20], [100, 0], [0, 0]])\r\n return c", "def get_system_state_matrix(self):\n\n # Create empty matrix to describe system state\n A = zeros((9,9))\n\n # Q-dot terms\n A[0:3,3:6] = 0.5*identity(3)\n\n # X and Y gravity terms and Z rotation\n A[3:6,0:3] = matrix([\n [2*self.params['g']*self.params['lp']*self.params['Mp']/(self.params['Ix']), 0, 0],\n [0, 2*self.params['g']*self.params['lp']*self.params['Mp']/(self.params['Iy']), 0],\n [0, 0, 2*self.params['g']*self.params['Fspin_z']/(self.params['Iz'])]\n ])\n # Friction in pendulum rotation on pendulum\n A[3:6,3:6] = matrix([\n [self.params['Bx']/(self.params['Ix']), 0, 0],\n [0, self.params['By']/(self.params['Iy']), 0],\n [0, 0, self.params['Bz']/(self.params['Iz'])]\n ])\n # Friction in wheel rotation on pendulum\n A[3:6,6:9] = matrix([\n [-self.params['Bw']/(self.params['Ix']), 0, 0],\n [0, -self.params['Bw']/(self.params['Iy']), 0],\n [0, 0, -self.params['Bw']/(self.params['Iz'])]\n ])\n # Friction in wheel rotation on wheel\n A[6:9,6:9] = -self.params['Bw']/self.params['Iw']*identity(3)\n\n return A", "def coherence(self):\r\n\r\n #XXX Calculate this from the standard output, instead of recalculating\r\n #the coherence:\r\n\r\n tseries_length = self.input.data.shape[0]\r\n spectrum_length = self.spectrum.shape[-1]\r\n coherence = np.zeros((tseries_length,\r\n tseries_length,\r\n spectrum_length))\r\n\r\n for i in range(tseries_length):\r\n for j in range(i, tseries_length):\r\n coherence[i][j] = tsa.coherence_spec(self.spectrum[i][j],\r\n self.spectrum[i][i],\r\n self.spectrum[j][j])\r\n\r\n idx = tril_indices(tseries_length, -1)\r\n coherence[idx[0], idx[1], ...] 
= coherence[idx[1], idx[0], ...].conj()\r\n\r\n return coherence", "def chol_covar(self) -> np.ndarray:\n if self._chol_covar is None:\n self._chol_covar = batched_cholesky(self.covar)\n return self._chol_covar", "def get_global_stiffness_matrix(self) -> NDArray[np.float64]:\n\n s = self.sin()\n c = self.cos()\n matrix_helper = [\n c ** 2,\n c * s,\n -(c ** 2),\n -c * s,\n c * s,\n s ** 2,\n -c * s,\n -(s ** 2),\n -(c ** 2),\n -c * s,\n c ** 2,\n c * s,\n -c * s,\n -(s ** 2),\n c * s,\n s ** 2,\n ]\n return (\n self.youngs_modulus\n * self.area\n / self.get_length()\n * np.array(matrix_helper, dtype=np.float64).reshape(4, 4)\n )", "def coaccessible_components(self):\n DG = self.digraph().reverse()\n coaccessible_states = DG.breadth_first_search(\n [_.label() for _ in self.iter_final_states()])\n return self.induced_sub_finite_state_machine(\n [self.state(_) for _ in coaccessible_states])", "def get_features_critic(state):\n # reshape to make it a matrix with one row (so we can transpose it later)\n p, v = state\n p_v = np.array([p, v]).reshape((1, -1)).T\n X = np.array([p_v - c_entry.T for c_entry in C])\n inv_cov = np.linalg.inv(np.diag([0.04, 0.0004]))\n phi = np.array([np.exp(-(xi.T @ inv_cov @ xi) / 2) for xi in X])\n\n return np.squeeze(phi) # get rid of 2 unnecessary dimensions", "def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue", "def scc(self):\n if not self.is_square:\n raise DMNonSquareMatrixError('Matrix must be square for scc')\n\n return self.rep.scc()", "def state_descriptor(self, state, player):\n\n\t\tboard = np.copy(state.board)\n\n\t\tnp.place(board, board == -1, 0)\n\t\tnp.place(board, board == player, 3)\n\t\tnp.place(board, board == 3 - player, -1)\n\t\tnp.place(board, board == 3, 1)\n\n\t\tmy_cows = state.cows[player - 1]\n\t\ten_cows = state.cows[2 - player]\n\n\t\treturn (self.totuple(board), my_cows, en_cows)", "def bc_outgoing_mat(n, h, k):\n \n d = [1.0, 2.0j*k*h]\n i = [n-1, n-1]\n j = [n-2, n-1]\n return scipy.sparse.coo_matrix((d, (i, j)))", "def cofactor_matrix(self):\n resp = []\n len_b = len(self.take_vec())\n for i in range(self.order):\n _matrix = aux.cofactor(self.take_matrix(),\n (i, self.order-1)\n )\n _resp = math.pow(-1, len_b-1)\n _resp = _resp * np.linalg.det(_matrix)\n _resp = _resp * math.pow(-1, i * (self.order-1))\n resp.append(int(round(_resp)))\n\n return resp", "def appearance_space(state_data, C):\n return np.dot(C, state_data)", "def MCtoStormpy(h):\n\tstate_labelling = _buildStateLabeling(h)\n\ttransition_matrix = h.matrix\n\ttransition_matrix = st.build_sparse_matrix(transition_matrix)\n\tcomponents = st.SparseModelComponents(transition_matrix=transition_matrix,\n\t\t\t\t\t\t\t\t\t\t state_labeling=state_labelling)\n\tmc = st.storage.SparseDtmc(components)\n\treturn mc", "def _C(self):\n\n # Find the local x and y coordinates at each node\n xi = 0\n yi = 0\n xj = self.width()\n yj = 0\n xm = xj\n ym = self.height()\n xn = 0\n yn = ym\n\n # Calculate the [C] coefficient matrix\n C = array([[1, xi, yi, xi**2, xi*yi, yi**2, xi**3, xi**2*yi, xi*yi**2, yi**3, xi**3*yi, xi*yi**3],\n [0, 0, 1, 0, xi, 2*yi, 0, xi**2, 2*xi*yi, 3*yi**2, xi**3, 3*xi*yi**2],\n [0, -1, 0, -2*xi, -yi, 0, -3*xi**2, -2*xi*yi, -yi**2, 0, -3*xi**2*yi, -yi**3],\n \n [1, xj, yj, xj**2, xj*yj, yj**2, xj**3, xj**2*yj, xj*yj**2, yj**3, xj**3*yj, xj*yj**3],\n [0, 0, 1, 0, xj, 2*yj, 0, xj**2, 2*xj*yj, 3*yj**2, 
xj**3, 3*xj*yj**2],\n [0, -1, 0, -2*xj, -yj, 0, -3*xj**2, -2*xj*yj, -yj**2, 0, -3*xj**2*yj, -yj**3],\n\n [1, xm, ym, xm**2, xm*ym, ym**2, xm**3, xm**2*ym, xm*ym**2, ym**3, xm**3*ym, xm*ym**3],\n [0, 0, 1, 0, xm, 2*ym, 0, xm**2, 2*xm*ym, 3*ym**2, xm**3, 3*xm*ym**2],\n [0, -1, 0, -2*xm, -ym, 0, -3*xm**2, -2*xm*ym, -ym**2, 0, -3*xm**2*ym, -ym**3],\n\n [1, xn, yn, xn**2, xn*yn, yn**2, xn**3, xn**2*yn, xn*yn**2, yn**3, xn**3*yn, xn*yn**3],\n [0, 0, 1, 0, xn, 2*yn, 0, xn**2, 2*xn*yn, 3*yn**2, xn**3, 3*xn*yn**2],\n [0, -1, 0, -2*xn, -yn, 0, -3*xn**2, -2*xn*yn, -yn**2, 0, -3*xn**2*yn, -yn**3]])\n \n # Return the coefficient matrix\n return C", "def get_co_occ_matrix(res, last_n, s_real, s_hc):\n\n s = np.array(res['s'])[-last_n:]\n hc_ml = np.array(res['hc_ml'])[-last_n:]\n co_occs = np.zeros((len(s_hc), len(s_real)))\n for si, hc_mli in zip(s, hc_ml):\n co_occs[s_hc.index(hc_mli), s_real.index(si)] += 1\n co_occs /= co_occs.sum()\n\n return co_occs", "def state(self):\n state = np.array(self.get_state_arr())\n om = utils.build_occupancy_maps(utils.build_humans(state))\n # We only have a batch of one so just get the first element of\n # transform and rotate\n state = utils.transform_and_rotate(state.reshape((1, -1)))[0]\n return torch.cat((state, om), dim=1)", "def ws(self, inhom_object):\n z = inhom_object.zeta\n # define bohr frequencies of eigenstates--currently a and b are \n # correlated \n wg = 0.0 + 0*z\n wa = z + self.wa_central\n wb = z + self.wb_central\n w2a = 2*wa - self.a_coupling\n w2b = 2*wb - self.b_coupling\n wc = wa + wb - self.ab_coupling\n # define coherence frequencies\n w_gg = wg - wg\n w_ag = wa - wg\n w_bg = wb - wg\n w_2ag = w2a - wg\n w_2bg = w2b - wg\n w_cg = wc - wg\n # array aggregates all frequencies to match state vectors\n w = np.array( [w_gg, \n w_ag, w_bg,\n w_2ag, w_2bg, w_cg\n ])\n return w", "def get_input_state_matrix(self):\n\n # state matrix for control\n B = zeros((9,3))\n\n # control term impact on pendulum\n B[3:6,0:3] = matrix([\n [self.params['Km']/(self.params['Ix'])/self.params['Rm'], 0, 0],\n [0, self.params['Km']/(self.params['Iy'])/self.params['Rm'], 0],\n [0, 0, self.params['Km']/(self.params['Iz'])/self.params['Rm']]\n ])\n # control term impact on wheel\n B[6:9,0:3] = self.params['Km']/self.params['Iw']/self.params['Rm']*identity(3)\n\n return B", "def covar(self) -> np.ndarray:\n if self._covar is None:\n self._covar = batched_inv_spd(batched_cholesky(self._inv_covar))\n return self._covar", "def scc(self):\n return self.to_ddm().scc()", "def conditional_component_covs(self):\n return np.array([d.conditional_cov() for d in self.conditionalMVNs])", "def cudnn_lstm_state(lstm_cell_state):\n h = tf.stack([s.h for s in lstm_cell_state])\n c = tf.stack([s.c for s in lstm_cell_state])\n return (h, c)", "def visualizeC(self, M=None):\n try:\n import seaborn as sns\n import matplotlib.pyplot as plt\n except:\n print(\"Seaborn or matplotlib not imported...can't build the heatmap\")\n if M is None:\n M = self.stateC\n a = torch.argmax(M, dim=0)\n print(self.find_TPname(a))\n M = pd.DataFrame(M.numpy(), index=list(\n self.filler2index.keys()), columns=list(self.role2index.keys()))\n sns.heatmap(M, annot=True, cmap=\"Blues\")\n plt.show()", "def m_c(self) -> np.ndarray:\n assert self._k is not None, \"camera must be calibrated\"\n return forge_projective_matrix(self._k)", "def get_constr_obs_states_high(self):\n constrHi = numpy.empty(self.get_num_variables())\n i = 0\n for v in self.variables:\n constrHi[i] = v.get_constraint_high()\n i += 1\n return 
constrHi" ]
[ "0.635167", "0.59943235", "0.58283806", "0.5816899", "0.5784517", "0.57605594", "0.5751287", "0.5710274", "0.57057977", "0.56717366", "0.5666582", "0.56537753", "0.562778", "0.56184703", "0.56027305", "0.56016433", "0.5601625", "0.55812305", "0.555667", "0.5529863", "0.55265343", "0.54993033", "0.549819", "0.5483802", "0.5482348", "0.5452969", "0.54493135", "0.5430309", "0.542297", "0.5414111" ]
0.63109434
1
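A small worked example for the co-occurrence helper in the row above, reusing its definition verbatim; the three HC states, four world states, and the count vectors in s_hc_ml are illustrative assumptions:

import numpy as np

def get_co_occ_mat(s_hc_ml, n_s_real, n_s_hc):
    co_occs = np.zeros((n_s_hc, n_s_real))
    for idx, n in s_hc_ml.items():
        co_occs[idx] = n
    return co_occs

# Assumed toy data: how often each HC state co-occurs with each world state.
s_hc_ml = {
    0: np.array([5, 1, 0, 0]),
    1: np.array([0, 4, 2, 0]),
    2: np.array([0, 0, 1, 6]),
}

print(get_co_occ_mat(s_hc_ml, n_s_real=4, n_s_hc=3))
# [[5. 1. 0. 0.]
#  [0. 4. 2. 0.]
#  [0. 0. 1. 6.]]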
Return the learned HC state order from the co-occurrence matrix by taking best-matching world location / HC state pairs.
def get_s_order(co_occs, s_hc=None):
    # Greedy approach: just go through items from max to min.
    free_rows, free_cols = [list(range(n)) for n in co_occs.shape]
    s_ord = -np.ones(co_occs.shape[0], dtype=int)
    co_normed = norm_co_occ_matrix(co_occs)
    isrtd = np.unravel_index(co_normed.argsort(axis=None)[::-1], co_occs.shape)
    for irow, icol in zip(isrtd[0], isrtd[1]):
        # If neither row nor column has been taken yet, it's a match!
        if irow in free_rows and icol in free_cols:
            s_ord[icol] = irow
            free_rows.remove(irow)
            free_cols.remove(icol)
            if not len(free_rows) or not len(free_cols):
                break
    # Unmatched HC states go to the end.
    s_ord[s_ord == -1] = free_rows
    # Also sort state name list, if provided.
    s_name_srtd = np.array(s_hc)[s_ord] if s_hc is not None else None
    return s_ord, s_name_srtd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_nearest_state(self): # TODO: Check if we need here state, instead of self.state\n self.stateC = self.toConceptual(self.state)\n CTP, winners = self.find_winner()\n\n state_name = self.find_TPname(filleridx=winners)\n binding = self.find_symBinding(filleridx=winners)\n state_num = self.find_TPnum(stateName=state_name)\n TP_state = self.TP.matmul(fortran_reshape(CTP, (torch.numel(CTP), 1)))\n Cdist = torch.norm(CTP - self.stateC, p='fro') # Frobenius Norm\n Sdist = self.L2norm(TP_state - self.state)\n TP_h = self.calc_harmony(state=TP_state)\n\n return TP_state, winners, state_name, binding, Cdist, Sdist, state_num, TP_h", "def find_winner(self):\n\n # Extract the max value of each col in the conceptual state matrix\n winners_idx = torch.argmax(self.stateC, dim=0)\n\n M = torch.zeros_like(self.stateC)\n # Populate the matrix\n for r in range(M.shape[1]):\n M[winners_idx[r].item(), r] = 1\n return M, winners_idx", "def computer_driver_heuristic(self, pc):\n if pc == self.departure_location:\n return self.nearest_neigbor(pc)\n else:\n # encode state: State -> Generalized One hot vector\n # print(len(self.idx_to_pc)+1)\n encoded_vector = np.zeros(len(self.idx_to_pc)+1)\n\n # indices of locations FOR ENCODING\n pickup_jobs_idx = [self.pc_to_idx[p]+1 for p in list(self.state.P_k.keys())] # +1 is to make room for the time dim\n deliv_jobs_idx = [self.pc_to_idx[p]+1 for p in list(self.state.D_k.keys())]\n\n # indices of locations FOR PC READING\n pickup_jobs_idx_read = [self.pc_to_idx[p] for p in list(self.state.P_k.keys())]\n deliv_jobs_idx_read = [self.pc_to_idx[p] for p in list(self.state.D_k.keys())]\n tasks = set(pickup_jobs_idx_read + deliv_jobs_idx_read)\n\n if len(tasks) > 0:\n # set appropriate values at the index corresponding to the location\n encoded_vector[pickup_jobs_idx] = -0.5\n encoded_vector[deliv_jobs_idx] = 0.5\n encoded_vector[self.pc_to_idx[pc]+1] = 1\n\n # # current time encoded as nb of seconds between 12pm and now/nb seconds between 12pm and 12am\n total_nb_seconds = datetime.timedelta(hours=12, minutes=0)\n cur_time = self.state.t_k.time()\n cur_time = datetime.timedelta(hours=cur_time.hour, minutes=cur_time.minute) # nb of seconds from 12am\n # # TODO this can further be noramlized as most values will be >0 (>6am)\n cur_time = 2 * cur_time.seconds / total_nb_seconds.seconds - 1 # normalized time in [-1,1]\n encoded_vector[0] = cur_time\n\n # predict decision\n pred = self.heuristic_model.predict_proba(encoded_vector.reshape(1,-1))\n\n # take the most probable location among the remaining jobs\n # # set proba to 0 if location not among remaining jobs\n # print(\"##############\")\n # print(\"shape of pred \", pred.shape)\n # print(\"Number of locations considered : \", len(self.idx_to_pc))\n print(\"Possible indices to choose from : \", tasks)\n pred[0, list(set(range(0, len(self.idx_to_pc))) - set(pickup_jobs_idx_read + deliv_jobs_idx_read))] = 0\n\n idx_opt = np.argsort(pred[0,:])[-1] # most probable location (by its index) among remaining jobs\n print(\"Index chosen : \", idx_opt )\n return self.idx_to_pc[idx_opt]\n\n elif len(tasks) == 0:\n return 0\n\n else:\n raise ValueError('Problem with tasks, which has negative length...')", "def _get_state(self):\n # COMPUTE CLASSIFIER_STATE\n predictions = self.model.predict_proba(self.dataset.state_data)[:,0]\n predictions = np.array(predictions)\n idx = np.argsort(predictions)\n # the state representation is the *sorted* list of scores \n classifier_state = predictions[idx]\n \n # COMPUTE ACTION_STATE\n 
unknown_data = self.dataset.train_data[self.indeces_unknown,:]\n # prediction (score) of classifier on each unlabelled sample\n a1 = self.model.predict_proba(unknown_data)[:,0]\n # average distance to every unlabelled datapoint\n a2 = np.mean(self.dataset.distances[self.indeces_unknown,:][:,self.indeces_unknown],axis=0)\n # average distance to every labelled datapoint\n a3 = np.mean(self.dataset.distances[self.indeces_known,:][:,self.indeces_unknown],axis=0)\n next_action_state = np.concatenate(([a1], [a2], [a3]), axis=0)\n return classifier_state, next_action_state", "def search(world_state, robot_pose, goal_pose):\n if world_state.shape[0] == 0 or world_state.shape[1] == 0:\n print(\"Error, empty world_state!!!\")\n return None\n if not is_pos_valid(robot_pose, world_state.shape):\n print(\"Error, invalid robot_pose!!!\", robot_pose)\n return None\n if not is_pos_valid(goal_pose, world_state.shape):\n print(\"Error, invalid goal_pose!!!\", goal_pose)\n return None\n\n directions = [(-1, 0), (1, 0), (0, -1), (0, 1)] # orthogonal directions\n found = False\n\n x, y = robot_pose\n g = 0\n h = heuristic(robot_pose, goal_pose)\n f = g + h\n open = [[f, x, y]]\n came_from = {}\n came_from[robot_pose] = None\n cost_so_far = {}\n cost_so_far[robot_pose] = 0\n\n while open:\n open.sort() # sort based on f value\n current = open.pop(0)\n\n x, y = current[1:]\n g = cost_so_far[(x, y)]\n\n if (x, y) == goal_pose:\n found = True\n break\n else:\n # find available next positions\n for direction in directions:\n x2 = x + direction[0]\n y2 = y + direction[1]\n\n # check whether x2 and y2 are valid\n if not is_pos_valid((x2, y2), world_state.shape):\n continue\n\n g2 = g + 1\n if world_state[x2, y2] == 0 and ((x2, y2) not in cost_so_far or g2 < cost_so_far[(x2, y2)]):\n\n h2 = heuristic((x2, y2), goal_pose)\n f2 = g2 + h2\n open.append([f2, x2, y2])\n came_from[(x2, y2)] = (x, y)\n cost_so_far[(x2, y2)] = g2\n if found:\n path = [goal_pose]\n current = goal_pose\n while came_from[current]:\n current = came_from[current]\n path.append(current)\n\n path.reverse()\n return path\n\n else:\n return None", "def find_topo_order(s,graph):\n\n ## initialization\n matrix = graph.get_adjacency()\n n, c = matrix.shape\n sym_matrix = np.empty((n,c), dtype=object)\n # cost_matrix = np.zeros((n,c))\n cache = {}\n\n def symbolize(i,j):\n \"given two indices, create a symbolic variable\"\n s = z.Int('edge_{0}{1}'.format(i,j))\n return s\n\n\n def value_of(i,j):\n \"given two indices, return the (i,j)th value in the adjacency matrix\"\n return sym_matrix[i][j]\n\n\n def constraint_1(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c1\" + str((n,i,j,k))\n constraint = (y_ij + y_jk - y_ik) <= 1\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_2(n,i,j,k):\n y_ij = value_of(i,j)\n y_jk = value_of(j,k)\n y_ik = value_of(i,k)\n\n name = \"c2\" + str((n,i,j,k))\n constraint = (-y_ij - y_jk + y_ik) <= 0\n\n # if name not in cache:\n # cache[name] = constraint\n s.assert_and_track(constraint, name)\n\n\n def constraint_3(symbolic):\n s.add(z.Or([symbolic == 0, symbolic == 1]))\n\n\n def int_formulation(j):\n left = z.Sum([matrix[k][j] * sym_matrix[k][j] for k in range(j)])\n right = z.Sum([matrix[l][j] * (1 - sym_matrix[j][l]) for l in range(j+1, n)])\n\n return [left, right]\n\n\n ## constraint 3, every edge must be a 0 or a 1, we get the 0 or 1 directly\n ## from the adjacency matrix\n ## we do this first so that the 
sym_matrix is populated\n for n_iter in range(n):\n for j in range(n_iter+1):\n for i in range(j):\n s_edge = symbolize(i,j)\n sym_matrix[i][j] = s_edge\n constraint_3(s_edge)\n\n ## Iteration for triangle inequalities\n for n_iter in range(n):\n for k in range(n_iter+1):\n for j in range(k):\n for i in range(j):\n constraint_1(n_iter,i,j,k)\n constraint_2(n_iter,i,j,k)\n\n\n ## minimization\n o = z.Optimize()\n y = z.Int('y')\n\n y = z.Sum(u.flatten([int_formulation(j) for j in range(n)]))\n o.minimize(y)\n\n result = []\n\n if s.check() == z.sat:\n result = s.model()\n\n return result", "def _get2dCostMatrix(c_hat, time_step_indices: TimeStepIndices, track_count, measurement_count):\n\n # Compute track-measurement cost for single scan problem from hypothesis\n # cost, including null assignment costs\n # Construct track to measurement assignment matrix at scan k\n cost = np.full((track_count, measurement_count), np.inf)\n nullCost = np.full((track_count,), np.inf)\n # store index of the single target hypothesis with the minimum cost for\n # each track and measurement\n idxCost = np.full((track_count, measurement_count), -1, dtype=np.int32)\n idxNullCost = np.full((track_count,), -1, dtype=np.int32)\n for track_id, trackNull_index_for_track in enumerate(time_step_indices.trackNull_index):\n if len(trackNull_index_for_track):\n min_index = c_hat[trackNull_index_for_track].argmin()\n min_index_all_c_hat = trackNull_index_for_track[min_index]\n nullCost[track_id] = c_hat[min_index_all_c_hat]\n idxNullCost[track_id] = min_index_all_c_hat\n # find single target hypotheses in track i that use this\n # measurement if found, find the single target hypothesis with\n # the minimum cost, and record its index\n for measurement in range(measurement_count):\n measTrack_index_for_meas = time_step_indices.measTrack_index[measurement, track_id]\n if len(measTrack_index_for_meas):\n min_index = c_hat[measTrack_index_for_meas].argmin()\n min_index_all_c_hat = measTrack_index_for_meas[min_index]\n cost[track_id, measurement] = c_hat[min_index_all_c_hat]\n idxCost[track_id, measurement] = min_index_all_c_hat\n\n # Create cost matrix for nulls with null costs on diagonal and inf\n # elsewhere (so we can have any number of null assignments)\n nullCostMatrix = np.full((track_count, track_count), np.inf, dtype=np.float64)\n # Pick out diagonal entries (like np.diagonal, but this is a writable view)\n nullCostMatrix.ravel()[::track_count + 1] = nullCost\n\n fullCostMatrix = np.concatenate((cost, nullCostMatrix), axis=1)\n return fullCostMatrix, idxCost, idxNullCost", "def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path", "def h(self, state):\n loc = dict((val, (i, j)) for i, row in enumerate(self.goal_state)\n for j, val in enumerate(row))\n\n def calculate_cost(i, j):\n val = state[i][j]\n goal_i, goal_j = loc[val]\n return abs(goal_i - i) + abs(goal_j - j)\n\n return sum(\n calculate_cost(i, j) for i in xrange(self.size)\n for j in xrange(self.size) if state[i][j])", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + 
j:\n msp_h += 1\n return msp_h", "def sort_state(self, best_hyp_indices: mx.nd.NDArray, best_word_indices: mx.nd.NDArray):\n self.prev_target_word_id = best_word_indices\n self.decoder_states = [mx.nd.take(ds, best_hyp_indices) for ds in self.decoder_states]", "def compute_lookuptable(self):\n\n if self.uselookuptable:\n # Evaluation lookup tables \n self.action_isok = np.zeros( ( self.nodes_n , self.actions_n ) , dtype = bool )\n self.x_next = np.zeros( ( self.nodes_n , self.actions_n , self.DS.n ) , dtype = float ) # lookup table for dynamic\n \n # For all state nodes \n for node in range( self.nodes_n ): \n \n x = self.nodes_state[ node , : ]\n \n # For all control actions\n for action in range( self.actions_n ):\n \n u = self.actions_input[ action , : ]\n \n # Compute next state for all inputs\n x_next = self.DS.fc( x , u ) * self.dt + x\n \n # validity of the options\n x_ok = self.DS.isavalidstate(x_next)\n u_ok = self.DS.isavalidinput(x,u)\n \n self.x_next[ node, action, : ] = x_next\n self.action_isok[ node, action] = ( u_ok & x_ok )", "def top_k_betweenness_centrality(self):\n d={}\n l=[]\n for v in vertices:\n a=self.betweenness_centrality(v)\n d[v]=a\n l.append(a)\n m=max(l)\n l1=[]\n for key in d:\n if d[key]==m:\n l1.append(key)\n\n return l1", "def csuccessors(state):\n M1, C1, B1, M2, C2, B2 = state\n assert B1 is 1 or B2 is 1\n pairs = {} \n moves = [(0,1), (1,0), (1,1), (2,0), (0,2)]\n if C1 > M1 or C2 > M2: \n return pairs # more cannibals than missionaries\n if B1 is 1:\n B1, B2 = 0, 1\n for m, c in moves:\n if M1 - m >= 0 and C1 - c >= 0:\n state = (M1-m, C1-c, B1, M2+m, C2+c, B2)\n action = 'M' * m + 'C' * c + '->'\n pairs[state] = action\n else:\n B1, B2 = 1, 0\n for m, c in moves:\n if M2 - m >= 0 and C2 - c >= 0:\n state = (M1+m, C1+c, B1, M2-m, C2-c, B2)\n action = '<-' + 'M' * m + 'C' * c\n pairs[state] = action\n return pairs", "def determineNextMove(player_location, opponentLocation, coins):\n global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n if opponentLocation in coins_to_search:\n coins_to_search, meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n index = 0\n elif currentcoin == player_location: \n if len(route) != 0:\n old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n \t\n #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n #for c in coins_to_search:\n #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n # coins_to_search.remove(c)\n #break\n \t\t\n #best_weight = float(\"inf\")\n #best_path = []\n #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n #meta_route2 = [player_location] + best_path\n #route2 = u.location_list_to_route(meta_route2, route_matrix)\n #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n if len(route) == 0 or old_dist - new_dist > 3:\n route = route2\n meta_route = meta_route2 \n index = 0\n index += 1\n currentcoin = meta_route[index]\n #api.debug(route)\n return u.direction(player_location, route.pop(0))", "def greedy_match(cost_matrix):\n num_detections, num_tracks = cost_matrix.shape\n distance_1d = cost_matrix.reshape(-1)\n index_1d = np.argsort(distance_1d)\n index_2d = 
np.stack([index_1d // num_tracks, index_1d % num_tracks], axis=1).astype(int, copy=False)\n\n matched_indices = []\n matched_firsts = set()\n matched_seconds = set()\n\n for (first_id, second_id) in index_2d:\n if first_id not in matched_firsts and second_id not in matched_seconds:\n matched_seconds.add(second_id)\n matched_firsts.add(first_id)\n matched_indices.append([first_id, second_id])\n return np.array(matched_indices)", "def heuristic(state, depth):\n if state.has_tic_tac_toe(COMP):\n score = depth + 1\n elif state.has_tic_tac_toe(HUMAN):\n score = -(depth + 1)\n else: # draw/undetermined outcome\n score = 0\n return score", "def get_constr_obs_states_high(self):\n constrHi = numpy.empty(self.get_num_variables())\n i = 0\n for v in self.variables:\n constrHi[i] = v.get_constraint_high()\n i += 1\n return constrHi", "def score(self, candidate_holder, new_scores):\n cand_seqs = candidate_holder.cand_seqs\n cand_states = candidate_holder.cand_states\n cand_syms = cand_seqs[:, -1]\n\n cand_state_value = []\n cand_score_value = []\n for j in range(cand_states[self.state_index].shape[0]):\n cand_state_value.append(cand_states[self.state_index][j][cand_syms[j]])\n cand_score_value.append(cand_states[self.score_index][j][cand_syms[j]])\n ctc_score_result = []\n ctc_score_total = []\n new_states = []\n for i in tf.range(new_scores.shape[0]):\n num_sym_state = np.array([self.init_state] * self.num_classes)\n num_sym_score = np.array([0.0] * self.num_classes, dtype=np.float32)\n num_sym_score_minus = np.array([0.0] * self.num_classes, dtype=np.float32)\n cand_seq = cand_seqs[i]\n ctc_pre_state = cand_state_value[i]\n top_ctc_candidates = np.argsort(new_scores[i, :])\n top_ctc_candidates = sorted(top_ctc_candidates[-self.ctc_beam :].tolist())\n cand_seq = np.array(cand_seq)\n top_ctc_candidates = np.array(top_ctc_candidates)\n ctc_pre_state = ctc_pre_state.numpy()\n ctc_score, new_state = self.cand_score(\n cand_seq, top_ctc_candidates, ctc_pre_state\n )\n ctc_pre_score = tf.cast(cand_score_value[i], tf.float32)\n ctc_score_minus = self.ctc_weight * (ctc_score - ctc_pre_score) + 500\n\n for k in range(len(top_ctc_candidates)):\n num_sym_score[top_ctc_candidates[k]] = ctc_score[k]\n num_sym_score_minus[top_ctc_candidates[k]] = ctc_score_minus[k]\n num_sym_state[top_ctc_candidates[k]] = new_state[k]\n num_sym_score_minus -= 500\n ctc_score_result.append(num_sym_score_minus)\n ctc_score_total.append(num_sym_score)\n new_states.append(num_sym_state)\n cand_states[self.state_index] = tf.convert_to_tensor(np.array(new_states))\n ctc_score_result = tf.convert_to_tensor(np.array(ctc_score_result))\n ctc_score_total = tf.convert_to_tensor(np.array(ctc_score_total))\n cand_states[self.score_index] = ctc_score_total\n return ctc_score_result, cand_states", "def costMatrix(row_feats, col_feats, row_labels, col_labels, metric=\"Pearson\"):\n\n # Get unique label values in non-moving and moving brain\n row_labs = np.asarray(list(set(row_labels).difference({-1, 0})))\n col_labs = np.asarray(list(set(col_labels).difference({-1, 0})))\n\n # Initialize cost matrix\n costMatrix = np.zeros((len(row_labs), len(col_labs)))\n print(costMatrix.shape)\n\n # Compute pairwise costs between all label sets\n for i, r in enumerate(row_labs):\n indr = np.where(row_labels == r)[0]\n lr = len(indr)\n\n if metric in [\"Spearman\",\"Euclidean\",\"Pearson\"]:\n featr = row_feats[indr, :]\n\n for j, c in enumerate(col_labs):\n indc = np.where(col_labels == c)[0]\n \n if metric in [\"Spearman\",\"Euclidean\",\"Pearson\"]:\n featc 
= col_feats[indc, :]\n\n if metric == \"Spearman\":\n [rVal, _] = spearmanr(featr, featc, axis=1)\n rVal = 1-rVal[lr:, 0:lr]\n\n elif metric == \"Pearson\":\n rVal = cdist(featr, featc, metric='Correlation').mean()\n\n elif metric == \"Euclidean\":\n rVal = cdist(featr, featc).mean()\n\n elif metric == \"Dice\":\n rVal = 1-hmg.dice(indr, indc)\n\n elif metric == \"EMD\":\n rmu = row_feats[indr, :].mean(0)\n rmu = rmu/rmu.sum()\n\n cmu = col_feats[indc, :].mean(0)\n cmu = cmu/cmu.sum()\n\n rVal = emd(rmu, cmu)\n\n\n costMatrix[i, j] = rVal\n\n return [row_labs, col_labs, costMatrix]", "def get_observation_neighbour(self):\n state = {}\n for grid_id, grid in self.grids.items():\n drivers = list(grid.get_idle_drivers().values())\n orders = self.get_active_order_neighbour(grid)\n neighbour_drivers = self.get_active_driver_neighbour(grid)\n for driver in drivers:\n driver_id = driver.get_driver_id()\n loc = driver.get_position()\n time = driver.city_time\n neighbour_drivers.remove(driver) # pop itself\n assert driver_id not in state\n state[driver_id] = [(loc, time), orders, neighbour_drivers]\n neighbour_drivers.append(driver) # insert back\n return state", "def state_descriptor(self, state, player):\n\n\t\tboard = np.copy(state.board)\n\n\t\tnp.place(board, board == -1, 0)\n\t\tnp.place(board, board == player, 3)\n\t\tnp.place(board, board == 3 - player, -1)\n\t\tnp.place(board, board == 3, 1)\n\n\t\tmy_cows = state.cows[player - 1]\n\t\ten_cows = state.cows[2 - player]\n\n\t\treturn (self.totuple(board), my_cows, en_cows)", "def solve(list_of_locations, list_of_homes, starting_car_location, adjacency_matrix, params=[]):\n\n loc_map = {}\n drop_off_dict = {}\n num_home_visited = 0\n\n \"\"\"\n for i in range(len(list_of_locations)):\n loc_map[i] = list_of_locations[0]\n \"\"\"\n\n home_indexes = convert_locations_to_indices(list_of_homes, list_of_locations)\n start = list_of_locations.index(starting_car_location)\n graph, msg = adjacency_matrix_to_graph(adjacency_matrix)\n num_homes = len(list_of_homes)\n\n car_path = []\n all_paths = dict(nx.all_pairs_dijkstra(graph))\n visited = set()\n\n #print(start)\n car_path.append(start)\n current_node = start\n\n if start in home_indexes:\n visited.add(start)\n drop_off_dict[start] = [start]\n num_home_visited += 1\n\n while num_home_visited < num_homes:\n dist_dict = all_paths.get(current_node)[0]\n paths_dict = all_paths.get(current_node)[1]\n\n dist_dict = {k:v for (k,v) in dist_dict.items() if k not in visited and k in home_indexes}\n min_dist = min(dist_dict.values())\n min_list = [k for k in dist_dict.keys() if dist_dict[k] <= min_dist]\n #print(dist_dict.values())\n target = min_list[0]\n drop_off_dict[target] = [target]\n #print(target+1)\n #print(target)\n car_path.pop()\n car_path.extend(paths_dict[target])\n\n visited.add(target)\n current_node = target\n num_home_visited += 1\n\n paths_dict = all_paths.get(current_node)[1]\n car_path.pop()\n car_path.extend(paths_dict[start])\n #print((drop_off_dict.keys()))\n #car_path = [start, ...., start]\n #drop_off_dict = {drop_off_loc: [home1, home2, ...] 
}\n\n return car_path, drop_off_dict", "def evaluateBoardState(self, board):\n\n \"\"\"\n These are the variables and functions for board objects which may be helpful when creating your Agent.\n Look into board.py for more information/descriptions of each, or to look for any other definitions which may help you.\n\n Board Variables:\n board.width \n board.height\n board.last_move\n board.num_to_connect\n board.winning_zones\n board.score_array \n board.current_player_score\n\n Board Functions:\n get_cell_value(row, col)\n try_move(col)\n valid_move(row, col)\n valid_moves()\n terminal(self)\n legal_moves()\n next_state(turn)\n winner()\n \"\"\"\n if self.id == 1:\n opponent_id = 2\n else:\n opponent_id = 1\n\n maxvalue = 100000\n minvalue = -maxvalue\n winner = board.winner()\n if winner == self.id:\n return maxvalue\n elif winner == opponent_id:\n return minvalue\n size_y = board.height\n size_x = board.width\n map_ = []\n num_to_connect = board.num_to_connect\n total_points = 0\n\n multiply_reachable = 1\n multiply_oddeven = 1\n # basically this function is calculating all the possible win positions\n # more pieces in a possible win position will be counted with more weights\n # a win position with X pieces in it will be counted as X^2 points\n # initialise the zones maps\n for i in range(size_y):\n map_.append([])\n for j in range(size_x):\n map_[i].append([])\n\n # Fill in the horizontal win positions\n for i in range(size_y):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i][j + k] == self.id:\n points += len(board.winning_zones[j+k][i])\n if (self.id == 1 and i % 2 == 1) or (self.id == 2 and i%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return maxvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if j - 1 >= 0 and board.board[i][j + 3] == 0 and board.board[i][j - 1] == 0 \\\n and board.try_move(j + 3) == i and board.try_move(j - 1) == i:\n return minvalue\n elif j + 4 < size_y and board.board[i][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i][j + k] == 0 and board.try_move(j + k) == i:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the vertical win positions\n for i in range(size_x):\n for j in range(size_y - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[j + k][i] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[j + k][i] == self.id:\n points += len(board.winning_zones[i][j+k])\n if (self.id == 1 and (j+k) % 2 == 1) or (self.id == 2 and (j+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n points *= 
multiply_reachable\n # if opponent_pieces_count == 3 and self_pieces_count == 0:\n # points *= -1\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the forward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - num_to_connect + 1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j + k] == self.id:\n points += len(board.winning_zones[j+k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and opponent_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n points *= multiply_reachable\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if i - 1 >= 0 and j - 1 >= 0 and board.board[i + 3][j + 3] == 0 and board.board[i - 1][j - 1] == 0 \\\n and board.try_move(j + 3) == i + 3 and board.try_move(j - 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j + 4 < size_x and board.board[i + 4][j + 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j + 4) == i + 4 and board.try_move(j) == i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j + k] == 0 and board.try_move(j + k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n\n # Fill in the backward diagonal win positions\n for i in range(size_y - num_to_connect + 1):\n for j in range(size_x - 1, num_to_connect - 1 - 1, -1):\n points = 0\n self_pieces_count = 0\n opponent_pieces_count = 0\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == opponent_id:\n opponent_pieces_count += 1\n elif board.board[i + k][j - k] == self.id:\n points += len(board.winning_zones[j-k][i+k])\n if (self.id == 1 and (i+k) % 2 == 1) or (self.id == 2 and (i+k)%2 == 0):\n points *= multiply_oddeven\n self_pieces_count += 1\n if self_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return maxvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == i:\n return maxvalue\n else:\n for k in range(num_to_connect):\n if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n points *= multiply_reachable\n\n elif opponent_pieces_count == 3 and self_pieces_count == 0:\n if board.board[i + 3][j - 3] == 0 and board.board[i - 1][j + 1] == 0 \\\n and board.try_move(j - 3) == i + 3 and board.try_move(j + 1) == i - 1:\n return minvalue\n elif i + 4 < size_y and j - 4 >= 0 and board.board[i + 4][j - 4] == 0 and board.board[i][j] == 0 \\\n and board.try_move(j - 4) == i + 4 and board.try_move(j) == 
i:\n return minvalue\n # else:\n # for k in range(num_to_connect):\n # if board.board[i + k][j - k] == 0 and board.try_move(j - k) == i + k:\n # points *= -multiply_reachable\n if (opponent_pieces_count == 3 and self_pieces_count == 0) or opponent_pieces_count == 0:\n total_points += points\n return total_points", "def get_pathologic_covering_routes(n_pl, n_target, attacker_strategy, target_values):\n # computes the coefficient used by the greedy oracle to choose routes\n targets_coeff = np.transpose(np.multiply(attacker_strategy, target_values))\n\n # randomly selects the player for which the non optimal choice will be made\n wrong_pl = randint(1, n_pl)\n\n # generate the non optimal route randomly\n n_covered_targets = randint(n_pl,n_target-1)\n non_opt_action = np.zeros(n_target)\n for i in range(0, n_covered_targets):\n random_covered_target = randint(0, n_target-1)\n non_opt_action[random_covered_target] = 1\n\n # compute the value of the non optimal route\n non_opt_val = get_value_single_route(non_opt_action, targets_coeff)\n\n # generate routes that have, as a single, values smaller than the best greedy route but taken togher perform\n # at least as well. [[0,1,...],[...],...] a[r][t]=1 iff t is covered by r.\n # The returned list should have n_pl - 1 routes\n opt_routes = get_opt_routes(n_pl, non_opt_action)\n\n I={}\n for pl in range(1, n_pl+1):\n\n n_r = randint(0, MAX_ROUTES)\n temp = lil_matrix((n_r+1, n_target), dtype='int8')\n\n if pl == wrong_pl:\n # put the non opt route in the bucket\n for t in non_opt_action.nonzero():\n temp[0,t] = 1\n else:\n for t in opt_routes.pop().nonzero():\n temp[0,t] = 1\n\n # generate other random routes with single value less than the non_opt_value\n for r in range(1, n_r):\n new_route = get_r_limited_val(non_opt_val, targets_coeff)\n\n for t in new_route.nonzero():\n temp[r,t] = 1\n\n I[pl] = temp.tocsr()\n\n return I", "def next_state_rowcol(self):\n # state before and after propagation\n self.state_before = self.state_after\n transition_probs = list(Customer.TPM.loc[Customer.TPM.index==self.state_before].values[0])\n self.state_after = random.choices(Customer.STATES, weights=transition_probs)[0]\n # row and col before and after propagation\n self.row_before = self.row_after\n self.col_before = self.col_after\n\n # randomly chose row_after, col_after depending on the state_after\n if self.state_after == 'fruit':\n self.row_after = random.choice(Customer.STATE_ROW_COL['fruit'][0])\n self.col_after = random.choice(Customer.STATE_ROW_COL['fruit'][1])\n elif self.state_after == 'spices':\n self.row_after = random.choice(Customer.STATE_ROW_COL['spices'][0])\n self.col_after = random.choice(Customer.STATE_ROW_COL['spices'][1])\n elif self.state_after == 'dairy':\n self.row_after = random.choice(Customer.STATE_ROW_COL['dairy'][0])\n self.col_after = random.choice(Customer.STATE_ROW_COL['dairy'][1])\n elif self.state_after == 'drinks':\n self.row_after = random.choice(Customer.STATE_ROW_COL['drinks'][0])\n self.col_after= random.choice(Customer.STATE_ROW_COL['drinks'][1])\n elif self.state_after == 'checkout':\n self.row_after = random.choice(Customer.STATE_ROW_COL['checkout'][0])\n self.col_after = random.choice(Customer.STATE_ROW_COL['checkout'][1])", "def get_states(self):\n states = []\n for chords in self.training_data:\n chunks = [chords[x:x+self.order] for x in range(0,\n len(chords), self.order)]\n for chunk in chunks:\n chunk_string = \" \".join(chunk)\n if chunk_string not in states:\n states.append(chunk_string)\n return sorted(states)", 
"def select_observation(self):\n\n # New board to collect the states sampled by the MC agent\n score_board = np.zeros_like(self.observed_board)\n n_samples = self.nb_samples\n\n # +----------+\n # | Task 1 |\n # +----------+\n # Check if there is already an \"open\" hit, i.e. a ship that has been hit but not sunk\n # These locations are handled by the observation_board as 1\n # If there is already a hit, choose a random one to deal with next.\n # Create a score board including that hit, and reduce the number of samples to 1/10\n hit = None\n if 1 in self.observed_board:\n hits = np.where(self.observed_board == 1)\n i = random.choice(range(len(hits[0])))\n hit = (hits[0][i], hits[1][i])\n n_samples /= 10\n\n # +----------+\n # | Task 2 |\n # +----------+\n # Populate the score_board with possible boat placements\n i = 0\n while i < n_samples:\n board = np.zeros_like(self.observed_board)\n for boat_size in self.remaining_ships:\n while True:\n o = random.choice(['h', 'v'])\n x = random.choice(range(self.size - (boat_size - 1 if o == 'h' else 0)))\n y = random.choice(range(self.size - (boat_size - 1 if o == 'v' else 0)))\n\n d = np.hstack((np.zeros((boat_size, 1)), np.arange(boat_size)[:, None])[::(-1 if o == 'h' else 1)])\n boat = tuple((np.array([[x, y]]) + d).astype(int).T)\n\n # Place boats non-overlapping and not on misses or sunken ships.\n if 1 in board[boat] or -1 in self.observed_board[boat]:\n continue\n\n board[boat] = 1\n break\n\n # Sanity check\n assert board.sum() == sum(self.remaining_ships)\n\n if not hit or board[hit] == 1:\n i += 1\n score_board += board\n\n # +----------+\n # | Task 3 |\n # +----------+\n # Having populated the score board, select a new position by choosing the location with the highest score.\n score_board *= np.where(self.observed_board == 1, 0, 1)\n plt.imshow(score_board, cmap='hot')\n plt.show()\n time.sleep(1)\n i_new, j_new = np.unravel_index(np.argmax(score_board), score_board.shape)\n\n # return the next location to query, i_new: int, j_new: int\n return i_new, j_new, score_board", "def bridge_problem3(here):\r\n\r\n def all_over(state):\r\n here, _ = state\r\n return not here or here == set([\"light\"])\r\n\r\n start = (frozenset(here) | frozenset([\"light\"]), frozenset())\r\n return lowest_cost_search(start, bsuccessors2, all_over, bcost)", "def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order" ]
[ "0.5657669", "0.5559561", "0.5372872", "0.53349", "0.5301023", "0.5282986", "0.52419263", "0.5127219", "0.5086206", "0.50741035", "0.5065264", "0.50308555", "0.5025374", "0.50204796", "0.50141317", "0.5002204", "0.5002081", "0.4969301", "0.49646002", "0.4962375", "0.4949405", "0.4942153", "0.49354175", "0.4929495", "0.49136695", "0.48923928", "0.48881328", "0.48833677", "0.48670334", "0.48476198" ]
0.62907976
0
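The document in the row above performs a greedy one-to-one assignment between learned HC states and world states. A self-contained check under assumed inputs: the 3x3 count matrix is made up, and because the norm_co_occ_matrix helper it calls is not included in this dump, a simple row normalisation stands in for it here:

import numpy as np

# Stand-in for the norm_co_occ_matrix helper, which is not part of this row's document.
def norm_co_occ_matrix(co_occs):
    return co_occs / co_occs.sum(axis=1, keepdims=True)

def get_s_order(co_occs, s_hc=None):
    # Reproduced from the document above: greedily match the strongest pairs first.
    free_rows, free_cols = [list(range(n)) for n in co_occs.shape]
    s_ord = -np.ones(co_occs.shape[0], dtype=int)
    co_normed = norm_co_occ_matrix(co_occs)
    isrtd = np.unravel_index(co_normed.argsort(axis=None)[::-1], co_occs.shape)
    for irow, icol in zip(isrtd[0], isrtd[1]):
        if irow in free_rows and icol in free_cols:
            s_ord[icol] = irow
            free_rows.remove(irow)
            free_cols.remove(icol)
            if not len(free_rows) or not len(free_cols):
                break
    s_ord[s_ord == -1] = free_rows
    s_name_srtd = np.array(s_hc)[s_ord] if s_hc is not None else None
    return s_ord, s_name_srtd

# Assumed toy co-occurrence counts (rows: HC states, columns: world states).
co_occs = np.array([[8., 1., 0.],
                    [0., 1., 7.],
                    [1., 6., 0.]])
print(get_s_order(co_occs, s_hc=['hc0', 'hc1', 'hc2']))
# -> (array([0, 2, 1]), array(['hc0', 'hc2', 'hc1'], dtype='<U3')):
#    world state 0 maps to HC state 0, world state 1 to HC 2, world state 2 to HC 1.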
Link a Spotify account to the bot.
async def link(self, ctx):
    if not is_linked(ctx.author.id):
        token = str(uuid.uuid4())
        valid_until = int((datetime.utcnow() + timedelta(days=1)).timestamp())
        add_token(ctx.author.display_name, ctx.author.id, token, valid_until, str(ctx.author.avatar_url))
        web_base_url = get_setting('web_base_url')
        await ctx.author.send(f"Please visit {web_base_url}/link/{token} to link your Spotify account. "
                              f"This link will expire after 24 hours.")
        if ctx.guild is not None:
            await ctx.message.add_reaction('📬')
    else:
        await ctx.reply("You have already linked a spotify account!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def link(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n return await r(ctx, \"Not a bot.\")\n await r(ctx, f'<https://www.motiondevelopment.top/bots/{bot.id}>')", "async def info(self, ctx):\n if ctx.guild is not None:\n await ctx.reply(\"This command can only be used in DMs, because of privacy reasons.\")\n raise commands.CommandError(\"Invoker not in DMs.\")\n\n if not is_linked(ctx.author.id):\n await ctx.reply(f\"You don't have a Spotify account linked. Please link one using \"\n f\"`{self.bot_config['prefix']}link`.\")\n raise commands.CommandError(\"User has no spotify account linked.\")\n\n sp = init_spotify(ctx.author.id)\n result = sp.me()\n msg_embed = Embed()\n msg_embed.title = \"Linked Spotify account\"\n msg_embed.url = result['external_urls'].get('spotify', None)\n if len(result['images']) > 0:\n msg_embed.set_image(url=result['images'][0]['url'])\n msg_embed.add_field(name=\"Display name\", value=result['display_name'])\n msg_embed.add_field(name=\"Subscription type\", value=result.get('product', 'free'))\n if result.get('product', None) != \"premium\":\n msg_embed.add_field(name=\"Warning!\",\n value=\"Only accounts with Spotify Premium can use this bot!\",\n inline=False)\n await ctx.reply(embed=msg_embed)", "async def join(self, ctx):\n if ctx.guild is None:\n await ctx.reply(\"This command can only be used in a server, not in DMs.\")\n raise commands.CommandError(\"Invoker not in a guild.\")\n\n if not is_linked(ctx.author.id):\n await ctx.reply(f\"You don't have a Spotify account linked. Please link one using \"\n f\"`{self.bot_config['prefix']}link`.\")\n raise commands.CommandError(\"User has no spotify account linked.\")\n\n if ctx.author.voice is None or ctx.author.voice.channel is None:\n await ctx.reply(\"You need to be in a voice channel to use this command.\")\n raise commands.CommandError(\"Invoker not connected to a voice channel.\")\n\n if ctx.voice_client is not None and ctx.author.voice.channel != ctx.voice_client.channel:\n await ctx.reply(\"You need to be in the same voice channel as the bot to use this command.\")\n raise commands.CommandError(\"Invoker not in same voice channel as bot.\")\n\n # Connect to voice channel that the invoker is in (if we're not already connected somewhere else)\n try:\n controller_instance = await ctx.author.voice.channel.connect(reconnect=False)\n except asyncio.TimeoutError:\n await ctx.reply(\"Timeout error while connecting to the voice channel. Please try again later.\")\n return\n except discord.ClientException:\n await ctx.reply(\"I'm already connected to a voice channel, please disconnect me first!\")\n return\n except OpusNotLoaded:\n await ctx.reply(\"Opus library was not loaded. Please try again later.\")\n return\n\n if controller_instance is not None:\n # Create a listening socket for the future incoming audio connection\n try:\n controller = SpotifyController.create(controller_instance.channel.id,\n controller_instance.channel.bitrate,\n ctx.author.id)\n except ValueError as e:\n await ctx.reply(e)\n return\n\n controller.get_or_create_playlist()\n\n await ctx.author.send(f\"Please enter the following code in your client application and click \"\n f\"'connect' to start playing music!\\nCode: `{controller.link_code}`\")\n await ctx.reply(f\"Ready and waiting for a connection! 
I've DM'ed you a code to fill in in your client app.\"\n f\"\\nIn the mean time, you can start adding songs with `{self.bot_config['prefix']}add`, \"\n f\"and view the queue with `{self.bot_config['prefix']}queue`\")", "async def invite(self, ctx):\r\n myInvite = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(permissions=8))\r\n await ctx.channel.send('Invite me to *your* server with this link: \\n\\n<{}>'.format(myInvite))", "async def invite(self):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await self.bot.say(\"Invite me to your server with this link!\\n\" + link)", "async def inviteme(self):\r\n\r\n #Your code will go here\r\n await self.bot.say(\"Here is a link to Invite Me: http://bit.ly/TacoBot\")", "async def invite(self, ctx):\n link = \"https://discordapp.com/oauth2/authorize?client_id=282765243862614016&scope=bot&permissions=19456\"\n await ctx.send(\"Invite me to your server with this link!\\n\" + link)", "async def spotify(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send('Invalid command passed. Use the `?help spotify` command to learn more.')", "def share_link(cls, user, link):", "def share_link(cls, user, link):", "async def invite(self, ctx):\n await ctx.send(f\"**{ctx.author.name}**, use this URL to invite me\\n<{discord.utils.oauth_url(self.bot.user.id)}>\")", "async def link_to(self, *args):\n pass", "async def invite(ctx):\n permissions = 2134207679\n url = discord.utils.oauth_url(client_id=bot.user.id, permissions=discord.Permissions(permissions=permissions),\n scopes=(\"bot\", \"applications.commands\"))\n view = discord.ui.View()\n view.add_item(discord.ui.Button(label=\"Invite\", url=url))\n await ctx.respond(\"I'm glad you want to add me to your server, here's a link!\", view=view)", "def wrap_spotify_link(item, text=''):\n\n # generate default text if no text has been given\n if not text:\n name = item['name']\n if item['type'] == 'playlist':\n user = SP.user(item['owner']['id'])['display_name']\n text = f'{name} by {user}'\n elif item['type'] == 'artist':\n text = name\n else:\n artist = item['artists'][0]['name']\n text = f'{name} by {artist}'\n\n link = item['external_urls']['spotify']\n return f'<a href=\"{link}\">{text}</a>'", "async def user_aclient(user_token):\n sender = tk.RetryingSender(sender=tk.AsyncSender())\n yield tk.Spotify(user_token, sender=sender)\n await sender.close()", "async def spotify(self, ctx, *, query):\n\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"track\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"tracks\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "async def links(self, ctx):\n await ctx.send(\"https://discordapp.com/channels/566451504332931073/681617252814159904/755489156146397311\")", "def use_account(self, token, url=QE_URL, **kwargs):\n credentials = Credentials(token, url, **kwargs)\n\n self._append_account(credentials)", "async def invite(self, ctx):\n embed = discord.Embed(title=\"Invite\", description=f\"**{ctx.author.name}**, use this URL to invite 
me\\n[link](https://discord.com/oauth2/authorize?client_id=749629426777456691&permissions=8&scope=bot)\", color=0xeff0f1)\n await ctx.send(embed=embed)", "def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()", "async def github(self, ctx):\n await ctx.send('https://github.com/nick411077/nickcan_bot')", "async def server():\n await bot.say(\"https://discord.gg/Eau7uhf\")", "def shareNote(self, authenticationToken, guid):\r\n pass", "async def addme(self, ctx):\n invite_url = discord.utils.oauth_url(self.bot.user.id, permissions=discord.Permissions(8))\n embed = self.bot.embeds.embed_builder(title='Add this bot to your own Discord server',\n description=invite_url,\n has_footer=False)\n await ctx.send(embed=embed)", "async def _invite(self, ctx: Context):\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n value = (\n f\"Invite TvM Assistant to your bot by [clicking here]({invite_url}).\"\n \"\\n\\nInviting the bot will give it some management permissions. You can\"\n \" review them when you use the link.\"\n )\n\n embed = discord.Embed(color=await ctx.embed_colour(), description=value)\n embed.set_author(name=f\"Invite TvM Assistant\", icon_url=ctx.me.avatar_url)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n f\"{invite_url}\\n\\nInviting the bot will give it some management permissions.\"\n \" You can review them when you use the link.\"\n )", "def link(self):\n return f\"https://{DOMAIN}/invite/{self.code}\"", "async def invite(self, ctx, plain_url: bool = False):\n\n if not plain_url:\n try:\n await ctx.embed(\n 'Click to invite me to your server!',\n title_url=self.bot.invite_url,\n colour='blue',\n icon=\"https://i.imgur.com/DtPWJPG.png\"\n )\n except discord.errors.Forbidden:\n pass\n else:\n return\n\n await ctx.send(f\"Invite URL: <{self.bot.invite_url}>\")", "async def _discord(self, ctx: Context):\n\n await ctx.send(\n f\"You can join the Brawlcord community server by using this link: {COMMUNITY_SERVER}\"\n )", "def link(self, pin):\n headers = {\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'X-Plex-Product': 'Plex SSO'\n }\n data = {'code': pin}\n self.query(self.LINK, self._session.put, headers=headers, data=data)", "async def _invite(self, ctx: Context):\n\n # read_messages=True,\n # send_messages=True,\n # manage_messages=True,\n # embed_links=True,\n # attach_files=True,\n # external_emojis=True,\n # add_reactions=True\n perms = discord.Permissions(322624)\n\n try:\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perms)\n value = (\n \"Add Brawlcord to your server by **[clicking here]\"\n f\"({invite_url})**.\\n\\n**Note:** By using the link\"\n \" above, Brawlcord will be able to\"\n \" read messages,\"\n \" send messages,\"\n \" manage messages,\"\n \" embed links,\"\n \" attach files,\"\n \" add reactions,\"\n \" and use external emojis\"\n \" wherever allowed.\\n\\n*You can remove the permissions manually,\"\n \" but that may break the bot.*\"\n )\n except Exception as exc:\n invite_url = None\n value = (\n f\"Error 
\\\"{exc}\\\" while generating invite link.\"\n \" Notify bot owner using the `-report` command.\"\n )\n\n embed = discord.Embed(color=EMBED_COLOR, description=value)\n embed.set_author(\n name=f\"Invite {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n # embed.add_field(name=\"__**Invite Link:**__\", value=value)\n\n try:\n await ctx.send(embed=embed)\n except discord.Forbidden:\n return await ctx.send(\n \"I do not have the permission to embed a link.\"\n \" Please give/ask someone to give me that permission.\"\n )" ]
[ "0.6603521", "0.63787323", "0.6339056", "0.6307534", "0.62112045", "0.6183406", "0.60342115", "0.6027001", "0.60145694", "0.60145694", "0.5984008", "0.59183764", "0.5639046", "0.5622354", "0.5614502", "0.5600881", "0.5547755", "0.5491989", "0.5484494", "0.54839873", "0.5476929", "0.5442474", "0.5434192", "0.54155725", "0.54124016", "0.53911066", "0.5378274", "0.5357755", "0.53518236", "0.5349578" ]
0.7880789
0
Unlink a Spotify account from the bot.
async def unlink(self, ctx):
    # Remove all link tokens and spotify details for this user
    remove_tokens(ctx.author.id)
    remove_spotify_details(ctx.author.id)
    await ctx.reply("All your linked accounts were removed, if you had any!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink(self, link_id):", "def unfollow_profile(self):\n self.find_clickable_element(self.ISFOLLOWED_BTN).click()", "async def twitter_unfollow(self, ctx, handle):\n sane_handle = handle.lower().lstrip('@')\n conf = dutils.get(self.conf.follows, screen_name=sane_handle)\n chan_conf = dutils.get(conf.discord_channels, id=ctx.message.channel.id) if conf is not None else None\n\n if chan_conf is None:\n raise TwitterError('Not following {} on this channel.'.format(handle))\n\n # Remove the Discord channel from the Twitter channel conf\n conf.discord_channels.remove(chan_conf)\n if not conf.discord_channels:\n # If there are no more Discord channel to feed, unfollow the Twitter channel\n self.conf.follows.remove(conf)\n del conf\n\n # Update the tweepy stream\n if len(self.conf.follows) > 0:\n await self.stream.start()\n else:\n self.stream.stop()\n\n self.conf.save()\n\n await self.bot.say('\\N{OK HAND SIGN}')", "def disconnect():\n\n\tglob.tokens.deleteToken(glob.tokens.getTokenFromUserID(999))", "def unfollow(alias):\n s = db.Series.alias_lookup(alias)\n s.following = False\n db.session.commit()\n output.series('Removing follow for {}'.format(s.name))", "def closeaccount(request):\n get_user_model().objects.get(username=request.user.get_username()).delete()\n return Response({}, status=status.HTTP_200_OK)", "def unregister(url):\n return Client.get_client().unregister(url)", "async def unplonk(ctx, user: typing.Union[discord.Member, discord.User]):\n await bot.plonk.delete(user.id)\n await r(ctx, f'Unplonked **{user}**')", "def _disconnect(remote, *args, **kwargs):\n if not current_user.is_authenticated:\n return current_app.login_manager.unauthorized()\n\n account = RemoteAccount.get(\n user_id=current_user.get_id(), client_id=remote.consumer_key\n )\n if account:\n external_id = account.extra_data.get(\"external_id\")\n\n if external_id:\n oauth_unlink_external_id(dict(id=external_id, method=\"cern_openid\"))\n\n with db.session.begin_nested():\n account.delete()\n\n disconnect_identity(g.identity)", "def deauth(nick):\n global auth_list\n if nick in auth_list:\n a = auth_list.index(nick)\n del(auth_list[a])", "def leave(self):\n self.remove(\n self.subreddit._reddit.config.username or self.subreddit._reddit.user.me()\n )", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? 
AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )", "def unlink(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n self.client.post_linked_resource(\n self.resource, RelationType.UNLINK_FROM_TEMPLATE,\n EntityType.ROLE.value, None)", "def unfollow_me(self):\n return self.follow_me('unfollow_topic')", "def unfollow(request, usertostopfollow):\n stop_follow = Member.objects.get(user__username=usertostopfollow)\n user = Member.objects.get(user=request.user)\n user.following.remove(stop_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def unfollow(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def delete_account(self, account):\n \n pass", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def unShare(sharedItem):\n sharedItem.store.query(Share, Share.sharedItem == sharedItem).deleteFromStore()", "def remove_link():", "def disassociate_member_account(memberAccountId=None):\n pass", "def detele_account():\n session_id = request.args.get('session-id', None)\n user_id = request.args.get('user-id', None)\n delete_user(user_id)\n delete_session(session_id)\n return render_template('home.html', cars_list=get_cars_preview(), news_list=get_news_list(), authjs=False,\n preview_length=get_cars_preview().__len__(), user_deleted=user_id)", "def unfollow_friend(username):\n\n if not g.user:\n print \"401\"\n abort(401)\n whom_id = get_user_id(username)\n print whom_id\n if whom_id is None:\n abort(404)\n unfollow_query(whom_id)\n flash('You are no longer following \"%s\"' % username)\n name = {'name of unfollowing user': username}\n ############### REDIS cache invalidate #####################\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def unfollow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('delete from follower where who_id=? 
and whom_id=?',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are no longer following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def unlink_account(self, accountid, provider, providerid):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret()\n payload = {'accountid': accountid, 'provider': provider, 'providerid': providerid}\n url = SECURE_API_URL + \"raas/v1/account/unlink\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)", "def revoke_token(token):\n token.delete_instance()", "async def disconnect(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not currently connected to a voice channel :no_entry:\")\n if not ctx.author.voice or (player.is_connected and player.connected_channel.id != ctx.author.voice.channel.id):\n return await ctx.send(\"You have to be in my voice channel to disconnect :no_entry:\")\n if player.fetch(\"sessionowner\") == ctx.author.id:\n player.queue.clear()\n await player.disconnect()\n player.delete(\"votes\")\n await ctx.send(\"Disconnected <:done:403285928233402378>\")\n else:\n await ctx.send(\"Only the session owner can disconnect the bot :no_entry:\")", "def disconnect(self, login_session):\n\n # Only disconnect a connected user.\n credentials = login_session.get('credentials')\n\n if 'gplus_id' in login_session:\n del login_session['gplus_id']\n if 'credentials' in login_session:\n del login_session['credentials']\n\n if credentials is None:\n response = make_response(\n json.dumps('Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n access_token = credentials.access_token\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] != '200':\n # For whatever reason, the given token was invalid.\n response = make_response(\n json.dumps('Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response\n else:\n return \"You have been logged out.\"", "async def disconnect(ctx):\n if ctx.voice_client is not None:\n await ctx.voice_client.disconnect()", "def unregister(self):\n if self.hub.is_connected:\n self._is_registered = False\n self.hub.unregister(self._private_key)\n self._hub_id = None\n self._public_id = None\n self._private_key = None\n else:\n raise SAMPClientError(\n \"Unable to unregister from the SAMP Hub. Hub proxy not connected.\"\n )" ]
[ "0.63025486", "0.61723953", "0.6036946", "0.59427714", "0.5927398", "0.59164095", "0.5912746", "0.5850332", "0.5843319", "0.58328366", "0.5821932", "0.5803021", "0.57848567", "0.5749491", "0.5748773", "0.5742856", "0.5740611", "0.57387394", "0.5726775", "0.5691439", "0.5678666", "0.56742096", "0.56360686", "0.5615988", "0.5610281", "0.55780584", "0.55632854", "0.55581117", "0.5546788", "0.5528797" ]
0.745899
0
Displays basic info about your linked Spotify account (name, avatar).
async def info(self, ctx):
    if ctx.guild is not None:
        await ctx.reply("This command can only be used in DMs, because of privacy reasons.")
        raise commands.CommandError("Invoker not in DMs.")

    if not is_linked(ctx.author.id):
        await ctx.reply(f"You don't have a Spotify account linked. Please link one using "
                        f"`{self.bot_config['prefix']}link`.")
        raise commands.CommandError("User has no spotify account linked.")

    sp = init_spotify(ctx.author.id)
    result = sp.me()
    msg_embed = Embed()
    msg_embed.title = "Linked Spotify account"
    msg_embed.url = result['external_urls'].get('spotify', None)
    if len(result['images']) > 0:
        msg_embed.set_image(url=result['images'][0]['url'])
    msg_embed.add_field(name="Display name", value=result['display_name'])
    msg_embed.add_field(name="Subscription type", value=result.get('product', 'free'))
    if result.get('product', None) != "premium":
        msg_embed.add_field(name="Warning!",
                            value="Only accounts with Spotify Premium can use this bot!",
                            inline=False)
    await ctx.reply(embed=msg_embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status(verbose):\n user_data = Spotify.request('me', method='GET')\n click.echo('Logged in as {}'.format(user_data['display_name']))\n if verbose:\n click.echo('Credentials stored in {}'.format(CREDS_PATH))\n return", "async def githubinfo_command(self, ctx, *, githubusername: str):\n async with aiohttp.ClientSession() as session:\n async with session.get(\n f\"https://api.github.com/users/{githubusername}\"\n ) as resp:\n githubinfo = await resp.json()\n name = githubinfo[\"name\"]\n avatar_url = githubinfo[\"avatar_url\"]\n blog = githubinfo[\"blog\"]\n location = githubinfo[\"location\"]\n twitter_username = githubinfo[\"twitter_username\"]\n publicrepos = githubinfo[\"public_repos\"]\n followers = githubinfo[\"followers\"]\n following = githubinfo[\"following\"]\n embed = Embed(\n color=Color.blurple(),\n timestamp=datetime.utcnow(),\n description=(\n f\"**Name** - {name}\\n**Blog URL** - {None if not blog else blog}\\n**Location** - {location}\\n**Twitter Username** - {twitter_username}\\n **Public Repositories** - {publicrepos}\\n**Followers** - {followers}\\n**Following** - {following}\"\n ),\n )\n embed.set_author(name=f\"Github Profile info of username {githubusername}\")\n if avatar_url is not None:\n embed.set_thumbnail(url=avatar_url)\n await ctx.send(embed=embed)", "async def info(self, context):\n await context.send('creador: [email protected]\\ncolabs:\\n emi: https://twitter.com/emilianosce/ o https://www.instagram.com/emilianosce/ \\n garza: https://twitter.com/Matias_Garcia00 o https://www.twitch.tv/garzangb')", "def show_user_profile(username):\n\n name = USERS[username]\n return f\"<h1>Profile for {name}</h1>\"", "def display_profile(self):\n print(f\"Id: {self._id}\")\n print(f\"username: {self.username}\")\n print(f\"name: {self.name}\")\n print(f\"contact: {self.contact}\")\n print(f\"address: {self.address}\")", "def info(id):\n sql = \"select distinct name, description, stars, url, last_push_date, repo_id, created_date, avatar from python_repos where repo_id=\"+id\n db = get_db()\n cursor = db.execute(sql)\n repo_info = cursor.fetchall()\n return render_template('repo.html',info=repo_info)", "def account_info(request):\r\n user = request.user\r\n\r\n return _api_response(request, user.safe_data())", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def show_user_info(self):\n name = self.get_user_name()\n print(f'Name: {name.title()}')\n print(f'Age: {self.age}')\n print(f'Gender: {self.gender.title()}')\n print(f'Mobile: {self.m_number}')", "def __display_login_info(self):\n print(f'\\nYour card has been created\\n'\n f'Your card number:\\n'\n # f'{self.__card_display()}\\n' # uncomment this line and comment out line below for pretty display\n f'{self.card_number}\\n'\n f'Your card PIN:\\n'\n f'{self.__account_pin}\\n', )", "async def userinfo(self, ctx: \"IceTeaContext\", target: discord.Member = None):\n target = target or ctx.author\n target_data = ctx.author_data if target == ctx.author else await ctx.get_user_data(target)\n if target_data:\n nicknames = await target_data.get_nicknames()\n 
else:\n nicknames = []\n shared_servers = len([member for member in ctx.bot.get_all_members() if member == target])\n embed = discord.Embed(title=f\"{target.nick or target.name} Profile\")\n embed.set_author(name=f\"{target.name} ({target.id})\", icon_url=target.avatar_url)\n embed.set_thumbnail(url=target.avatar_url)\n embed.add_field(name=\"Shared Servers\", value=f\"{shared_servers} Shared\")\n embed.add_field(name=\"Created\",\n value=f\"\"\"{timeago.format(target.created_at)} ({target.created_at.strftime(\"%b %d, %Y\")})\"\"\")\n embed.add_field(name=\"Joined\",\n value=f\"\"\"{timeago.format(target.joined_at)} ({target.joined_at.strftime(\"%b %d, %Y\")})\"\"\")\n embed.set_footer(text=\"Last Spoke In server\")\n if target_data:\n embed.timestamp = target_data.last_spoke\n else:\n embed.timestamp = ctx.message.created_at\n if len(nicknames) > 0:\n embed.add_field(name=\"Nicknames\", value=\" , \".join(str(nick) for nick in nicknames[:5]), inline=False)\n embed.add_field(name=\"Roles\", value=\" , \".join([role.name for role in target.roles[:5] if len(role.name) > 0]),\n inline=False)\n if target.activity:\n if isinstance(target.activity, discord.Spotify):\n embed.add_field(name=\"Currently Listening to\",\n value=f\"**{target.activity.title}** by {target.activity.artist} \")\n else:\n embed.add_field(name=\"Currently Playing Since\",\n value=f\"{target.activity.name}\\n{target.activity.details}\\n{target.activity.state}\")\n await ctx.send(embed=embed)", "async def info(ctx):\n embed = discord.Embed(title=\"Zane Bot\", description=\"All hail the hypnotoad!\", color=0x0091C5)\n\n # give info about you here\n embed.add_field(name=\"Author\", value=\"Zanexius\")\n\n # Shows the number of servers the bot is member of.\n embed.add_field(name=\"Server count\", value=f\"{len(bot.guilds)}\")\n\n # give users a link to invite thsi bot to their server\n embed.add_field(name=\"Invite\", value=\"[Invite link](<insert your OAuth invitation link here>)\")\n\n await ctx.send(embed=embed)", "async def info(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if not tag.alias:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` tag information\")\n user = ctx.guild.get_member(tag.author)\n embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n embed.add_field(name=\"Tag name\", value=tag.title)\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)\n else:\n embed = discord.Embed(description=f\"{ctx.message.guild.name} ``{tag.title}`` alias information\")\n user = ctx.guild.get_member(tag.author)\n embed.add_field(name=\"Author\", value=user or \"Unknown\")\n embed.add_field(name=\"Amount used\", value=str(tag.count))\n embed.timestamp = tag.created\n await ctx.send(embed=embed)", "async def info(ctx, message):\n if ctx.args.command == None:\n embed = discord.Embed()\n embed.add_field(name=\"Profile\", value=ctx.profile.name)\n embed.add_field(name=\"Mode\", value=ctx.profile.mode)\n embed.set_author(name=ctx.user.name, icon_url=ctx.user.avatar_url)\n await message.channel.send(embed=embed)\n else:\n embed = discord.Embed(\n title=\"{0.profile.prefix}{0.args.command.name} {0.args.command.usage}\".format(ctx),\n description=ctx.args.command.help,\n url=\"https://github.com/Synixe/Bot/blame/master/{0}#L{1.start}L{1.end}\".format(ctx.args.command.file.replace(os.getcwd(), \"\"), ctx.args.command)\n )\n embed.set_footer(text=ctx.args.command.extension.fullname + \".\" 
+ ctx.args.command.name)\n await message.channel.send(embed=embed)", "def display_accounts_details():\n return Credentials.display_credentials()", "def profile(self, name=\"johndoe\"):\r\n url = \"/account/%s\" % name\r\n return self.app.get(url, follow_redirects=True)", "async def info(self, ctx, member: discord.Member = None):\n\n user = ctx.author if member is None else member\n\n emb = await self.get_embed_info_user(user)\n emb.set_image(url=user.avatar_url)\n\n await ctx.send(embed=emb)", "def profile():\n from flickrAPI import FlickrAPI\n #flickr = FlickrAPI(key=session['resource_owner_key'], secret=session['resource_owner_secret'])\n flickr = FlickrAPI(key=request.cookies.get('oauth_token'), secret=request.cookies.get('oauth_token_secret'))\n faves = flickr.favorites_getList(user_id=\"44124394781@N01\", page=1, per_page=5, extras='owner_name')\n return str(faves)", "async def info(self):\n # [p]info\n\n await self.bot.say(strings.info.format(\n CacheAPI.get(key='dwarf_repository'),\n CacheAPI.get(key='dwarf_invite_link')))", "def user_info(username):\n print(json.dumps(client.user_info(username)))", "def profile():\n \n return render_template(\"profile.html\")", "async def userinfo_command(self, ctx, member: Optional[Member]):\n member = member or ctx.author\n member_avatar = member.avatar_url\n id = member.id\n name = member.name\n accountAge = member.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\")\n joinServerDate = member.joined_at.strftime(\"%a, %#d %B %Y, %I:%M %p UTC\")\n highestRole = member.top_role.mention\n\n info = \"Server Owner\" if ctx.guild.owner is ctx.author else \"Member\"\n\n embed = Embed(\n title=f\"User Info - {member.name}\",\n timestamp=datetime.utcnow(),\n color=Color.blurple(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_thumbnail(url=member_avatar)\n fields = [\n (\"ID\", id, False),\n (\"Name\", f\"{name} #{ctx.author.discriminator}\", True),\n (\"Highest Role\", highestRole, True),\n (\"Account Created on\", accountAge, True),\n (\"Joined Server on\", joinServerDate, True),\n (\"Additional Info\", info, True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)", "async def info(self, ctx, *, member: disnake.Member = None):\n\n member = member or ctx.author\n\n e = disnake.Embed(description=\"\")\n\n if member.bot:\n e.description = \"This account is a bot.\\n\\n\"\n\n e.description += member.mention\n\n e.add_field(name=\"Status\", value=member.status)\n\n if member.activity:\n e.add_field(name=\"Activity\", value=member.activity.name)\n\n e.set_author(name=str(member), icon_url=member.display_avatar.url)\n\n now = datetime.now(timezone.utc)\n created = member.created_at\n joined = member.joined_at\n\n e.add_field(\n name=\"Account age\",\n value=\"{0} • Created <t:{1}:F>\".format(\n pretty_timedelta(now - created), round(created.timestamp())\n ),\n inline=False,\n )\n\n e.add_field(\n name=\"Member for\",\n value=\"{0} • Joined <t:{1}:F>\".format(\n pretty_timedelta(now - joined), round(joined.timestamp())\n ),\n )\n\n if len(member.roles) > 1:\n e.add_field(\n name=\"Roles\",\n value=\" \".join(role.mention for role in reversed(member.roles[1:])),\n inline=False,\n )\n\n e.set_footer(text=\"ID: \" + str(member.id))\n\n await ctx.send(embed=e)", "async def info(self, ctx, *, tag):\n try:\n self.fetch_tag(ctx, tag)\n except Exception as error:\n return await ctx.send(error)\n data = self._tag_dict[ctx.guild.id][tag]\n author = 
self.bot.get_user(data['author']) or await self.bot.fetch_user(data['author'])\n embed = discord.Embed(colour=self.bot.colour)\n embed.title = tag\n embed.description = f\"<:author:734991429843157042> **{author}**\\n\"\n embed.description += f\"Uses: **{data['uses']}**\\n\"\n embed.description += f\"ID: **{data['id']}**\"\n embed.set_author(name=str(author), icon_url=author.avatar_url)\n await ctx.send(embed=embed)", "async def github_user_info(self, ctx: commands.Context, username: str) -> None:\n async with ctx.typing():\n user_data = await self.fetch_data(f\"{GITHUB_API_URL}/users/{quote_plus(username)}\")\n\n # User_data will not have a message key if the user exists\n if \"message\" in user_data:\n embed = discord.Embed(\n title=random.choice(NEGATIVE_REPLIES),\n description=f\"The profile for `{username}` was not found.\",\n colour=Colours.soft_red\n )\n\n await ctx.send(embed=embed)\n return\n\n org_data = await self.fetch_data(user_data[\"organizations_url\"])\n orgs = [f\"[{org['login']}](https://github.com/{org['login']})\" for org in org_data]\n orgs_to_add = \" | \".join(orgs)\n\n gists = user_data[\"public_gists\"]\n\n # Forming blog link\n if user_data[\"blog\"].startswith(\"http\"): # Blog link is complete\n blog = user_data[\"blog\"]\n elif user_data[\"blog\"]: # Blog exists but the link is not complete\n blog = f\"https://{user_data['blog']}\"\n else:\n blog = \"No website link available\"\n\n embed = discord.Embed(\n title=f\"`{user_data['login']}`'s GitHub profile info\",\n description=f\"```{user_data['bio']}```\\n\" if user_data[\"bio\"] else \"\",\n colour=discord.Colour.blurple(),\n url=user_data[\"html_url\"],\n timestamp=datetime.strptime(user_data[\"created_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n )\n embed.set_thumbnail(url=user_data[\"avatar_url\"])\n embed.set_footer(text=\"Account created at\")\n\n if user_data[\"type\"] == \"User\":\n\n embed.add_field(\n name=\"Followers\",\n value=f\"[{user_data['followers']}]({user_data['html_url']}?tab=followers)\"\n )\n embed.add_field(\n name=\"Following\",\n value=f\"[{user_data['following']}]({user_data['html_url']}?tab=following)\"\n )\n\n embed.add_field(\n name=\"Public repos\",\n value=f\"[{user_data['public_repos']}]({user_data['html_url']}?tab=repositories)\"\n )\n\n if user_data[\"type\"] == \"User\":\n embed.add_field(\n name=\"Gists\",\n value=f\"[{gists}](https://gist.github.com/{quote_plus(username, safe='')})\"\n )\n\n embed.add_field(\n name=f\"Organization{'s' if len(orgs)!=1 else ''}\",\n value=orgs_to_add if orgs else \"No organizations.\"\n )\n embed.add_field(name=\"Website\", value=blog)\n\n await ctx.send(embed=embed)", "def describe_user(self):\n print(\"\\nThis is \" + self.first_name + \" \" +\n self.last_name + \"'s user details:\")\n print(\"Username: \" + self.username)\n print(\"Email: \" + self.email)", "def describe_user(self):\n print(\"\\nThis is \" + self.first_name + \" \" +\n self.last_name + \"'s user details:\")\n print(\"Username: \" + self.username)\n print(\"Email: \" + self.email)", "def avatar_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_avatar_url, self.title)\r\n return mark_safe(h)", "def slack_info(request):\n params = slack_callback(request)\n\n if not params:\n # Authorization failed.\n return redirect(\"codedoor:login\")\n\n # if user is already in database, return redirect(url)\n # else, if it's a new user, redirect to the finishprofile page for the user to input the rest of their info\n user = authenticate(params[\"user\"][\"email\"])\n if user is 
None:\n slack_name = params[\"user\"][\"name\"].split(\" \")\n if len(slack_name) == 2:\n first_name, last_name = slack_name\n else:\n first_name = slack_name[0]\n last_name = \"\"\n return render(\n request,\n 'codedoor/finish_profile.html',\n {\n \"id\": params['user']['email'],\n \"first_name\": first_name,\n \"last_name\": last_name,\n \"email\": params[\"user\"][\"email\"],\n \"pic\": params[\"user\"]['image_512']\n }\n )\n else:\n auth_login(request, user)\n return redirect(\"codedoor:viewprofile\", pk=user.profile.id)", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)" ]
[ "0.65351516", "0.6477003", "0.64630944", "0.62933874", "0.62896085", "0.6278736", "0.6263717", "0.6239856", "0.62017554", "0.61626744", "0.61613786", "0.6148831", "0.6129267", "0.6054022", "0.60383016", "0.6036688", "0.6031312", "0.6021112", "0.5999502", "0.599438", "0.5987369", "0.5961587", "0.5911696", "0.59076726", "0.5840414", "0.5837207", "0.5837207", "0.58251953", "0.5823577", "0.58053696" ]
0.7391768
0
Makes the bot leave your voice channel
async def leave(self, ctx):
    if ctx.guild is None:
        await ctx.reply("This command can only be used in a server, not in DMs.")
        raise commands.CommandError("Invoker not in a guild.")

    if ctx.author.voice is None or ctx.author.voice.channel is None:
        await ctx.reply("You need to be in a voice channel to use this command.")
        raise commands.CommandError("Invoker not connected to a voice channel.")

    if ctx.voice_client is not None and ctx.author.voice.channel != ctx.voice_client.channel:
        await ctx.reply("You need to be in the same voice channel as the bot to use this command.")
        raise commands.CommandError("Invoker not in same voice channel as bot.")

    if ctx.voice_client is not None:
        SpotifyController.stop_for_channel(ctx.voice_client.channel.id)
        await ctx.voice_client.disconnect()
        return

    await ctx.send('I am not connected to a voice channel...')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def leave(self, ctx):\n if ctx.guild.voice_client:\n await ctx.guild.voice_client.disconnect()\n await ctx.send(\"Left voice channel.\")\n else:\n await ctx.message.add_reaction('\\U0001F615');\n await ctx.send(\"Not in a voice channel.\")", "async def _leave(self, ctx: commands.Context):\n\n if not ctx.voice_state.voice:\n return await ctx.send('Not connected to any voice channel.')\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]\n await ctx.message.add_reaction('👋')", "async def _leave(self, ctx: commands.Context):\n\n if not ctx.voice_state.voice:\n return await ctx.send('Not connected to any voice channel.')\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]", "async def leave(self, msg):\n if msg.author.voice is not None and msg.voice_client is not None:\n if msg.voice_client.is_playing() is True or self.player[msg.guild.id]['queue']:\n self.player[msg.guild.id]['queue'].clear()\n msg.voice_client.stop()\n return await msg.voice_client.disconnect(), await msg.message.add_reaction(emoji='✅')\n\n return await msg.voice_client.disconnect(), await msg.message.add_reaction(emoji='✅')\n\n if msg.author.voice is None:\n return await msg.send(\"You must be in the same voice channel as bot to disconnect it via command\")", "async def leave(self, ctx):\n\n if ctx.voice_client is not None:\n return await ctx.voice_client.disconnect()", "async def leave(self, ctx: commands.Context) -> Optional[discord.VoiceChannel]:\n\n if ctx.guild.id in self.queue:\n self.queue[ctx.guild.id].cleanup()\n del self.queue[ctx.guild.id]\n\n await maybe_coroutine(ctx.voice_client.stop)\n\n channel = ctx.voice_client.channel\n await ctx.voice_client.disconnect(force=True)\n return channel", "async def leave(self, ctx):\n player = self.bot.lavalink.player_manager.get(ctx.guild.id)\n\n if not player.is_connected:\n # We can't disconnect, if we're not connected.\n return await ctx.send(embed=self.error_embed(f'Not playing. [{ctx.message.author.mention}]'))\n\n if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):\n # Abuse prevention. Users not in voice channels, or not in the same voice channel as the bot\n # may not disconnect the bot.\n return await ctx.send(embed=self.error_embed(f'Not connected to the same voice channel. 
[{ctx.message.author.mention}]'))\n\n # Clear the queue to ensure old tracks don't start playing\n # when someone else queues something.\n player.queue.clear()\n # Stop the current track so Lavalink consumes less resources.\n await player.stop()\n # Disconnect from the voice channel.\n await ctx.send(embed=self.reply_embed(f\"Left channel `{ctx.message.author.voice.channel}` [{ctx.message.author.mention}]\"))\n await ctx.guild.change_voice_state(channel=None)", "async def leaveForce(self, ctx):\n await ctx.voice_client.disconnect()", "async def _leave(self, ctx: commands.Context):\n\n await ctx.voice_state.stop()\n del self.voice_states[ctx.guild.id]", "async def stop(self, ctx):\n voice = discord.utils.get(self.bot.voice_clients, guild=ctx.guild)\n\n if voice:\n if voice.is_playing():\n voice.stop()\n await ctx.send(\"Playing stopped.\") \n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not playing anything right now.\")\n else:\n await ctx.message.add_reaction('\\U0001F615')\n await ctx.send(\"Not in a voice channel.\")", "async def stop(ctx):\r\n\r\n await ctx.voice_client.disconnect()", "async def disconnect(ctx):\n if ctx.voice_client is not None:\n await ctx.voice_client.disconnect()", "async def disconnect(self, ctx):\r\n if ctx.voice_client:\r\n await ctx.voice_client.disconnect()\r\n print(\"[INFO] Bot disconnected from channel {0}\".format(self.channel_name))\r\n await ctx.message.delete()", "def voice_decrease():\n request_command(tv_command=TVCommand.voice_decrease)", "async def disconnect(self, ctx):\n player = self.bot.lavalink.players.get(ctx.guild.id)\n if not player.is_connected:\n return await ctx.send(\"I'm not currently connected to a voice channel :no_entry:\")\n if not ctx.author.voice or (player.is_connected and player.connected_channel.id != ctx.author.voice.channel.id):\n return await ctx.send(\"You have to be in my voice channel to disconnect :no_entry:\")\n if player.fetch(\"sessionowner\") == ctx.author.id:\n player.queue.clear()\n await player.disconnect()\n player.delete(\"votes\")\n await ctx.send(\"Disconnected <:done:403285928233402378>\")\n else:\n await ctx.send(\"Only the session owner can disconnect the bot :no_entry:\")", "async def stop(self, ctx):\n if ctx.voice_client != None:\n await ctx.voice_client.disconnect()", "async def stop(self, ctx):\n\n await ctx.voice_client.disconnect()", "async def stop(self, ctx):\n\n await ctx.voice_client.disconnect()", "async def _leave(self, server_id):\n srv = self.get_server_dict(server_id)\n if srv['voice'] and srv['voice'].channel:\n await srv['voice'].disconnect()\n srv['voice'] = None", "async def disconnect(self, ctx):\r\n if ctx.message.channel.id != 701868237771505695:\r\n return await ctx.send(\"**Error:** Music Bot commands are only available in <#701868237771505695>\")\r\n if self.music_off:\r\n return await ctx.send(\"**Error:** Music Bot features are currently off\")\r\n if ctx.voice_client is None or ctx.voice_client is not self.voice:\r\n return await ctx.send(\"**Error:** You must be connected to the voice channel.\")\r\n try:\r\n await ctx.voice_client.stop()\r\n except:\r\n pass\r\n await ctx.voice_client.disconnect()\r\n self.songs = [\"None\"]\r\n self.processing_songs = 0\r\n self.del_all_files()\r\n self.voice = None\r\n await ctx.send(\"Disconnected\")", "def leave(bot, event, conversation_id=None, *args):\n\n leave_quietly = False\n convs = []\n\n if not conversation_id:\n convs.append(event.conv.id_)\n elif conversation_id==\"quietly\":\n 
convs.append(event.conv.id_)\n leave_quietly = True\n else:\n convs.append(conversation_id)\n\n for c_id in convs:\n if not leave_quietly:\n bot.send_message_parsed(c_id, _('I\\'ll be back!'))\n yield from bot._conv_list.leave_conversation(c_id)\n try:\n \"\"\"support convmem plugin\"\"\"\n bot.call_shared(\"convmem.removeconv\", bot, c_id)\n except KeyError:\n print(\"bot left {}, convmem plugin not available\".format(c_id))", "async def endGame(self, ctx):\n print(\"Ending game ...\")\n await self.movePlayer(ctx=ctx, voiceChannel=self.lastVoiceChannel, reason=\"Fin de partie.\")\n await self.deleteCategory(ctx=ctx, reason=\"Fin de partie.\")\n await self.deleteRole(ctx=ctx, reason=\"Fin de partie.\")\n print(\"Game ended\")\n await self.delete()", "async def leave(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n player = ctx.message.author.name\n if player.lower() not in list(tod_games[room]['participants'].keys()):\n await amor_manager.say(\"{}, you cannot leave the game if you have not joined\".format(player))\n elif player == tod_games[room]['host']:\n await amor_manager.say(\"{}, you cannot leave the game you're the host\".format(player))\n else:\n del tod_games[room]['participants'][player.lower()]\n await amor_manager.say(\"{} has left Truth or Dare.\".format(player))", "def handle_autovoiceoff(bot, ievent):\n try:\n ievent.chan.data.autovoice = 0\n ievent.reply('autovoice disabled on %s' % ievent.channel)\n except TypeError: ievent.reply('no %s channel in database' % ievent.channel)", "async def end(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Game Over in {}! Thank you to {} for hosting this game!\".format(room, host))\n del tod_games[room]", "def devoice(self,nick):\n self.logger.debug(\"devoicing %s\" % nick)\n self.connection.mode(self.config[\"IRC/channel\"],\"-v \"+nick)", "def leave(bot, event, conversation_id=None, *args):\n\n arglist = list(args)\n\n if conversation_id == \"quietly\":\n arglist.append(\"quietly\")\n conversation_id = False\n\n if not conversation_id:\n conversation_id = event.conv_id\n\n yield from command.run(bot, event, *[\"convleave\", \"id:\" + conversation_id, \" \".join(arglist)])", "def stop_speaking(self):\n self.ts.send_message(\"Okay, I'll shut up for a bit. !start_speaking when you want me to speak again.\")\n self.allowed_to_chat = False", "async def disconnect_voice_channel(client: \"Client\", guild: Union[\"Guild\", int]) -> None:\n guild_id = guild if isinstance(guild, int) else guild.id\n await update_voice_state(client, guild_id, None)", "async def stop(self, ctx):\n server = ctx.message.server\n state = self.get_voice_state(server)\n\n if state.is_playing():\n player = state.player\n player.stop()\n\n try:\n state.speech_player.cancel()\n state.audio_player.cancel()\n del self.voice_states[server.id]\n await state.voice.disconnect()\n await self.bot.say('Stopped.')\n except:\n await self.bot.say('Couldn\\'t stop.')\n pass" ]
[ "0.8343599", "0.77186316", "0.7650133", "0.76289874", "0.758633", "0.7526717", "0.74780494", "0.7415649", "0.7250327", "0.7245011", "0.7157922", "0.70776135", "0.70071197", "0.6973833", "0.6932585", "0.69310313", "0.6870495", "0.6870495", "0.68690455", "0.68123126", "0.6805128", "0.6641129", "0.66123444", "0.6560766", "0.65141135", "0.6481257", "0.64475113", "0.64318085", "0.64069206", "0.6400439" ]
0.79554284
1
This function will get the whole dictionary of ytm. Here we set the low bound to 0.001, the high bound to 0.1, and the epsilon to 1e-10.
def get_ytm_dict(self):
    ytm = self.ytm
    for term in self.Rmn.keys():
        ytm = Bootstrapping.bisection(self, 0.001, 0.1, 1e-10, 2 * term, self.Rmn, ytm)
    return ytm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def yvals(self):\n return self.germs", "def yvals(self):\n return self.germs", "def bisection(self,\n low_bound: float,\n up_bound: float,\n epsilon: float,\n n: int,\n Rmn: dict,\n ytm: dict) -> float:\n for i in range(100):\n y = (low_bound + up_bound) / 2\n if abs(Bootstrapping.f(self,y, n, Rmn, ytm)) < epsilon:\n ym = ytm[list(ytm.keys())[-1]]\n m = list(ytm.keys())[-1]\n for j in range(list(ytm.keys())[-1] + 1, n + 1):\n ytm[j] = ym + (y - ym) * (j - m) / (n - m)\n return ytm\n else:\n if Bootstrapping.f(self,y, n, Rmn, ytm) * Bootstrapping.f(self,up_bound, n, Rmn, ytm) < 0:\n low_bound = y\n elif Bootstrapping.f(self,y, n, Rmn, ytm) * Bootstrapping.f(self,low_bound, n, Rmn, ytm) < 0:\n up_bound = y", "def get_stig_y(self):\n raise NotImplementedError", "def y(self):\n return self._kml['y']", "def return_loose_bounds(maxlum=None):\n return[(None,None), (10**-6, None), (2., 350),\n (None, -10**-6), (None, None)]", "def load_data(tetrode_number=TETRODE_NUMBER):\n print(\"Loading data...\")\n data, timed_activations, labels = formatData(tetrode_number,BASENAME,CONV,timed=True)\n print(len(timed_activations))\n x, y = getXY()\n print(\"Done!\")\n\n return dict(\n data=data,\n labels=[np.argmax(y) for y in labels],\n timed_activations=timed_activations,\n x=x,\n y=y,\n freq=50.0\n )", "def yvals(self):\n raise NotImplementedError(\"Derived class must implement this.\")", "def get_hyperparameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), nu=(0.0 ,inf), r=(0.0, inf), s=(0.0, inf))\n return params", "def learning_rate_range():\n # Lower and upper bounds\n #######\n lower_bound = 0.1 \n upper_bound = 1e-6\n #######\n return lower_bound, upper_bound", "def range_8(configuration):\n range_dict_all = {\n # updated aLIGO design sensitivity range from 197.5 to 181.5 Mpc on 9 Apr 2018 to reflect T1800044-v4\n \"HL\" : {'H1' : 181.5, 'L1' : 181.5},\n \"HLV\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"HLVK\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0},\n \"HLVKI\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, 'K1' : 160.0, 'I1' : 181.5},\n \"GW170817\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26, 'V1': 58/2.26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"GW170817_without_Virgo\" : {'H1': 107/2.26 *1.26 , 'L1': 218/2.26},\n \"GW170814\" : {'H1': 53, 'L1': 98, 'V1': 26}, # 1.26 is the improvement factor for H1's range due to data processing.\n \"design\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3 },\n \"early\" : {'H1' : 60., 'L1': 60.},\n \"half_ligo\" : {'H1' : 99, 'L1' : 99, 'V1': 128.3 },\n \"half_virgo\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 64 },\n \"nosrm\" : {'H1' : 159, 'L1' : 159, 'V1': 109 },\n \"india\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 },\n \"kagra\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0},\n \"bala\" : {'H1' : 181.5, 'H2' : 181.5, 'L1' : 181.5, 'V1': 128.3, \\\n \"I1\" : 181.5 , \"K1\" : 160.0},\n \"sa\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"sa2\" : {'H1' : 181.5, 'L1' : 181.5, 'V1': 128.3, \"I1\" : 181.5 , \\\n \"K1\" : 160.0, \"S1\":181.5},\n \"steve\" : {'H1' : 160.0, 'L1' : 160.0, 'V1': 160.0, \"I1\" : 160.0 },\n \"s6vsr2\" : {'H1' : 20., 'L1' : 20., 'V1': 8. 
}\n }\n return(range_dict_all[configuration])", "def test_yld(self):\n df = dep.read_yld(get_path('yld.txt'))\n self.assertEquals(len(df.index), 10)\n self.assertAlmostEquals(df['yield_kgm2'].max(), 0.93, 2)", "def lowpass_model(t, y, lowthresh=10.0, sigthresh=10.0, max_iter=3, Pres=1000., plot_steps=True):\n def plot_pgram(t, y, periods=None, title=None):\n periods, power = pgram(t, y, periods=periods)\n plt.figure(figsize=(16,3))\n ax = plt.gca()\n ax.plot(periods, power, 'k-')\n if title is not None:\n ax.set_title(title)\n ax.set_xlabel('Period [yr]')\n ax.set_ylabel('Power')\n \n if plot_steps:\n plot_pgram(t, y, title='Original (std=%0.3g)' % y.std())\n\n y_hipass = y\n model_params = []\n newP = np.arange(lowthresh, duration(t, 'yr'), lowthresh/Pres)\n for i in range(max_iter):\n newP, power = pgram(t, y_hipass, periods=newP)\n pk_ixs = peak_indices(power, thresh=sigthresh)\n if pk_ixs.size == 0:\n break\n peaks = np.sort(newP[pk_ixs])[::-1]\n pk = peaks[0]\n y_hipass, subparams = subtract_sine(t, y_hipass, pk)\n model_params.append(subparams)\n if plot_steps:\n plot_pgram(t, y_hipass, title='Round %i result: subtracted %0.3f (std=%0.3g)' % (i, pk, y_hipass.std()))\n\n if plot_steps:\n model_points = y.size\n t_model = np.linspace(t.jyear.min(), t.jyear.max(), model_points)\n t_model = astropy.time.Time(t_model, format='jyear', scale='utc')\n y_model = sum_sines(t_model, model_params)\n y_model += y.mean()\n y_residual = y - sum_sines(t, model_params)\n plot_pgram(t_model, y_model, title='Lo-Pass Model')\n plot_pgram(t, y_residual, title='Original - Lo-Pass Model (std=%0.3g)' % (y_residual.std()))\n\n return model_params", "def getActualConfig(self) -> object:\n if not self.debug:\n ntraces = int(self.getNumberOfTraces())\n traces = []\n for i in range(1,ntraces+1):\n self.selectTrace(i)\n data = self.getData()\n if i == 1:\n title = \"S11\"\n elif i == 2:\n title = \"S21\"\n elif i == 3:\n title = \"S12\"\n else:\n title = \"S22\"\n trace={\n 'number': i,\n 'xMin': self.getStartFrequency(),\n 'xMax': self.getStopFrequency(),\n 'yMin': self.getmindbm(i),#min([x['y'] for x in data]), #getmindbm(),\n 'yMax': self.getmaxdbm(i),#max([x['y'] for x in data]), #getmaxdbm(),\n 'xScale': \"linear\",#self.getxscale()\n 'yScale': \"linear\",#self.getyscale(),\n 'type': \"bode\",#self.getTypeFormat(),\n 'title': title,#self.getTraceTitle(i),\n 'xLabel': \"Freq [Hz]\",#getxLabel(),\n 'yLabel': \"dBm\", #getyLabel()\n 'data': data,\n 'yPDiv': self.getYPDiv(i)\n }\n traces.append(trace) \n ret = {\n 'traces': traces, \n 'sweepResolution': self.getSweepResolution(),\n 'IFBW': self.getIFBW() \n }\n else:\n trace1 = {\n 'number': 1,\n 'xMin': 100,\n 'xMax': 1000,\n 'yMin': 100,\n 'yMax': 1000,\n 'xScale': 'logarithmic',\n 'yScale': 'logarithmic',\n 'type': 'bode',\n 'title': 'S11',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 100,'y': 100},\n {'x': 200,'y': 150},\n {'x': 500,'y': 300},\n {'x': 1000,'y': 800}\n ]\n }\n trace2 = {\n 'number': 2,\n 'xMin': 1,\n 'xMax': 100,\n 'yMin': 1,\n 'yMax': 1000,\n 'xScale': 'linear',\n 'yScale': 'linear',\n 'type': 'bode',\n 'title': 'S21',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 1,'y': 100},\n {'x': 20,'y': 250},\n {'x': 50,'y': 200},\n {'x': 100,'y': 600}\n ]\n }\n trace3 = {\n 'number': 3,\n 'xMin': 500,\n 'xMax': 10000,\n 'yMin': 100,\n 'yMax': 10000,\n 'xScale': 'linear',\n 'yScale': 'logarithmic',\n 'type': 'bode',\n 'title': 'S12',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': 
[\n {'x': 500,'y': 100},\n {'x': 2000,'y': 1000},\n {'x': 5000,'y': 3000},\n {'x': 10000,'y': 8000}\n ]\n }\n trace4 = {\n 'number': 4,\n 'xMin': 100,\n 'xMax': 10000,\n 'yMin': 500,\n 'yMax': 10000,\n 'xScale': 'logarithmic',\n 'yScale': 'linear',\n 'type': 'bode',\n 'title': 'S22',\n 'xLabel': 'Freq',\n 'yLabel': 'dBm',\n 'yPDiv': 10,\n 'data': [\n {'x': 100,'y': 500},\n {'x': 2000,'y': 5000},\n {'x': 5000,'y': 2000},\n {'x': 10000,'y': 4000}\n ]\n }\n ret = {\n 'traces': [ trace1, trace2, trace3, trace4 ], \n 'sweepResolution': 401,\n 'IFBW': 10000 \n }\n return ret", "def set_tlines(ty,slist):\r\n t = []\r\n for i in range(numpops-1):\r\n t.append([slist[5][4][i][1],slist[5][4][i][2],slist[5][4][i][3]]) ## [time, upper ci, lower ci]\r\n ty = []\r\n if gv[\"localyscale\"] == -1:\r\n yint = gv[\"line0y\"] - gv[\"lastt_lower_y\"]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n if gv[\"eventimes\"] == False:\r\n tmax = slist[5][4][numpops-2][3] ## bottom of confidence interval of largest(oldest) t\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j]*yint)/tmax)\r\n else:\r\n## ty[i].append(gv[\"line0y\"] - ((i+1)/float(numpops+1)*yint)/tmax)\r\n ty[i].append(gv[\"line0y\"] - yint * (i+1)/float(numpops) )\r\n else:\r\n timeumean = slist[7][4][1]\r\n scaleumean = slist[7][4][2]\r\n for i in range(numpops-1):\r\n ty.append([])\r\n for j in range(3):\r\n ty[i].append(gv[\"line0y\"] - (t[i][j] * (scaleumean/timeumean/1e6)* gv[\"localyscale\"]))\r\n if ty[i][j] < gv[\"lineINFy\"]:\r\n print ( \" time line too low in graph, reduce local y scale (-y value) \")\r\n gv[\"lastt_lower_y\"] = ty[numpops-2][2]\r\n## print \"ty : \",ty\r\n return ty", "def satPos(ephi_dict, t_eval):\n \n c_rs = ephi_dict['crs']\n delta_n = ephi_dict['deltan']\n M_0 = ephi_dict['M0']\n c_uc = ephi_dict['cuc']\n ecc = ephi_dict['ecc']\n c_us = ephi_dict['cus']\n roota = ephi_dict['roota']\n t_oe = ephi_dict['toe']\n Omega0 = ephi_dict['Omega0']\n c_is = ephi_dict['cis']\n i0 = ephi_dict['i0']\n c_rc = ephi_dict['crc']\n c_ic = ephi_dict['cic']\n omega = ephi_dict['omega']\n Omega_dot = ephi_dict['Omegadot']\n i_dot = ephi_dict['idot']\n #codes = ephi_dict['codes']\n #weekno = ephi_dict['weekno']\n #L2flag = ephi_dict['L2flag']\n #svaccur = ephi_dict['svaccur']\n #svhealth = ephi_dict['svhealth']\n #tgd = ephi_dict['tgd']\n #iodc = ephi_dict['iodc']\n #tom = ephi_dict['tom']\n #datetime = ephi_dict['datetime']\n \n mu = 3.986005e14 # WGS 84 value of the earth's gravitational constant for GPS user\n Omega_e_dot = 7.2921151467e-5 # WGS 84 value of the earth's rotation rate\n \n a = roota**2 # Semi-major axis\n n0 = math.sqrt(mu/(a**3)) # Computed mean motion (rad/sec)\n \n tgps_sec = Time(t_eval).gps % 604800\n t_k = tgps_sec - t_oe # Time from ephemeris reference epoch\n \n # Account for beginning or end of week crossovers\n if t_k > 302400:\n t_k = t_k - 604800\n elif t_k < -302400:\n t_k = t_k + 604800\n \n n = n0 + delta_n # Corrected mean motion\n M_k = M_0 + n * t_k # Mean anomaly\n M_k = (M_k + 2*np.pi) % (2*np.pi)\n \n # Kepler's Equation for Eccentric Anomaly (may be solved by iteration) (radians)\n E_k = M_k # First guess for E_k\n M_k_delta = 1 # difference between two iterations\n \n #print('----------------------------')\n #print('t_k = %d' % t_k)\n #print('M_k = %12.6f' % M_k)\n for i in range(6):\n M_k_temp = E_k + ecc*math.sin(E_k)\n M_k_delta = M_k - M_k_temp\n # print('i = %d, M_k_temp = %12.6f, M_k_delta = %3.3e' % (i, M_k_temp, M_k_delta))\n E_k = E_k + M_k_delta\n if abs(M_k_delta) < 
1e-12:\n break\n \n E_k = (E_k + 2*np.pi) % (2*np.pi)\n \n # True Anomaly\n sinv_k = math.sqrt(1 - ecc**2)*math.sin(E_k) #/ (1 - ecc*math.cos(E_k))\n cosv_k = (math.cos(E_k) - ecc) #/ (1 - ecc*math.cos(E_k))\n v_k = math.atan2(sinv_k, cosv_k)\n \n #E_k = math.acos((ecc+cosv_k)/(1+ecc*cosv_k)) # Eccentric Anomaly\n \n Phi_k = (v_k + omega) % (2*np.pi) # Argument of Latitude\n \n # Second Harmonic Perturbations\n delta_u_k = c_us * math.sin(2*Phi_k) + c_uc * math.cos(2*Phi_k) # Argument of Latitude Correction\n delta_r_k = c_rs * math.sin(2*Phi_k) + c_rc * math.cos(2*Phi_k) # Radius Correction\n delta_i_k = c_is * math.sin(2*Phi_k) + c_ic * math.cos(2*Phi_k) # Inclination Correction\n \n u_k = Phi_k + delta_u_k # Corrected Argument of Latitude\n r_k = a*(1-ecc*math.cos(E_k)) + delta_r_k # Corrected Radius\n i_k = i0 + delta_i_k + i_dot * t_k # Corrected Inclination\n \n # Positions in orbital plane\n x_k_dash = r_k * math.cos(u_k)\n y_k_dash = r_k * math.sin(u_k)\n \n # Corrected longitude of ascending node\n Omega_k = Omega0 + (Omega_dot - Omega_e_dot) * t_k - Omega_e_dot * t_oe\n Omega_k = uwr(Omega_k)\n \n # Earth-fixed coordinates\n x_k = x_k_dash * math.cos(Omega_k) - y_k_dash * math.cos(i_k) * math.sin(Omega_k)\n y_k = x_k_dash * math.sin(Omega_k) + y_k_dash * math.cos(i_k) * math.cos(Omega_k)\n z_k = y_k_dash * math.sin(i_k)\n \n return np.array([x_k, y_k, z_k])", "def minor_yvals(self):\n raise NotImplementedError(\"Derived class must implement this.\")", "def eigenvalue_label_dict_Nex(Nmax,verbose=False):\n\n eigenvalue_label_dict = {\n float(Nex): Nex\n for Nex in mcscript.utils.value_range(Nmax%2,Nmax,2)\n }\n return eigenvalue_label_dict", "def y(self):\n return self[\"y\"]", "def r8_y1x(t):\n y1x = 20.0 / (1.0 + 19.0 * np.exp(- 0.25 * t))\n return(y1x)", "def get_semiMajorAxes_dict():\n de431 = {\n # \"BODY1_semiMajorAxis\" : 2.2031780000000021E+04 ,\n # \"BODY2_semiMajorAxis\" : 3.2485859200000006E+05 ,\n # \"BODY3_semiMajorAxis\" : 4.0350323550225981E+05 ,\n # \"BODY4_semiMajorAxis\" : 4.2828375214000022E+04 ,\n # \"BODY5_semiMajorAxis\" : 1.2671276480000021E+08 ,\n # \"BODY6_semiMajorAxis\" : 3.7940585200000003E+07 ,\n # \"BODY7_semiMajorAxis\" : 5.7945486000000080E+06 ,\n # \"BODY8_semiMajorAxis\" : 6.8365271005800236E+06 ,\n # \"BODY9_semiMajorAxis\" : 9.7700000000000068E+02 ,\n # \"BODY10_semiMajorAxis\" : 1.3271244004193938E+11 ,\n\n \"BODY199_semiMajorAxis\" : 57.91e6 ,\n \"BODY299_semiMajorAxis\" : 108.21e6 ,\n \"BODY399_semiMajorAxis\" : 149.60e6 ,\n \"BODY499_semiMajorAxis\" : 227.92e6 ,\n \"BODY599_semiMajorAxis\" : 778.57e6 ,\n \"BODY699_semiMajorAxis\" : 1433.53e6 ,\n \"BODY799_semiMajorAxis\" : 2872.46e6 ,\n \"BODY899_semiMajorAxis\" : 4495.06e6 ,\n \"BODY999_semiMajorAxis\" : 5906.38e6 ,\n\n \"BODY301_semiMajorAxis\" : 0.3844e6 ,\n\n \"BODY401_semiMajorAxis\" : 9378.0 ,\n \"BODY402_semiMajorAxis\" : 23459.0 ,\n\n \"BODY501_semiMajorAxis\" : 431.8e3 ,\n \"BODY502_semiMajorAxis\" : 671.1e3 ,\n \"BODY503_semiMajorAxis\" : 1070.4e3 ,\n \"BODY504_semiMajorAxis\" : 1882.7e3 ,\n \"BODY505_semiMajorAxis\" : 181.4e3 ,\n\n \"BODY601_semiMajorAxis\" : 185.52e3 ,\n \"BODY602_semiMajorAxis\" : 238.02e3 ,\n \"BODY603_semiMajorAxis\" : 294.64e3 ,\n \"BODY604_semiMajorAxis\" : 377.60e3 ,\n \"BODY605_semiMajorAxis\" : 527.04e3 ,\n \"BODY606_semiMajorAxis\" : 1221.83e3 ,\n \"BODY607_semiMajorAxis\" : 1481.1e3 ,\n \"BODY608_semiMajorAxis\" : 3561.3e3 ,\n # \"BODY609_semiMajorAxis\" : 5.531110414633374E-01 ,\n # \"BODY610_semiMajorAxis\" : 1.266231296945636E-01 ,\n # 
\"BODY611_semiMajorAxis\" : 3.513977490568457E-02 ,\n # \"BODY615_semiMajorAxis\" : 3.759718886965353E-04 ,\n # \"BODY616_semiMajorAxis\" : 1.066368426666134E-02 ,\n # \"BODY617_semiMajorAxis\" : 9.103768311054300E-03 ,\n\n \"BODY701_semiMajorAxis\" : 190.90e3 ,\n \"BODY702_semiMajorAxis\" : 266.00e3 ,\n \"BODY703_semiMajorAxis\" : 436.39e3 ,\n \"BODY704_semiMajorAxis\" : 583.50e3 ,\n \"BODY705_semiMajorAxis\" : 129.90e3 ,\n\n \"BODY801_semiMajorAxis\" : 354.76e3,\n\n \"BODY901_semiMajorAxis\" : 19596 ,\n \"BODY902_semiMajorAxis\" : 48690 ,\n \"BODY903_semiMajorAxis\" : 64740 ,\n \"BODY904_semiMajorAxis\" : 57780,\n\n # \"BODY2000001_semiMajorAxis\" : 6.3130000000000003E+01 ,\n # \"BODY2000002_semiMajorAxis\" : 1.3730000000000000E+01 ,\n # \"BODY2000003_semiMajorAxis\" : 1.8200000000000001E+00 ,\n # \"BODY2000004_semiMajorAxis\" : 1.7289999999999999E+01 ,\n # \"BODY2000006_semiMajorAxis\" : 9.3000000000000005E-01 ,\n # \"BODY2000007_semiMajorAxis\" : 8.5999999999999999E-01 ,\n # \"BODY2000010_semiMajorAxis\" : 5.7800000000000002E+00 ,\n # \"BODY2000015_semiMajorAxis\" : 2.1000000000000001E+00 ,\n # \"BODY2000016_semiMajorAxis\" : 1.8100000000000001E+00 ,\n # \"BODY2000029_semiMajorAxis\" : 8.5999999999999999E-01 ,\n # \"BODY2000052_semiMajorAxis\" : 1.5900000000000001E+00 ,\n # \"BODY2000065_semiMajorAxis\" : 9.1000000000000003E-01 ,\n # \"BODY2000087_semiMajorAxis\" : 9.8999999999999999E-01 ,\n # \"BODY2000088_semiMajorAxis\" : 1.0200000000000000E+00 ,\n # \"BODY2000433_semiMajorAxis\" : 4.463E-4 ,\n # \"BODY2000511_semiMajorAxis\" : 2.2599999999999998E+00 ,\n # \"BODY2000704_semiMajorAxis\" : 2.1899999999999999E+00 \n }\n return de431", "def get_ytm_discount_data(self):\n ytm=Bootstrapping.get_ytm_dict(self)\n data = pd.DataFrame()\n for i in ytm.keys():\n data.loc[i / 2, 'Yield to maturity'] = ytm[i]\n data.loc[i / 2, 'discount_rate'] = Bootstrapping.P_Tn(self,ytm[i], i)\n return data", "def ytrue(t):\n return np.array([np.exp(lam*t)])", "def define_range():\n\n def_range = {'lt': [0.0, 24.0],\n 'lon': [0.0, 360.0],\n 'angle': [0.0, 2.0 * np.pi]}\n\n return def_range", "def y(self):\n values = self._interpolate_table(\"y\")\n values += self._corrections((\"ortho_eop\", iers.ortho_eop, 1, 1e-6), (\"pmsdnut2\", iers.pmsdnut2, 1, 1e-6))\n return values", "def getETA():", "def getETA():", "def get_exponential_detection_thresholds():\n \n m = utils.MAX_DETECTION_THRESHOLD\n n = utils.NUM_DETECTION_THRESHOLDS\n y = np.exp(np.log(m) / n)\n return y ** np.arange(1, n + 1)", "def test_y_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.y.equals(atom.mnb.y)\n assert atom.y.equals(atom.lr.y)", "def testTsysMapNN(self):\n self._runTest('tsys', False, [1,3,5,7,9,15], 'nearest,nearest',self.spwmap)" ]
[ "0.5608724", "0.5608724", "0.54514956", "0.52048004", "0.51582503", "0.51236594", "0.5118129", "0.511333", "0.51034176", "0.49981782", "0.49807763", "0.49765843", "0.4958691", "0.49550095", "0.49406016", "0.4939153", "0.49358478", "0.49011344", "0.4895696", "0.48733735", "0.4873016", "0.4871062", "0.48655587", "0.48626235", "0.4862409", "0.48416868", "0.48416868", "0.48404828", "0.48327935", "0.48272935" ]
0.715156
0
This function is used to get the dataframe of yield to maturity and discount rate values.
def get_ytm_discount_data(self):
    ytm=Bootstrapping.get_ytm_dict(self)
    data = pd.DataFrame()
    for i in ytm.keys():
        data.loc[i / 2, 'Yield to maturity'] = ytm[i]
        data.loc[i / 2, 'discount_rate'] = Bootstrapping.P_Tn(self,ytm[i], i)
    return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_treasury_yield(interval: str, maturity: str) -> pd.DataFrame:\n d_interval = {\"d\": \"daily\", \"w\": \"weekly\", \"m\": \"monthly\"}\n d_maturity = {\"3m\": \"3month\", \"5y\": \"5year\", \"10y\": \"10year\", \"30y\": \"30year\"}\n\n url = f\"https://www.alphavantage.co/query?function=TREASURY_YIELD&interval={d_interval[interval]}&ma\"\n url += f\"turity={d_maturity[maturity]}&apikey={cfg.API_KEY_ALPHAVANTAGE}\"\n r = requests.get(url, headers={\"User-Agent\": get_user_agent()})\n if r.status_code != 200:\n return pd.DataFrame()\n\n data = pd.DataFrame(r.json()[\"data\"])\n data[\"date\"] = pd.to_datetime(data[\"date\"])\n data[\"Yield\"] = data[\"value\"].astype(float)\n data = data.drop(columns=[\"value\"])\n\n return data", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def calc(self) -> pd.DataFrame:\n raise NotImplementedError", "def dataframe(self):\n return self.generator.dataframe", "def yield_trend(df, yield_type='rainfed'):\n yield_type_dict = {'all': 'yield', 'rainfed':'yield_rainfed','irrigated':'yield_irr'}\n # Estimate regional yield trend and detrend\n trend_model_txt = \"Q('%s')\"%yield_type_dict[yield_type] + \"~ year\"\n trend_results = smf.ols(trend_model_txt, data=df).fit()\n return trend_results", "def timeseries_report(self):\n report = pd.DataFrame(index=self.price.index)\n report.loc[:, \"FR Energy Throughput (kWh)\"] = self.ene_results['ene']\n report.loc[:, \"FR Energy Throughput Up (Charging) (kWh)\"] = self.variables['regu_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Up (Discharging) (kWh)\"] = self.variables['regu_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Throughput Down (Charging) (kWh)\"] = self.variables['regd_c']*self.krd_avg*self.dt*self.storage.rte\n report.loc[:, \"FR Energy Throughput Down (Discharging) (kWh)\"] = self.variables['regd_d']*self.krd_avg*self.dt\n report.loc[:, \"FR Energy Settlement Price Signal ($/kWh)\"] = self.price\n report.loc[:, 'Regulation Up (Charging) (kW)'] = self.variables['regu_c']\n report.loc[:, 'Regulation Up (Discharging) (kW)'] = self.variables['regu_d']\n report.loc[:, 'Regulation Down (Charging) (kW)'] = self.variables['regd_c']\n report.loc[:, 'Regulation Down (Discharging) (kW)'] = self.variables['regd_d']\n report.loc[:, \"Regulation Up Price Signal ($/kW)\"] = self.p_regu\n report.loc[:, \"Regulation Down Price Signal ($/kW)\"] = self.p_regd\n\n return report", "def simulated(self):\n # Join #\n df = self.age_indicators.left_join(self.bef_ft, on='forest_type')\n # Select only some columns #\n columns_of_interest = ['ave_age', 'time_step', 'area', 'biomass', 'bef_tot', 'density']\n columns_of_interest += list(self.parent.classifiers.columns)\n # Drop the other columns #\n df = df[columns_of_interest].copy()\n # Divide biomass by the expansion factor #\n df['merch_c_ha'] = df['biomass'] / 
df['bef_tot']\n df['merch_vol_ha'] = df['merch_c_ha'] / df['density']\n # Return #\n return df", "def expected_df():\n return pd.DataFrame(\n {\n \"growth\": [0.873922, 0.814298, 0.0],\n \"gene\": [\"b2935\", \"b0723\", \"b0451\"],\n \"status\": [\"optimal\", \"optimal\", \"optimal\"],\n }\n )", "def exchanges_df(self) -> pd.DataFrame:\n mid_price = self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)\n maker_buy_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n maker_sell_result = self.connectors[self.maker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n taker_buy_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, True, self.order_amount)\n taker_sell_result = self.connectors[self.taker_exchange].get_price_for_volume(self.taker_pair, False, self.order_amount)\n maker_buy_spread_bps = (maker_buy_result.result_price - taker_buy_result.result_price) / mid_price * 10000\n maker_sell_spread_bps = (taker_sell_result.result_price - maker_sell_result.result_price) / mid_price * 10000\n columns = [\"Exchange\", \"Market\", \"Mid Price\", \"Buy Price\", \"Sell Price\", \"Buy Spread\", \"Sell Spread\"]\n data = []\n data.append([\n self.maker_exchange,\n self.maker_pair,\n float(self.connectors[self.maker_exchange].get_mid_price(self.maker_pair)),\n float(maker_buy_result.result_price),\n float(maker_sell_result.result_price),\n int(maker_buy_spread_bps),\n int(maker_sell_spread_bps)\n ])\n data.append([\n self.taker_exchange,\n self.taker_pair,\n float(self.connectors[self.taker_exchange].get_mid_price(self.maker_pair)),\n float(taker_buy_result.result_price),\n float(taker_sell_result.result_price),\n int(-maker_buy_spread_bps),\n int(-maker_sell_spread_bps)\n ])\n df = pd.DataFrame(data=data, columns=columns)\n return df", "def get_df(self):\n data = self.load_data()\n userID, itemID = self.get_user_and_item_ids(data)\n rating = data[:, 1]\n data_np = np.stack((userID, itemID, rating), axis=-1)\n df = pd.DataFrame(data_np)\n df.columns = [\"userID\", \"itemID\", \"rating\"]\n return df", "def Generating_stock_daily_return_table():\r\n #Getting Names list\r\n Profitfile='pap//CombProfit.csv'\r\n path='D://Doktorat Marek//dane//'\r\n ProfitsFilePath=path+Profitfile\r\n quarterly_profit=pd.read_csv(ProfitsFilePath,index_col=0,header=0,parse_dates=True)\r\n Names_list=quarterly_profit.columns.tolist()\r\n \r\n Stock_returns=pd.DataFrame(index=pd.date_range('19980101','20180918',freq='D'),columns=Names_list)\r\n for name in Names_list:\r\n Stock_returns[name]=1+stock_returns(name)['Return']/100\r\n Stock_returns[name].fillna(1,inplace=True)\r\n \r\n WIG=pd.read_excel('D://Doktorat Marek//dane//notowania//Infostrefa//PL9999999995.xls')\r\n WIG['Date']=pd.to_datetime(WIG['Data'])\r\n WIG.set_index('Date',inplace=True)\r\n Stock_returns['WIG'] = 1+WIG['Zmiana']/100\r\n Stock_returns['WIG'].fillna(1,inplace=True)\r\n Stock_returns['Average']=Stock_returns.mean(1)\r\n \r\n FileReturns='D://Doktorat Marek//dane//Notowania//Stock_returns.csv'\r\n Stock_returns.to_csv(FileReturns,encoding='UTF-8')\r\n return 0", "def get_summary(self, df):\n results_df = pd.DataFrame({'Energy kWh': self.get_all_periods(df).sum()})\n results_df['Prices $/kWh'] = self.deliveryPrice + self.get_rates()\n results_df['Value $'] = results_df['Energy kWh'] * results_df['Prices $/kWh']\n return(results_df)", "def df(self):\n data = {\"sites\": self.sites, \"values\": self.values,\n \"stdeviations\": 
self.stdeviations}\n return pd.DataFrame(data, columns=[\"sites\", \"values\", \"stdeviations\"])", "def get_data(self)->pd.DataFrame:\n pass", "def bdt(Yield, Volatility):\n\n # let j be the year (starting from 0), j=1 corresponds to 2 in the table\n j = 1\n y, sig = Yield[j], Volatility[j]\n disc_val = bT.real_discount(1) # should be equal to 1/(1+yield)^2\n eqn1 = -(1/(1+y)**(j+1)) + disc_val\n eqn2 = -sig*2 + sp.log(r_u/r_d)\n\n j = 2\n y, sig = Yield[j], Volatility[j]\n # first equation of all\n disc_val = bT.real_discount(2)\n eqn3 = -(1/(1+y)**(j+1)) + disc_val\n # we know that rud is sqrt of the prod of ru and rd\n eqn4 = -r_ud + sp.sqrt(r_uu*r_dd)\n # we know what the log of the yield ratio must be\n bT_u = deepcopy(bT.find('H')) # H subtree\n bT_d = deepcopy(bT.find('T')) # T subtree\n yield_rates = [bT_u.real_discount(1)**(-1/2) - 1, bT_d.real_discount(1)**(-1/2) - 1] # should only have two elements\n eqn5 = -2*sig + sp.log(yield_rates[0]/yield_rates[1])\n\n j = 3\n y, sig = Yield[j], Volatility[j]\n disc_val = bT.real_discount(3)\n eqn6 = -(1/(1+y)**(j+1)) + disc_val\n eqn7 = -r_uud + sp.sqrt(r_uuu*r_udd)\n eqn8 = -r_udd + sp.sqrt(r_uud*r_ddd)\n # equations with yields: using bT_u and bT_d again, now discount 2 years\n yield_rates = [bT_u.real_discount(2)**(-1/3) - 1, bT_d.real_discount(2)**(-1/3) - 1]\n eqn9 = -2*sig + sp.log(yield_rates[0]/yield_rates[1])\n\n j = 4\n y, sig = Yield[j], Volatility[j] # y for yield, sig for volatility\n disc_val = bT.real_discount(4)\n eqn10 = -(1/(1+y)**(j+1)) + disc_val\n eqn11 = -r_uuud + sp.sqrt(r_uuuu*r_uudd)\n eqn12 = -r_uudd + sp.sqrt(r_uuud*r_uddd)\n eqn13 = -r_uddd + sp.sqrt(r_uudd*r_dddd)\n # bond values and yield\n yield_rates = [bT_u.real_discount(3) ** (-1 / 4) - 1, bT_d.real_discount(3) ** (-1 / 4) - 1]\n eqn14 = -2*sig + sp.log(yield_rates[0]/yield_rates[1])\n def fun(X):\n # this variables object can now be substituted by the second element of bdt_sympy\n # variables = [r_u, r_d, r_uu, r_ud, r_dd, r_uuu, r_uud, r_udd, r_ddd, r_uuuu, r_uuud, r_uudd, r_uddd, r_dddd]\n variables = mybdt[1] # substitution in question\n tuples = list(zip(variables, X))\n eqn1_val = np.array([float(np.abs(eqn1.subs(tuples)))])\n eqn2_val = np.array([float(np.abs(eqn2.subs(tuples)))])\n eqn3_val = np.array([float(np.abs(eqn3.subs(tuples)))])\n eqn4_val = np.array([float(np.abs(eqn4.subs(tuples)))])\n eqn5_val = np.array([float(np.abs(eqn5.subs(tuples)))])\n eqn6_val = np.array([float(np.abs(eqn6.subs(tuples)))])\n eqn7_val = np.array([float(np.abs(eqn7.subs(tuples)))])\n eqn8_val = np.array([float(np.abs(eqn8.subs(tuples)))])\n eqn9_val = np.array([float(np.abs(eqn9.subs(tuples)))])\n eqn10_val = np.array([float(np.abs(eqn10.subs(tuples)))])\n eqn11_val = np.array([float(np.abs(eqn11.subs(tuples)))])\n eqn12_val = np.array([float(np.abs(eqn12.subs(tuples)))])\n eqn13_val = np.array([float(np.abs(eqn13.subs(tuples)))])\n eqn14_val = np.array([float(np.abs(eqn14.subs(tuples)))])\n return np.hstack([eqn1_val, eqn2_val, eqn3_val, eqn4_val, eqn5_val, eqn6_val, eqn7_val, eqn8_val, eqn9_val,\n eqn10_val, eqn11_val, eqn12_val, eqn13_val, eqn14_val])\n X_0 = [.01] * 14 # original guess: set a;; rates at initially known short rate\n # X_0 = [.14,.09,.19,.13,.09,.21,.16,.11,.08,.25,.19,.14,.11,.08] # guesses close to true values\n sol = newton_krylov(fun, X_0, f_tol=f_tol) # one can change the f_tol argument in newton_krylov for better results\n # sol = anderson(fun, X_0, f_tol=f_tol)\n rates_list = [.1]\n rates_list += list(sol)\n return rates_list", "def 
_initialize_df(self, df):\n df['values'] = (self.tc.instrument_returns['cumulative'] *\n self.tc.starting_cash).mul(self.target_weights, axis=1).values * (1 - self.tc.commission)\n df['allocations'] = self.df['values'].div(df['values'].sum(axis=1), axis=0)\n df['returns'] = (df['values'].sum(axis=1)).pct_change(1).fillna(0)", "def get_par_yield(self, spot, t = 0, k = 1, coupon_k = 1, fp = 1):\n coupon_freq = 1 / coupon_k\n try:\n T = [item for item in self.maturity]\n except TypeError:\n T = [self.maturity]\n spot = [spot]\n df = pd.DataFrame(columns = ['T'])\n df['T'] = T\n df['n_c'] = np.floor(df['T'] / coupon_freq)\n t_c = np.linspace(0, len(df) * coupon_freq, len(df)+1)[1:]\n interp_spot = np.interp(t_c.tolist(), T, spot)\n z = self.get_discount_function(pd.Series(interp_spot), pd.Series(t_c), t, k)\n df['accrued'] = df['T'] % coupon_freq / coupon_freq\n df['m_z'] = self.get_discount_function(spot, self.maturity, t, k)\n df['par'] = df.apply(lambda row: k * (1 - row['m_z']) /\n (sum(z[:int(row['n_c'])]) + row['m_z'] * row['accrued']), axis=1)\n return df['par']", "def transactions_df():\n return pd.DataFrame(\n {\n \"user_id\": [1, 1, 1, 2, 2, 2, 3, 3, 3],\n \"item_id\": [11, 22, 22, 11, 22, 33, 33, 33, 44],\n \"amount\": [10, 20, 30, 40, 50, 60, 70, 80, 90],\n }\n )", "def data(self):\n dfdata = pd.concat([self.weights, self.returns, self.category], axis=1)\n dfdata.columns = ['weights', 'returns', self.category_name]\n if self.period is not None:\n dfdata['date'] = self.period\n return dfdata", "def getMarketPerformance(path, market, start_idx, end_idx, number_years):\r\n return_market = np.array([])\r\n return_market_percentage = np.array([])\r\n anual_return_market = np.array([])\r\n for index in market:\r\n index = loadStockData(path, index)\r\n total_return = (index['Adj Close'][end_idx]- index['Adj Close'][start_idx])/index['Adj Close'][start_idx] + 1\r\n return_market = np.append(return_market, total_return) \r\n return_market_percentage = np.append(return_market_percentage, total_return * 100)\r\n anual_return_market = np.append(anual_return_market, annualReturn(total_return, number_years, rounded=True))\r\n \r\n market_performance = pd.DataFrame([return_market_percentage, anual_return_market], columns=market, index = ['return_percentage', 'anual_return'])\r\n return (market_performance)", "def factor_exposure(self):\n exp_hs_all = pd.DataFrame([])\n exp_zz_all = pd.DataFrame([])\n for i in range(len(self.weekly_date)):\n date = self.weekly_date.iloc[i,0]\n factor = get_barra_factor_from_sql(date)\n factor['secID'] = factor.index.tolist()\n stocklist = factor.index.tolist()\n \n hs300 = get_index_composition(date,'000300.SH')\n zz500 = get_index_composition(date,'000905.SH')\n hs300['secID'] = hs300.index.tolist()\n zz500['secID'] = zz500.index.tolist()\n \n stocklist_hs300 = list(set(hs300.index.tolist()).intersection(set(stocklist)))\n stocklist_zz500 = list(set(zz500.index.tolist()).intersection(set(stocklist)))\n stocklist_hs300.sort()\n stocklist_zz500.sort()\n \n factor_hs = extract_part_from_all(stocklist_hs300,factor,'secID')\n factor_zz = extract_part_from_all(stocklist_zz500,factor,'secID')\n hs_weight = extract_part_from_all(stocklist_hs300,hs300,'secID')\n zz_weight = extract_part_from_all(stocklist_zz500,zz500,'secID')\n del factor_hs['secID'],factor_zz['secID'],hs_weight['secID'],zz_weight['secID']\n \n \n exp_hs = pd.DataFrame(np.dot(hs_weight.T,factor_hs))\n exp_zz = pd.DataFrame(np.dot(zz_weight.T,factor_zz))\n \n \n exp_hs_all = pd.concat([exp_hs_all,exp_hs], 
axis = 0)\n exp_zz_all = pd.concat([exp_zz_all,exp_zz], axis = 0) \n print(i)\n exp_hs_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_zz_all.columns = ['Beta','Momentum','Size','EY','RV','Growth',\\\n 'BP','Leverage','Liquidity']\n exp_hs_all.index = self.weekly_date.iloc[:,0]\n exp_zz_all.index = self.weekly_date.iloc[:,0]\n return exp_hs_all,exp_zz_all", "def block_reward_USD(df):\n\n miners_revenue_USD = df['Miners Revenue (USD)']\n tx_fees_USD = df['Tx fees (USD)']\n result = miners_revenue_USD - tx_fees_USD\n result.name = 'Block Reward (USD)'\n return out(SETTINGS, df, result)", "def gather_technical_indicators(pricing_data, lookback_window):\r\n master_df = pd.DataFrame()\r\n master_df['Price'] = pricing_data\r\n master_df['SMA'] = find_SMA(pricing_data, lookback_window)\r\n master_df['Bandwidth'] = find_bolling_bandwidth(pricing_data, lookback_window)\r\n master_df['PM'] = find_price_momentum(pricing_data, lookback_window)\r\n return master_df.dropna()", "def get_clk_spr_df(self) -> pd.DataFrame:\n return pd.read_feather(self.figure_data_paths.clk_spr_path)", "def create_data():\n data_set = pd.DataFrame()\n customer_id = list()\n for i in range(1, 10001):\n customer_id.append(i)\n data_set = pd.DataFrame()\n data_set.loc[:, 'customer_id'] = np.array(customer_id)\n product_name = ('dining chair', 'dining table', 'bed', 'dining set',\n 'stool', 'couch', 'occasional table',\n 'recliner')\n product_name_random = random.choices(product_name, k=10000)\n data_set.loc[:, 'product_name'] = np.array(product_name_random)\n quantity_rented = (1, 2, 3, 4)\n quantity_rented_random = random.choices(quantity_rented, k=10000)\n data_set.loc[:, 'quantity_rented'] = np.array(quantity_rented_random)\n unit_rental_price_monthly = list()\n for i in range(0, 10000):\n unit_rental_price_monthly.append(random.uniform(1.5, 25))\n data_set.loc[:, 'unit_rental_price'] = np.array(unit_rental_price_monthly)\n rental_period_months = list()\n for i in range(0, 10000):\n rental_period_months.append(randint(6, 60))\n data_set.loc[:, 'rental_period_months'] = np.array(rental_period_months)\n return data_set", "def get_weight_df(self) -> pd.DataFrame:\n\n day_to_week = self.calendar_df.set_index(\"d\")[\"wm_yr_wk\"].to_dict()\n weight_df = self.train_df[[\"item_id\", \"store_id\"] + self.weight_columns].set_index([\"item_id\", \"store_id\"])\n weight_df = (weight_df.stack().reset_index().rename(columns = {\"level_2\": \"d\", 0: \"value\"}))\n weight_df[\"wm_yr_wk\"] = weight_df[\"d\"].map(day_to_week)\n weight_df = weight_df.merge(self.sell_prices_df, how = \"left\", on = [\"item_id\", \"store_id\", \"wm_yr_wk\"])\n weight_df[\"value\"] = weight_df[\"value\"] * weight_df[\"sell_price\"]\n weight_df = weight_df.set_index([\"item_id\", \"store_id\", \"d\"]).unstack(level = 2)[\"value\"]\n weight_df = weight_df.loc[zip(self.train_df.item_id, self.train_df.store_id), :].reset_index(drop = True)\n weight_df = pd.concat([self.train_df[self.id_columns], weight_df], axis = 1, sort = False)\n\n weights_map_lst = []\n for group_id in self.group_ids:\n if type(group_id) == str:\n group_id = [group_id]\n\n lv_weight = weight_df.groupby(group_id)[self.weight_columns].sum().sum(axis = 1)\n lv_weight = lv_weight / lv_weight.sum()\n \n if len(group_id) == 2:\n lv_weight.index = pd.Series(lv_weight.index.values).apply(lambda x: \"--\".join(x))\n\n weights_map_lst.append(lv_weight)\n\n weights_df = pd.concat(weights_map_lst) / len(self.group_ids)\n\n return weights_df", "def 
pandas(self):\n names,prior,posterior = [],[],[]\n for iname,name in enumerate(self.posterior_parameter.row_names):\n names.append(name)\n posterior.append(np.sqrt(float(\n self.posterior_parameter[iname, iname]. x)))\n iprior = self.parcov.row_names.index(name)\n prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))\n for pred_name, pred_var in self.posterior_prediction.items():\n names.append(pred_name)\n posterior.append(np.sqrt(pred_var))\n prior.append(self.prior_prediction[pred_name])\n return pd.DataFrame({\"posterior\": posterior, \"prior\": prior},\n index=names)", "def strategy_returns(df, df_price_of_strategy):\r\n df_return_of_strategy = pd.DataFrame(index=df_price_of_strategy.index)\r\n cols = df_price_of_strategy.columns\r\n\r\n for priceSeries in cols:\r\n df_return_of_strategy[priceSeries] = (df_price_of_strategy[priceSeries]\r\n - df_price_of_strategy[priceSeries].shift()) / (\r\n df_price_of_strategy[priceSeries])\r\n\r\n return df_return_of_strategy", "def strategy_returns(df, df_price_of_strategy):\r\n df_return_of_strategy = pd.DataFrame(index=df_price_of_strategy.index)\r\n cols = df_price_of_strategy.columns\r\n\r\n for priceSeries in cols:\r\n df_return_of_strategy[priceSeries] = (df_price_of_strategy[priceSeries]\r\n - df_price_of_strategy[priceSeries].shift()) / (\r\n df_price_of_strategy[priceSeries])\r\n\r\n return df_return_of_strategy", "def values(self):\n for row in self.yield_matrix:\n yield FissionYield(self.products, row)" ]
[ "0.6506243", "0.61616105", "0.5973585", "0.59033316", "0.57318133", "0.5705728", "0.568607", "0.56644547", "0.5616574", "0.55478776", "0.55426395", "0.5541117", "0.5517417", "0.55045354", "0.55006254", "0.5484498", "0.54590946", "0.54367566", "0.5420372", "0.53916895", "0.5390871", "0.53850096", "0.5351104", "0.53292036", "0.5328146", "0.53265715", "0.53208596", "0.5319527", "0.5319527", "0.53021806" ]
0.6999018
0
Because get_ytm_discount_data returns a dataframe, but we want a function where we can input the term and it gives back the discount rate. So this is that function.
def get_discount_rate_function(self,x: float):
    discount_rate = Bootstrapping.P_Tn(self,Bootstrapping.get_ytm_function(self,x), x)
    return discount_rate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ytm_discount_data(self):\n ytm=Bootstrapping.get_ytm_dict(self)\n data = pd.DataFrame()\n for i in ytm.keys():\n data.loc[i / 2, 'Yield to maturity'] = ytm[i]\n data.loc[i / 2, 'discount_rate'] = Bootstrapping.P_Tn(self,ytm[i], i)\n return data", "def get_discount(self, price):\r\n pass", "def ytmToPrice(self, yld, redemption=None): \n if(yld < 0.0):\n logging.info(\"%s\\nsettle: %s coupon: %s maturity: %s\" % \n (BondException.NEG_YIELD_MSG,\n self.settlementDate, \n self.coupon, \n self.maturity))\n \n # yld = BondException.MIN_YLD\n \n if not redemption:\n redemption = self.redvalue\n \n if(yld==self.coupon):\n price = 100.0\n else:\n freq_ = ql.freqValue(self.frequency)\n cpn = self.coupon/freq_\n y = yld/freq_\n u = cpn/y\n z = 1. / (1.+y)**self.nper\n t = 1. / (1.+y)**self.frac\n if(self.frac == 0.0):\n nxtcpn = 0.0\n else:\n nxtcpn = cpn\n prc = (t*(u*(1.0-z) + z*redemption/100.0 + nxtcpn) - self.ai())*100.0\n price = round(prc,6)\n\n return price", "def get_discount_function(self, spot, T, t = 0, k = 1):\n return self.present_value(1, spot, T, t, k)", "def apply_discount(self, product):\n pass", "def evaluate(self, x, y, t=0):\n alpha, beta, gamma, y_weights, discount_factors = \\\n self.get_attr('alpha', 'beta', 'gamma', 'y_weights', 'discount_factors')\n # Marginals: power utility\n v_1 = x ** beta\n v_2 = y ** (beta * gamma)\n\n # Case distinction to avoid overflow error\n if x == 0.0:\n if y == 0.0:\n # Both zero\n utils = 0.0\n else:\n # Only y positive\n utils = discount_factors[t] * y_weights[t] * v_2\n else:\n if y == 0.0:\n # Only x positive\n utils = discount_factors[t] * v_1\n else:\n # Both positive.\n try:\n utils = ((v_1 ** alpha) + ((y_weights[t] * v_2) ** alpha)) ** (1.0 / alpha)\n utils = discount_factors[t] * utils\n # Sometimes an overflow error occurs.\n except ArithmeticError:\n utils = HUGE_FLOAT\n\n return utils", "def tiered_discount(request):\n\n cart = Cart.objects.from_request(request)\n discount = TieredDiscount.objects.valid(cart.total)\n if discount:\n amount = discount.amount(cart.total)\n else:\n amount = None\n\n return {\n 'tiered_discount': discount,\n 'tiered_discount_amount': amount,\n }", "def discount(time, discount_rate):\n\n if not isinstance(time, Iterable):\n discounts = (1 + discount_rate) ** (-time)\n else:\n discounts = pd.DataFrame(\n [(1 + discount_rate) ** (-t) for t in time], index=time\n )\n\n return discounts", "def rate(self, t, yt):\n # TODO add with parameters\n T = yt[-1]\n y = yt[:-1]\n # self.__log.debug('Em %s', Em)\n dIdt = (self.parameters.A0 * np.exp(-self._Em / Rgas / T))\n # self.__log.debug('dkdt %s', dkdt)\n coeff1 = self.Wm * self.mt / sqrtpi\n coeff2 = np.exp(-pow(\n (self._Em - self.parameters.E0) / self.parameters.sigma, 2) / 2)\n coeff3 = np.exp(-y[1:]) * dIdt\n # self.__log.debug('coeff: %s %s %s', coeff1, coeff2, coeff3)\n # dydt = (self.parameters['y0'] - y[0]) * \\\n # np.sum(coeff1 + coeff2 + coeff3)\n dydt = self.parameters.y0 * np.sum(coeff1 * coeff2 * coeff3)\n # self.__log.debug('dydt %s', dydt)\n return np.append(dydt, dIdt)", "def draw_discount_curve(self):\n data=Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['discount_rate'])\n ax.set_xlabel('Term')\n ax.set_ylabel('value')\n ax.set_title('Discount Curves')\n plt.show()", "def rate(self, t, y):\n k = self._calc_k(y[1])\n # return k * (1 - y - self.parameters['y0'])\n dy = self.parameters.y0 - y[0]\n return [k * dy if dy > 1e-6 else 0]", "def 
train_predictors(market_data, functions_for_typical_price_data, functions_for_hlc_price_data, labels_for_typical_price_data, labels_for_hlc_price_data):\r\n standard = {}\r\n # high = market_data.loc[:, 'high'].values.tolist()\r\n # low = market_data.loc[:, 'low'].values.tolist()\r\n # close = market_data.loc[:, 'close'].values.tolist()\r\n volume = market_data.loc[:, 'volume'].values\r\n # typical_prices = typical_price(high, low, close)\r\n typical_prices = market_data.loc[:, 'weightedAverage'].values\r\n standard['volume'] = (np.nanmean(volume), np.nanstd(volume))\r\n standard['typical_price'] = (np.nanmean(typical_prices), np.nanstd(typical_prices))\r\n x = ((volume - standard['volume'][0])/standard['volume'][1])\r\n x = np.c_[(typical_prices - standard['typical_price'][0])/standard['typical_price'][1], x]\r\n typical_prices = typical_prices.tolist()\r\n for f, label in zip(functions_for_typical_price_data, labels_for_typical_price_data):\r\n values = np.array(f(typical_prices))\r\n standard[label] = (np.nanmean(values), np.nanstd(values))\r\n x = np.c_[x, (values - standard[label][0])/standard[label][1]]\r\n # for f, label in zip(functions_for_hlc_price_data, labels_for_hlc_price_data):\r\n # values = np.array(f(high, low, close))\r\n # if 'typical_price' in label and label != 'typical_price':\r\n # standard[label] = standard['typical_price']\r\n # else:\r\n # standard[label] = (np.nanmean(values), np.nanstd(values))\r\n # x = np.c_[x, (values - standard[label][0])/standard[label][1]]\r\n return pd.DataFrame(data=x, columns=['typical_price', 'volume']+labels_for_typical_price_data, index=market_data.index), standard", "def discount(ir, period):\n\treturn ir.discount(period)", "def add_average_discount_to_target(data):\n\n data['adj_price'] = data.price.map(lambda x: (x - x * 0.05))\n data['adj_price_sqrm'] = data.price_sqrm.map(lambda x: (x - x * 0.05))\n\n return data", "def calculate_tf(self, book_dict, term):\n term_frequency = 0\n try:\n term_frequency = (\n book_dict[\"SanitizedText\"][term] / book_dict[\"TotalNoOfTerms\"]\n )\n except KeyError:\n print(\"Key Error, Term doesnt exist\")\n return 0\n except ZeroDivisionError:\n print(\"tf division by zero!\")\n return 0\n return term_frequency", "def rate(self, t, y):\n k1, k2 = self._k(y[-1])\n if y[1] > 1e-6:\n dydt = [(self.parameters.y1 * k1 + self.parameters.y2 * k2) * y[1],\n -(k1 + k2) * y[1]]\n else:\n dydt = [0, 0]\n return dydt", "def calculateSingleTax(monthlyIncome):\n pass", "def predict_price(area) -> float:\n response = requests.get(TRAIN_DATA_URL)\n # YOUR IMPLEMENTATION HERE\n #print(response.content)\n d = pd.read_csv(TRAIN_DATA_URL, header = None)\n d_T = d.T\n #d_T = d_T[:].values()\n d_T.drop(d_T.index[1])\n #print(d_T)\n '''x_a = [row[0] for row in d]\n y_a = [row[1] for row in d]\n x_s = np.array(x_a[1:])\n y_s = np.array(y_a[1:])'''\n x_1 = d_T[0][1:]\n y_1 = d_T[1][1:]\n x_min = x_1.min()\n x_max = x_1.max()\n y_min = y_1.min()\n y_max = y_1.max()\n x = np.array((x_1-x_min)/(x_max-x_min))\n y = np.array((y_1-y_min)/(y_max-y_min))\n x_mean, y_mean = mean(x), mean(y)\n b1 = covariance(x, x_mean, y, y_mean/variance(x, x_mean))\n b0 = y_mean - b1*x_mean\n print(b0, b1)\n return np.array(b0+b1*area)", "def calculateDataRate(self):\n pass", "def prediction_fn(self, yhat):\n raise NotImplementedError", "def get_data_term(self):\n \n if self.num_hidden == 0:\n \n data_term = -self.compute_energy(self.x, self.batch_size)\n \n else:\n \n data_term = -self.compute_free_energy(self.x)\n \n return 
T.sum(T.exp(-data_term))", "def get_ytm_dict(self):\n ytm=self.ytm\n for term in self.Rmn.keys():\n ytm = Bootstrapping.bisection(self,0.001, 0.1, 1e-10, 2 * term, self.Rmn, ytm)\n return ytm", "def discount(rewards, discount_factor=.99):\n # Compute discounted rewards (trust me this works and hopefully it's super fast)\n timesteps = len(rewards) # make into matrix\n rewards = tf.convert_to_tensor([rewards],dtype=tf.float32)\n # create lower triangular matrix of discount_factor weights\n T = tf.convert_to_tensor([[max(1+i-j,0) for j in range(timesteps)] for i in range(timesteps)],dtype=tf.float32)\n T = tf.math.pow(discount_factor, T)\n T = tf.linalg.band_part(T, -1, 0)\n # apply discount factor\n return tf.matmul(rewards, T)", "def custom_scoring(y_te, y_pred):\n #weights computed with training data set\n w = np.array([0.02409584, 0.00787456, 0.03685528, 0.01760536, 0.04589969, 0.8483942 , 0.01724058, 0.00203449]);\n \n ## F1 SCORES\n #evaluate F1 score, precision and recall for each label, \n #along with custom proportionally weighted F1 score\n #and built in weighted and macro F1 scores\n F1_tab, Ptab, Rtab, pf1 = F1_score(y_te, y_pred, w)\n f = F1Score(8, threshold = 0.5, average = 'weighted')\n f.update_state(y_te, y_pred)\n wf1 = f.result().numpy() #weighted f1 score\n f.reset_states()\n f = F1Score(8, threshold = 0.5, average = 'macro')\n f.update_state(y_te, y_pred)\n mf1 = f.result().numpy() #macro f1 score\n f.reset_states()\n\n ##EDIT DISTANCE\n #edit_dist_av = LevDistMultilabels(y_true, y_pred)\n\n ##ACCURACY\n #evaluate accuracy per label\n acc_tab = Acc(y_te, y_pred)\n\n return wf1, mf1, pf1, F1_tab, Ptab, Rtab, acc_tab", "def discount(self,discountFactor,type='geometric'):\n for e in self.estimators:\n e.discount(discountFactor,type)\n return", "def epflux_eddyterms():\n pass", "def test_companies_company_id_data_tax_rates_get(self):\n pass", "def test_get_tax_return_frequencies(self):\n pass", "def discount(self, cart):", "def test_predictors(market_data, functions_for_typical_price_data, functions_for_hlc_price_data, labels_for_typical_price_data, labels_for_hlc_price_data, scaling_dict):\r\n x = []\r\n # high = market_data.loc[:, 'high'].values.tolist()\r\n # low = market_data.loc[:, 'low'].values.tolist()\r\n # close = market_data.loc[:, 'close'].values.tolist()\r\n volume = market_data.loc[:, 'volume'].values\r\n # typical_prices = typical_price(high, low, close)\r\n typical_prices = market_data.loc[:, 'weightedAverage'].values\r\n x.append(((typical_prices - scaling_dict['typical_price'][0])/scaling_dict['typical_price'][1]).tolist())\r\n typical_prices = typical_prices.tolist()\r\n x.append(((volume - scaling_dict['volume'][0])/scaling_dict['volume'][1]).tolist())\r\n for f, label in zip(functions_for_typical_price_data, labels_for_typical_price_data):\r\n values = np.array(f(typical_prices))\r\n x.append(((values - scaling_dict[label][0])/scaling_dict[label][1]).tolist())\r\n # for f, label in zip(functions_for_hlc_price_data, labels_for_hlc_price_data):\r\n # values = np.array(f(high, low, close))\r\n # x.append(((values - scaling_dict[label][0])/scaling_dict[label][1]).tolist())\r\n return pd.DataFrame(data=np.array(x).T, columns=['typical_price', 'volume']+labels_for_typical_price_data, index=market_data.index)" ]
[ "0.66182184", "0.6007628", "0.5676188", "0.5622315", "0.5424499", "0.53397036", "0.53334564", "0.5288079", "0.52863705", "0.5247682", "0.5206629", "0.52029026", "0.51883996", "0.51745844", "0.51648027", "0.5149084", "0.5111866", "0.5102101", "0.5075223", "0.5066884", "0.50492823", "0.50412524", "0.50337344", "0.5029756", "0.5018628", "0.5001028", "0.4989662", "0.4978131", "0.4973768", "0.49677137" ]
0.6481532
1
This function is used to draw the zero-coupon bond yield curve.
def draw_yield_curve(self):
    data = Bootstrapping.get_ytm_discount_data(self)
    fig = plt.figure(figsize=[10, 6])
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(data['Yield to maturity'])
    ax.set_xlabel('year')
    ax.set_ylabel('rate')
    ax.set_title('Zero-coupon yield curve')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_discount_curve(self):\n data=Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['discount_rate'])\n ax.set_xlabel('Term')\n ax.set_ylabel('value')\n ax.set_title('Discount Curves')\n plt.show()", "def create_curve(self):\n self._define_amplitude()\n self._define_width()\n self._define_horizontal()\n self._cache_values()\n print(self)", "def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)", "def bifurcation_diagram(args, Bpbmin, Bpbmax, ylim=(-1, 0.6)):\n\n xs = []\n Bpb_list = np.linspace(Bpbmin, Bpbmax, 100)\n Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl = args\n\n sol, t = calcODE(args, -1.5, -1.5, 0.5, 0.5, 0.5, 0.5, ts=4000, nt=2 ** 25)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n x0 = sol[0, :]\n n = np.array(ode(x0, t[0], *args))\n q, _ = np.linalg.qr(n[:, None], mode='complete')\n\n periods = []\n for Bpb in Bpb_list:\n args = (Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl)\n sol, t = calcODE(args, *sol[-1, :], ts=1000, nt=2 ** 15)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n for i in range(len(sol) - 1):\n x1 = sol[i]\n x2 = sol[i + 1]\n if np.sign(n @ (x2 - x0)) != np.sign(n @ (x1 - x0)):\n c1 = dist(x1, x0, n)\n c2 = dist(x2, x0, n)\n alpha = c2 / (c1 + c2)\n x_new = x1 + alpha * (x2 - x1)\n x = (x_new - x0).dot(q)\n xs.append((Bpb, x[0], x[1], x[2], x[3], x[4], x[5]))\n # if np.linalg.norm(x_new - x0) < 1e-2 and period is None:\n period = t[i] - periods[-1][-1] if len(periods) else 0\n periods.append((Bpb, period, np.linalg.norm(x_new - x0), t[i]))\n\n plt.figure(figsize=(15, 10))\n plt.scatter([i[0] for i in xs], [i[2] for i in xs], s=10)\n plt.xlabel('$B_{pb}$')\n\n # plt.ylim(ylim)\n plt.show()\n\n periods = [i for i in periods if i[1] > 0]\n\n return periods, xs", "def _draw_handler(self, bpy_dummy_self, bpy_dummy_context):\r\n self._drawRays()", "def drawCurve(xlist,ylist):\n dislin.curve(xlist,ylist,len(xlist))", "def Draw(Uk): \n vecx = np.zeros([n,1])\n for i in range(n):\n vecx[i][0] =(float(2*i-n+1)/(n-1))*L\n plt.plot(vecx, Uk, linewidth=1.0)\n plt.show()", "def curve_number(self):", "def create_frame_curve(self):\n self.frame_curve = pm.curve(\n d=1,\n p=[(-0.5, 0.5, 0),\n (0.5, 0.5, 0),\n (0.5, -0.5, 0),\n (-0.5, -0.5, 0),\n (-0.5, 0.5, 0)],\n k=[0, 1, 2, 3, 4]\n )\n self.store_node(self.frame_curve)", "def bezier_graph():\n pylon_graph = graph.graph()\n bezier_points = []\n for x in range(25):\n bezier_points.append(bezier_form(x * 0.01, ([4*0.1, 4*0.6, 4*0.45],\n [4*1.0, 4*0.2, 4*0.8],\n [4*0.8, 4*0.95, 4*0.95],\n [4*0.55, 4*0.75, 4*0.8])))\n pylon_graph.add_nodes(bezier_points, \"curve\", True)\n mirror_points = mirror(bezier_points, 'x')\n pylon_graph.add_nodes(mirror_points, \"curve\", True)\n return pylon_graph", "def draw():", "def generate(self, diagram):", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.up()\r\n 
pen.forward(50)", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def drawPoints(self, qp):\n\n# pen = self.pen\n\n\n size = self.size()\n self.yOffset = [size.height()*0.2 + size.height()*0.618/self.NUM_CHANNEL * y for y in xrange(self.NUM_CHANNEL) ]\n\n for ix in xrange(self.NUM_CHANNEL):\n self.pen.setStyle(Qt.SolidLine)\n self.pen.setWidth(2)\n self.pen.setBrush(self.PEN_COLOR[ix])\n self.pen.setCapStyle(Qt.RoundCap)\n self.pen.setJoinStyle(Qt.RoundJoin)\n qp.setPen(self.pen)\n\n qp.drawLine(self.x - 2, self.yOffset[ix] - \\\n self.data_1[ix] * self.DISPLAY_SCALING[ix],\\\n self.x , self.yOffset[ix] - \\\n self.data[ix] * self.DISPLAY_SCALING[ix])", "def draw(self, frame):\n self.block_bot.draw(frame, self.structure_offset, self.invert_y)\n self.block_mid.draw(frame, self.structure_offset, self.invert_y)\n self.block_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw bars\n self.bars_bot.draw(frame, self.structure_offset, self.invert_y)\n self.bars_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw spring\n self.spring_bot.draw(frame, self.structure_offset, self.invert_y)\n self.spring_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw point C\n self.draw_C(frame)", "def risePen(gcode):\r\n gcode.append(\"M300 S46\")\r\n #gcode.append(\"G0 Z0.1000\")\r", "def create_graph(self, backtest):\n \n # check number of currencies\n no_currencies = len(np.unique(signals.currency_id))\n \n # plot capital\n fig, ax = plt.subplots(1+no_currencies/2,2)\n ax[0, 0].plot(backtest.capital)\n ax[0, 1].plot(backtest.capital) \n \n # plot each currency \n \n return true", "def draw_edges():\n\n def bezier(p0, p1, p2, **kwargs):\n x0, y0 = p0\n x1, y1 = p1\n x2, y2 = p2\n xb = [\n (1 - t) ** 2 * x0 + 2 * t * (1 - t) * x1 + t ** 2 * x2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n yb = [\n (1 - t) ** 2 * y0 + 2 * t * (1 - t) * y1 + t ** 2 * y2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n ax.plot(xb, yb, **kwargs)\n\n for edge in self._edges:\n\n u, v = edge\n\n x0, y0, a0 = (\n node_properties[\"node_x\"][u],\n node_properties[\"node_y\"][u],\n node_properties[\"theta\"][u],\n )\n x2, y2, a2 = (\n node_properties[\"node_x\"][v],\n node_properties[\"node_y\"][v],\n node_properties[\"theta\"][v],\n )\n\n angle = a0 + (a2 - a0) / 2\n\n # if angle > np.pi:\n # angle_corr = angle - np.pi\n # else:\n # angle_corr = angle\n\n distance = np.abs(a2 - a0)\n if distance > np.pi:\n distance = distance - np.pi\n distance = (1.0 - 1.0 * distance / np.pi) * R / 2.5\n x1 = distance * np.cos(angle)\n y1 = distance * np.sin(angle)\n x1 = 0\n y1 = 0\n\n ## dibuja los arcos\n bezier(\n [x0, y0], [x1, y1], [x2, y2], **self._edges[edge],\n )", "def draw(iiter):\n from matplotlib import pyplot as plt\n fig = plt.gcf()\n fig.canvas.draw()", "def find_curve(self):\n self.set_a()\n while True:\n while not self.check_a():\n 
self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break", "def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):", "def _drawBolts(self,view):\n if len(self._bolts)>0:\n for n in self._bolts:\n n.draw(view)", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()" ]
[ "0.63446325", "0.5719501", "0.54202026", "0.53648794", "0.5319175", "0.53174984", "0.53135353", "0.53053623", "0.53045815", "0.5267609", "0.52351516", "0.52285373", "0.5218341", "0.5210886", "0.5180416", "0.5146694", "0.51120013", "0.50831884", "0.5078383", "0.5071085", "0.505159", "0.5049514", "0.5047569", "0.5047142", "0.5047142", "0.5047142", "0.5047142", "0.50342685", "0.49991894", "0.49923572" ]
0.7843739
0
This function is used to draw the zero-coupon bond yield curve.
def draw_discount_curve(self):
    data=Bootstrapping.get_ytm_discount_data(self)
    fig = plt.figure(figsize=[10, 6])
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(data['discount_rate'])
    ax.set_xlabel('Term')
    ax.set_ylabel('value')
    ax.set_title('Discount Curves')
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_yield_curve(self):\n data = Bootstrapping.get_ytm_discount_data(self)\n fig = plt.figure(figsize=[10, 6])\n ax = fig.add_subplot(1, 1, 1)\n ax.plot(data['Yield to maturity'])\n ax.set_xlabel('year')\n ax.set_ylabel('rate')\n ax.set_title('Zero-coupon yield curve')\n plt.show()", "def create_curve(self):\n self._define_amplitude()\n self._define_width()\n self._define_horizontal()\n self._cache_values()\n print(self)", "def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)", "def bifurcation_diagram(args, Bpbmin, Bpbmax, ylim=(-1, 0.6)):\n\n xs = []\n Bpb_list = np.linspace(Bpbmin, Bpbmax, 100)\n Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl = args\n\n sol, t = calcODE(args, -1.5, -1.5, 0.5, 0.5, 0.5, 0.5, ts=4000, nt=2 ** 25)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n x0 = sol[0, :]\n n = np.array(ode(x0, t[0], *args))\n q, _ = np.linalg.qr(n[:, None], mode='complete')\n\n periods = []\n for Bpb in Bpb_list:\n args = (Iext, G, Ein, Eex, eps, a, b, A, Bpb, Bbp, vsl)\n sol, t = calcODE(args, *sol[-1, :], ts=1000, nt=2 ** 15)\n sol = sol[-len(sol) // 2:, :]\n t = t[-len(t) // 2:]\n\n for i in range(len(sol) - 1):\n x1 = sol[i]\n x2 = sol[i + 1]\n if np.sign(n @ (x2 - x0)) != np.sign(n @ (x1 - x0)):\n c1 = dist(x1, x0, n)\n c2 = dist(x2, x0, n)\n alpha = c2 / (c1 + c2)\n x_new = x1 + alpha * (x2 - x1)\n x = (x_new - x0).dot(q)\n xs.append((Bpb, x[0], x[1], x[2], x[3], x[4], x[5]))\n # if np.linalg.norm(x_new - x0) < 1e-2 and period is None:\n period = t[i] - periods[-1][-1] if len(periods) else 0\n periods.append((Bpb, period, np.linalg.norm(x_new - x0), t[i]))\n\n plt.figure(figsize=(15, 10))\n plt.scatter([i[0] for i in xs], [i[2] for i in xs], s=10)\n plt.xlabel('$B_{pb}$')\n\n # plt.ylim(ylim)\n plt.show()\n\n periods = [i for i in periods if i[1] > 0]\n\n return periods, xs", "def _draw_handler(self, bpy_dummy_self, bpy_dummy_context):\r\n self._drawRays()", "def drawCurve(xlist,ylist):\n dislin.curve(xlist,ylist,len(xlist))", "def Draw(Uk): \n vecx = np.zeros([n,1])\n for i in range(n):\n vecx[i][0] =(float(2*i-n+1)/(n-1))*L\n plt.plot(vecx, Uk, linewidth=1.0)\n plt.show()", "def curve_number(self):", "def create_frame_curve(self):\n self.frame_curve = pm.curve(\n d=1,\n p=[(-0.5, 0.5, 0),\n (0.5, 0.5, 0),\n (0.5, -0.5, 0),\n (-0.5, -0.5, 0),\n (-0.5, 0.5, 0)],\n k=[0, 1, 2, 3, 4]\n )\n self.store_node(self.frame_curve)", "def bezier_graph():\n pylon_graph = graph.graph()\n bezier_points = []\n for x in range(25):\n bezier_points.append(bezier_form(x * 0.01, ([4*0.1, 4*0.6, 4*0.45],\n [4*1.0, 4*0.2, 4*0.8],\n [4*0.8, 4*0.95, 4*0.95],\n [4*0.55, 4*0.75, 4*0.8])))\n pylon_graph.add_nodes(bezier_points, \"curve\", True)\n mirror_points = mirror(bezier_points, 'x')\n pylon_graph.add_nodes(mirror_points, \"curve\", True)\n return pylon_graph", "def draw():", "def generate(self, diagram):", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw_h(self):\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.right(90)\r\n 
pen.up()\r\n pen.forward(50)", "def price_generator(self, start, end, periods):\r\n tickers = [self.SelectedTicker]\r\n tick_yahoo = YahooFinancials(tickers)\r\n data = tick_yahoo.get_historical_price_data(start, \r\n end, \r\n periods)\r\n \r\n df = pd.DataFrame({\r\n a: {x['formatted_date']: x['adjclose'] for x in data[a]['prices']} for a in tickers})\r\n \r\n self.prices = df.dropna()\r\n self.returns = self.prices.pct_change().dropna()\r\n try:\r\n self.div_yield = tick_yahoo.get_dividend_yield()\r\n #print(self.div_yield[self.SelectedTicker])\r\n if self.div_yield[self.SelectedTicker] == None:\r\n self.div_yield = 0.00\r\n else:\r\n self.div_yield = self.div_yield[self.SelectedTicker]\r\n except:\r\n print(\"no dividend yield\")", "def drawPoints(self, qp):\n\n# pen = self.pen\n\n\n size = self.size()\n self.yOffset = [size.height()*0.2 + size.height()*0.618/self.NUM_CHANNEL * y for y in xrange(self.NUM_CHANNEL) ]\n\n for ix in xrange(self.NUM_CHANNEL):\n self.pen.setStyle(Qt.SolidLine)\n self.pen.setWidth(2)\n self.pen.setBrush(self.PEN_COLOR[ix])\n self.pen.setCapStyle(Qt.RoundCap)\n self.pen.setJoinStyle(Qt.RoundJoin)\n qp.setPen(self.pen)\n\n qp.drawLine(self.x - 2, self.yOffset[ix] - \\\n self.data_1[ix] * self.DISPLAY_SCALING[ix],\\\n self.x , self.yOffset[ix] - \\\n self.data[ix] * self.DISPLAY_SCALING[ix])", "def draw(self, frame):\n self.block_bot.draw(frame, self.structure_offset, self.invert_y)\n self.block_mid.draw(frame, self.structure_offset, self.invert_y)\n self.block_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw bars\n self.bars_bot.draw(frame, self.structure_offset, self.invert_y)\n self.bars_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw spring\n self.spring_bot.draw(frame, self.structure_offset, self.invert_y)\n self.spring_top.draw(frame, self.structure_offset, self.invert_y)\n\n # Draw point C\n self.draw_C(frame)", "def risePen(gcode):\r\n gcode.append(\"M300 S46\")\r\n #gcode.append(\"G0 Z0.1000\")\r", "def create_graph(self, backtest):\n \n # check number of currencies\n no_currencies = len(np.unique(signals.currency_id))\n \n # plot capital\n fig, ax = plt.subplots(1+no_currencies/2,2)\n ax[0, 0].plot(backtest.capital)\n ax[0, 1].plot(backtest.capital) \n \n # plot each currency \n \n return true", "def draw_edges():\n\n def bezier(p0, p1, p2, **kwargs):\n x0, y0 = p0\n x1, y1 = p1\n x2, y2 = p2\n xb = [\n (1 - t) ** 2 * x0 + 2 * t * (1 - t) * x1 + t ** 2 * x2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n yb = [\n (1 - t) ** 2 * y0 + 2 * t * (1 - t) * y1 + t ** 2 * y2\n for t in np.linspace(0.0, 1.0, n_bezier)\n ]\n ax.plot(xb, yb, **kwargs)\n\n for edge in self._edges:\n\n u, v = edge\n\n x0, y0, a0 = (\n node_properties[\"node_x\"][u],\n node_properties[\"node_y\"][u],\n node_properties[\"theta\"][u],\n )\n x2, y2, a2 = (\n node_properties[\"node_x\"][v],\n node_properties[\"node_y\"][v],\n node_properties[\"theta\"][v],\n )\n\n angle = a0 + (a2 - a0) / 2\n\n # if angle > np.pi:\n # angle_corr = angle - np.pi\n # else:\n # angle_corr = angle\n\n distance = np.abs(a2 - a0)\n if distance > np.pi:\n distance = distance - np.pi\n distance = (1.0 - 1.0 * distance / np.pi) * R / 2.5\n x1 = distance * np.cos(angle)\n y1 = distance * np.sin(angle)\n x1 = 0\n y1 = 0\n\n ## dibuja los arcos\n bezier(\n [x0, y0], [x1, y1], [x2, y2], **self._edges[edge],\n )", "def draw(iiter):\n from matplotlib import pyplot as plt\n fig = plt.gcf()\n fig.canvas.draw()", "def find_curve(self):\n self.set_a()\n while True:\n while not 
self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def draw(self):\n pass", "def plot(t): \n assert isinstance(t, int), \"'t' argument should be an integer.\"\n assert t > 0, \"'t' argument should be a positive integer.\" \n # Initialize arrays with zeros to store mean cumulative rewards upto t \n # rounds for each of the three implemented bandit algorithms\n EpsGreedy_rewards = np.zeros(t)\n UCB_rewards = np.zeros(t)\n LinUCB_rewards = np.zeros(t)\n # For each round, store the mean cumulative rewards upto that round\n for i in range(1,t):\n EpsGreedy_rewards[i] = np.sum(results_EpsGreedy[0:i]) / t\n UCB_rewards[i] = np.sum(results_UCB[0:i]) / t\n LinUCB_rewards[i] = np.sum(results_LinUCB[0:i]) / t\n # Plot running per round cumulative reward\n plt.plot(range(0,t), EpsGreedy_rewards, color='b', label='e-Greedy')\n plt.plot(range(0,t), UCB_rewards, color='g', label='UCB')\n plt.plot(range(0,t), LinUCB_rewards, color='orange', label='LinUCB')\n plt.xlabel('Round')\n plt.ylabel('Mean Cumulative Reward')\n plt.title('Running Per Round Cumulative Reward')\n plt.legend()\n plt.show()", "def draw(self):", "def _drawBolts(self,view):\n if len(self._bolts)>0:\n for n in self._bolts:\n n.draw(view)", "def draw_block():\n turtle.down()\n turtle.begin_fill()\n turtle.pensize(3)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.forward(50)\n turtle.left(90)\n turtle.end_fill()\n turtle.up()" ]
[ "0.7844901", "0.57200205", "0.5420235", "0.5364439", "0.531805", "0.5317455", "0.5312386", "0.5305822", "0.5303851", "0.52682483", "0.52347237", "0.52280426", "0.5218469", "0.52114904", "0.5181944", "0.51469964", "0.51128083", "0.508417", "0.5077651", "0.50719845", "0.50514114", "0.50509316", "0.50470525", "0.50470525", "0.50470525", "0.50470525", "0.50470513", "0.50338364", "0.49992174", "0.49932966" ]
0.6344681
1
Test create maze with properties.
def test_create_maze(self):
    maze = Maze(4, 4)
    self.assertEqual(maze.row_count, 4)
    self.assertEqual(maze.col_count, 4)
    self.assertEqual(maze.size, 16)
    self.assertTrue(isinstance(maze.entrance, list))
    self.assertTrue(isinstance(maze.exit, list))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_ctor(self):\r\n cols = 5\r\n rows = 5\r\n maze = Maze(rows, cols)\r\n\r\n self.assertEqual(maze.num_cols, cols)\r\n self.assertEqual(maze.num_rows, rows)\r\n self.assertEqual(maze.id, 0)\r\n self.assertEqual(maze.grid_size, rows*cols)\r\n\r\n id=33\r\n maze2 = Maze(rows, cols, id)\r\n self.assertEqual(maze2.num_cols, cols)\r\n self.assertEqual(maze2.num_rows, rows)\r\n self.assertEqual(maze2.id, id)\r\n self.assertEqual(maze2.grid_size, rows * cols)", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def setUp(self):\n\n self.m=Maze()", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def setUp(self):\n self.m=Maze()\n self.m.reset()", "def __init__(self, maze, population_size):\n self.maze = maze\n self.population_size = population_size", "def generate(width=20, height=20):\n m = Maze(width, height)\n m.randomize()\n return m", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def create_maze(size):\n dots = MazeGenerator.generate_dot_positions(size.x, size.y)\n maze = MazeGenerator.create_grid_string(dots, size.x, size.y)\n return maze", "def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)", "def _maze(self):\n try:\n return self.__maze\n except AttributeError:\n pass\n # create and store the maze object\n supershape_name, supershape = random.choice(_supershapes)\n grid = polymaze.PolyGrid(supershape=supershape)\n grid.create_string(self._text, complexity=self._complexity)\n self.__maze = polymaze.Maze(grid)\n return self.__maze", "def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),\n maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,\n width = 1600, height = 900, maze = None,\n intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],\n parentname = \"\", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):\n self.startpos, self.startangle, self.colour = startpos, angle, colour\n self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle\n 
self.maze = maze\n self.width, self.height = width, height\n self.parentname, self.parentcolour = parentname, parentcolour\n # Create dimensions array based on input, intermediate dimensions and output (4)\n self.inputType = 1 # 0: point, 1: linear\n self.setDimension(inputdistance,inputangle,intermediates,orders)\n self.drag = 0.99\n self.initWeights()\n self.sightLength = 200\n \n if name is not None: \n self.name = name\n else:\n self.name = self.getName()\n \n self.reset()", "def _create_new_maze(self, settings: setts.Settings) -> mazegraph.MazeGraph:\n graph = mazegraph.MazeGraph(settings)\n self._make_random_graph(graph)\n\n return graph", "def test_maze_move_3(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.NO_CHANGE), True)", "def __init__(self):\n self.maze = [['#','#','#','#','#','#','#','#','#','#','#',],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','^','/',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ','@',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#',' ',' ',' ',' ',' ',' ',' ',' ','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#'],\n ['#','#','#','#','#','#','#','#','#','#','#']]\n self.diamonds = 1\n self.width = 10\n self.height = 12\n self.crates = 1", "def __init__(self, _filename):\n # -- open text file containing maze\n self.file = open(_filename, 'r')\n self._grid = []\n # -- initialize line_list and append into list\n line_list = []\n lines = self.file.readlines()\n for line in lines:\n line = line.strip('\\n')\n line_list = [char for char in line]\n self._grid.append(line_list)\n # -- placing the player at the very start\n self._player = Player(1,2)\n self._grid[self._player._x][self._player._y] = POINT_OF_PLAYER\n self._grid[3][-1] = POINT_OF_EXIT\n \n \n\n # --- Rename the check method to can_move_to\n \"\"\" \n :return: return False if the location is a wall, otherwise return True\n :rtype: bool\n \"\"\"", "def test_create_maze_with_float(self):\n try:\n _ = Maze(4.0, 4)\n self.assertEqual(True, False, 'should not have got here: '\n 'maze created with float index.')\n except TypeError:\n self.assertEqual(True, True)", "def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)", "def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = 
a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)", "def test_maze_move_5(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.RIGHT)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def test_create_maze_non_integer(self):\n try:\n _ = Maze('I am not an integer', 4)\n self.assertEqual(True, False, 'should not have got here: '\n 'maze created with non-integer index.')\n except TypeError:\n self.assertEqual(True, True)", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. 
Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in 
enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in 
roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def init(self, windowsize:tuple):\r\n y_count, x_count = 3, 0 #< Set the starting counter for the look_up_table. y starts with three because the first three lines are just Nones\r\n # Creating the constant maze \r\n maze_size = windowsize[0], windowsize[1] - 2 * self.grid_size\r\n self.maze = pg.Surface(maze_size) \r\n \r\n \r\n \r\n # Draw the outermost rectangles on self.maze\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0, 3 * self.grid_size), (28 * self.grid_size, 31 * self.grid_size)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((0 + self.grid_size // 2, 3 * self.grid_size + self.grid_size // 2),(27 * self.grid_size, 30 * self.grid_size)), 4) \r\n # Draw the inner rectangles\r\n for y in self.look_up_table[3 : -2]: #< y is a list of one row from the maze\r\n for x in y: #< x is a string that is decoded as already explained\r\n pos = [self.grid_size * x_count, self.grid_size * y_count]\r\n # Set reference position in the middle of one square\r\n pos[0] += self.grid_size // 2\r\n pos[1] += self.grid_size // 2\r\n x_count += 1\r\n # Check if x is rectangle\r\n if x != None and x[0] == 'r':\r\n # When the size of the string is equal or greater than 4 it's rectangle with a specific size and not just a border.\r\n if len(x) >= 4:\r\n # get the x and y size of the rectangle. 
x will be something like 'rx1_y1' x1 resprestens the size in x direction and y1 in y direction.\r\n xy_dim = x[1:].split(\"_\") \r\n xy_dim[0] = int(xy_dim[0])\r\n xy_dim[1] = int(xy_dim[1])\r\n rect = tuple(pos), (xy_dim[0] * self.grid_size , xy_dim[1] * self.grid_size )\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], rect, self.width)\r\n # If the last char is a w (white), u (up) or l (left) a line gets draw one a specific position \r\n if x[-1] == 'w':\r\n self.draw_line(self.maze, 'u', (x_count,y_count), True)\r\n if x[-1] == 'u' or x[-1] == 'l':\r\n if x_count == 0:\r\n self.draw_line(self.maze, x[-1], (len(y), y_count))\r\n else:\r\n self.draw_line(self.maze, x[-1], (x_count, y_count))\r\n \r\n y_count += 1\r\n x_count = 0\r\n # Just some cosmetic drawing\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((0, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLACK'], ((28 * self.grid_size - self.grid_size // 2 - 1, 12 * self.grid_size + self.grid_size // 2 + 4), (self.grid_size // 2 + 1, 10 * self.grid_size - 4)), 4)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 13 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((-self.width, 19 * self.grid_size), (5 * self.grid_size, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 13 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((23 * self.grid_size, 19 * self.grid_size), (5 * self.grid_size + 10, 3 * self.grid_size)), self.width)\r\n pg.draw.rect(self.maze, Colors.colors['BLUE'], ((11 * self.grid_size, 16 * self.grid_size), (6 * self.grid_size, 3 * self.grid_size)), self.width)\r\n \r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size // 2 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (0, 18 * self.grid_size + self.grid_size // 2), (self.grid_size // 2 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 16 * self.grid_size + self.grid_size // 2 - 1), (self.grid_size * 28 + self.width, 16 * self.grid_size + self.grid_size // 2 - 1), self.width)\r\n pg.draw.line(self.maze, Colors.colors['BLUE'], (self.grid_size * 28 - self.grid_size, 18 * self.grid_size + self.grid_size // 2), (self.grid_size * 28 + self.width, 18 * self.grid_size + self.grid_size // 2), self.width)\r\n self.is_init = True", "def run():\n import argparse\n parser = argparse.ArgumentParser(description=\"Create and solve mazes\")\n parser.add_argument(\"-c\", \"--cli\", help=\"Switch to CLI mode\", action='store_true')\n parser.add_argument(\"-f\", \"--file\", help=\"File to import map from\")\n parser.add_argument(\"-s\", \"--start\", help=\"Starting position in the maze\")\n parser.add_argument(\"-e\", \"--end\", help=\"Ending position in the maze\")\n args = parser.parse_args()\n if args.file:\n myfile = args.file\n else:\n myfile = 'map1.txt'\n with open(myfile, 'r') as mapfile:\n maze_str = mapfile.read()\n maze = Maze(maze_str, cli=args.cli, start=parse_seq(args.start), finish=parse_seq(args.end))\n maze.game_loop()", "def get_maze_instance(nrows, ncols, st_row, st_col, end_row, end_col):\n 
maze._allow_creation = True\n m = maze(nrows, ncols, st_row, st_col, end_row, end_col)\n maze._allow_creation = False\n return m", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def __init__(self):\n\n # Width and height of the window, in pixels.\n self.width = 800\n self.height = 600\n width = self.width\n height = self.height\n\n # Create the root window.\n self.root = tkinter.Tk()\n root = self.root\n\n #\n # Buttons etc.\n #\n controls = tkinter.Frame(root)\n controls.pack(side=tkinter.TOP, fill='x')\n\n build = tkinter.Button(controls, text='Build new maze')\n build.pack(side=tkinter.LEFT)\n\n reset = tkinter.Button(controls, text='Reset maze')\n reset.pack(side=tkinter.LEFT)\n\n solve = tkinter.Button(controls, text='Solve maze')\n solve.pack(side=tkinter.LEFT)\n\n # maze_type: 0 = prim, 1 = random.\n maze_type = tkinter.IntVar()\n prim = tkinter.Radiobutton(controls, text='Prim', variable=maze_type, \n value=0)\n prim.pack(side=tkinter.LEFT)\n rand = tkinter.Radiobutton(controls, text='Random', variable=maze_type,\n value=1)\n rand.pack(side=tkinter.LEFT)\n prim.select()\n\n def lbl_entry(lbl, v):\n l = tkinter.Label(controls, text=\"{}: \".format(lbl))\n l.pack(side=tkinter.LEFT)\n e = tkinter.Entry(controls, textvariable=v, width=5)\n e.pack(side=tkinter.LEFT)\n\n # Maze size\n nrows_var = tkinter.StringVar()\n lbl_entry('Rows', nrows_var)\n ncols_var = tkinter.StringVar()\n lbl_entry('Columns', ncols_var)\n nrows_var.set('30')\n ncols_var.set('50')\n\n # Sparseness\n sparse = tkinter.StringVar()\n lbl_entry('Sparseness', sparse)\n sparse.set('.05')\n\n # Delay\n delay = tkinter.StringVar()\n lbl_entry('Draw delay (s)', delay)\n delay.set('0.0')\n\n #\n # Canvas in which to display the maze.\n #\n self.cvs = tkinter.Canvas(width=width, height=height)\n cvs = self.cvs\n cvs.pack(side=tkinter.TOP, expand=True, fill='both')\n\n # Build callback\n def build_act():\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = self.build_fn(nrows, ncols, sparseness)\n self.display_maze()\n build.configure(command=build_act)\n\n # Reset callback\n def reset_act():\n self.display_maze()\n reset.configure(command=reset_act)\n\n\n # Solve callback\n def solve_act():\n self.solve_maze(float(delay.get()))\n\n solve.configure(command=solve_act)\n\n # Prim callback\n def prim_act():\n self.build_fn = get_prebuilt_maze_instance\n \"\"\"\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = get_prebuilt_maze_instance(nrows, ncols, sparseness)\n self.display_maze()\n \"\"\"\n prim.configure(command=prim_act)\n\n # Random callback\n def random_act():\n self.build_fn = 
get_random_maze_instance\n \"\"\"\n nrows = int(nrows_var.get())\n ncols = int(ncols_var.get())\n sparseness = float(sparse.get())\n self.maze = get_random_maze_instance(nrows, ncols, sparseness)\n self.display_maze()\n \"\"\"\n rand.configure(command=random_act)\n\n prim.invoke()\n\n root.mainloop()\n\n return" ]
[ "0.74951434", "0.74755657", "0.7341924", "0.7205087", "0.7166509", "0.7000492", "0.6800368", "0.65627474", "0.6447256", "0.6320677", "0.63178104", "0.6252976", "0.6162495", "0.6116535", "0.60331357", "0.60119486", "0.59742117", "0.59120816", "0.59075636", "0.5906564", "0.58556914", "0.5835685", "0.5811774", "0.57819504", "0.5777983", "0.569719", "0.56967396", "0.5691889", "0.56701773", "0.56641215" ]
0.78807545
0
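The record above pairs its query with a unittest-style positive document that exercises a Maze(rows, cols) constructor exposing row_count, col_count, size, entrance, and exit. The dump does not include the Maze implementation itself, so the following is only a minimal sketch of a class that would satisfy those assertions; the class body, the random placement, and the [row, col] layout of entrance/exit are assumptions, not the original code.

# Hypothetical sketch only -- the Maze implementation is not part of this dump.
import random


class Maze:
    """Minimal class consistent with the assertions in the record above."""

    def __init__(self, row_count, col_count):
        self.row_count = row_count
        self.col_count = col_count
        self.size = row_count * col_count
        # entrance/exit kept as [row, col] lists of ints, matching the
        # isinstance(..., list) and isinstance(..., int) checks in the records.
        self.entrance = [0, random.randrange(col_count)]
        self.exit = [row_count - 1, random.randrange(col_count)]

Under these assumptions, Maze(4, 4) yields row_count == 4, col_count == 4, size == 16, and list-typed entrance/exit, which is exactly what the positive document asserts.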
Test create maze gets type error with noninteger.
def test_create_maze_non_integer(self):
    try:
        _ = Maze('I am not an integer', 4)
        self.assertEqual(True, False, 'should not have got here: '
                                      'maze created with non-integer index.')
    except TypeError:
        self.assertEqual(True, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_ctor(self):\r\n cols = 5\r\n rows = 5\r\n maze = Maze(rows, cols)\r\n\r\n self.assertEqual(maze.num_cols, cols)\r\n self.assertEqual(maze.num_rows, rows)\r\n self.assertEqual(maze.id, 0)\r\n self.assertEqual(maze.grid_size, rows*cols)\r\n\r\n id=33\r\n maze2 = Maze(rows, cols, id)\r\n self.assertEqual(maze2.num_cols, cols)\r\n self.assertEqual(maze2.num_rows, rows)\r\n self.assertEqual(maze2.id, id)\r\n self.assertEqual(maze2.grid_size, rows * cols)", "def test_create_maze_with_float(self):\n try:\n _ = Maze(4.0, 4)\n self.assertEqual(True, False, 'should not have got here: '\n 'maze created with float index.')\n except TypeError:\n self.assertEqual(True, True)", "def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.exit[0], int))\n self.assertTrue(isinstance(maze.exit[1], int))", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.entrance[0], int))\n self.assertTrue(isinstance(maze.entrance[1], int))", "def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)", "def create_maze(size):\n dots = MazeGenerator.generate_dot_positions(size.x, size.y)\n maze = MazeGenerator.create_grid_string(dots, size.x, size.y)\n return maze", "def test_maze_move_3(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = 
a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.NO_CHANGE), True)", "def generate(width=20, height=20):\n m = Maze(width, height)\n m.randomize()\n return m", "def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)", "def test_maze_move_5(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.RIGHT)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)", "def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def __init__(self, maze, population_size):\n self.maze = maze\n self.population_size = population_size", "def setUp(self):\n\n self.m=Maze()", "def _maze(self):\n try:\n return self.__maze\n except AttributeError:\n pass\n # create and store the maze object\n supershape_name, supershape = random.choice(_supershapes)\n grid = polymaze.PolyGrid(supershape=supershape)\n grid.create_string(self._text, complexity=self._complexity)\n self.__maze = polymaze.Maze(grid)\n return self.__maze", "def test_generate_board_negative_mines_errors(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n mines = -1\n\n # act and expect error\n with self.assertRaises(ValueError):\n game.generate_board(width, height, mines)", "def get_maze_instance(nrows, ncols, st_row, st_col, end_row, end_col):\n maze._allow_creation = True\n m = maze(nrows, ncols, st_row, st_col, end_row, end_col)\n maze._allow_creation = False\n return m", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = 
self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n 
adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if 
room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def test_create_tile_puzzle(self):\n p = hw.create_tile_puzzle(3, 3)\n self.assertEqual(p.get_board(), [[1,2,3],[4,5,6],[7,8,0]])\n p = hw.create_tile_puzzle(2, 4)\n self.assertEqual(p.get_board(), [[1,2,3,4],[5,6,7,0]])\n p = hw.create_tile_puzzle(1, 4)\n self.assertEqual(p.get_board(), [[1,2,3,0]])", "def _get_maze(self):\n if self._maze is None:\n maze_str = self._env.observations()['DEBUG.MAZE.LAYOUT'].strip()\n lines = maze_str.split('\\n')\n\n height = len(lines)\n width = 0\n for line in lines:\n width = max(width, len(line))\n\n maze = np.zeros((width, height), dtype=np.int32)\n\n for j, line in enumerate(lines):\n for i, cell in enumerate(line):\n if cell == _WALL_SYMBOL:\n maze[i, j] = 1\n self._maze = maze\n return self._maze", "def test_generate_board_too_many_mines_errors(self):\n # arrange\n game = minesweeper.Minesweeper()\n width = 10\n height = 12\n mines = int(width * height)\n\n # act and expect error\n with self.assertRaises(ValueError):\n game.generate_board(width, height, mines)", "def main():\n global numrect\n global rectsize\n rectsize += 1\n if rectsize > 30:\n rectsize = 30\n numrect += 2\n # print(\"rectsize:\\t\" + str(rectsize))\n # print(\"numrect:\\t\" + str(numrect))\n maze, rectangles = MazeGenerator.main(msize, numrect, rectsize)\n global lvl\n # print(str(lvl))\n lvl += 1\n pygame.display.set_caption(\"Dungeon Crawlers\" + \" \" + \"Monsters Killed: \" + str(mobsKilled) + \" Current Floor: \" + str(lvl))\n runMaze(maze, rectangles)", "def init_position():\n __maze.init_position()", "def testCanExploreASmallEmptyRoom(self):\n picture = \"\"\"\n ----------- level z=0 :\n ####\n # #\n # #\n ####\n -----------\n \"\"\"\n max_moves_to_make = 3\n\n points_visited = self.explore_dungeon(picture, max_moves_to_make)\n\n assert_that(points_visited).contains(Point(2, 2, 0))\n 
assert_that(points_visited).contains(Point(1, 2, 0))\n assert_that(points_visited).contains(Point(2, 1, 0))\n assert_that(points_visited).contains(Point(1, 1, 0))" ]
[ "0.7993788", "0.72578806", "0.72363365", "0.7099285", "0.7063041", "0.6861209", "0.68212044", "0.65768915", "0.6436121", "0.63770556", "0.6309218", "0.608427", "0.6020378", "0.5984016", "0.5975598", "0.5941816", "0.58987296", "0.58849007", "0.5789791", "0.57807946", "0.5769706", "0.57654446", "0.57507217", "0.57336396", "0.5717907", "0.5699665", "0.56984305", "0.5680775", "0.56309044", "0.5616128" ]
0.8117515
0
Test create maze gets type error with float.
def test_create_maze_with_float(self):
    try:
        _ = Maze(4.0, 4)
        self.assertEqual(True, False, 'should not have got here: '
                                      'maze created with float index.')
    except TypeError:
        self.assertEqual(True, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def test_create_maze_non_integer(self):\n try:\n _ = Maze('I am not an integer', 4)\n self.assertEqual(True, False, 'should not have got here: '\n 'maze created with non-integer index.')\n except TypeError:\n self.assertEqual(True, True)", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def test_ctor(self):\r\n cols = 5\r\n rows = 5\r\n maze = Maze(rows, cols)\r\n\r\n self.assertEqual(maze.num_cols, cols)\r\n self.assertEqual(maze.num_rows, rows)\r\n self.assertEqual(maze.id, 0)\r\n self.assertEqual(maze.grid_size, rows*cols)\r\n\r\n id=33\r\n maze2 = Maze(rows, cols, id)\r\n self.assertEqual(maze2.num_cols, cols)\r\n self.assertEqual(maze2.num_rows, rows)\r\n self.assertEqual(maze2.id, id)\r\n self.assertEqual(maze2.grid_size, rows * cols)", "def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)", "def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.exit[0], int))\n self.assertTrue(isinstance(maze.exit[1], int))", "def test_maze_move_3(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.NO_CHANGE), True)", "def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 
4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)", "def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def test_maze_move_5(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.RIGHT)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.entrance[0], int))\n self.assertTrue(isinstance(maze.entrance[1], int))", "def __init__(self, startpos = (75,75), angle = 0, colour = (240,100,100),\n maxSpeed = 20, maxAccel = 1, maxAngle = 0.1,\n width = 1600, height = 900, maze = None,\n intermediates = (8,), inputdistance = [50,100,150], inputangle = [1.2,0.6,0,-0.6,-1.2],\n parentname = \"\", parentcolour = (240,100,100), name = None,orders = [1,2,3,4,5,6,7,8]):\n self.startpos, self.startangle, self.colour = startpos, angle, colour\n self.maxSpeed, self.maxAccel, self.maxAngle = maxSpeed, maxAccel, maxAngle\n self.maze = maze\n self.width, self.height = width, height\n self.parentname, self.parentcolour = parentname, parentcolour\n # Create dimensions array based on input, intermediate dimensions and output (4)\n self.inputType = 1 # 0: point, 1: linear\n self.setDimension(inputdistance,inputangle,intermediates,orders)\n self.drag = 0.99\n self.initWeights()\n self.sightLength = 200\n \n if name is not None: \n self.name = name\n else:\n self.name = self.getName()\n \n self.reset()", "def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)", "def create_maze(size):\n dots = MazeGenerator.generate_dot_positions(size.x, size.y)\n maze = MazeGenerator.create_grid_string(dots, size.x, size.y)\n return maze", "def test_Z_start(self):\t\t\n self.assertAlmostEqual(attempt.Z[0], 40)", "def test_14_float_test(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(1.3, 20)\n self.assertEqual(\"width must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 2.0)\n self.assertEqual(\"height must be an 
integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 20, 1.7777)\n self.assertEqual(\"x must be an integer\", str(x.exception))\n\n with self.assertRaises(TypeError) as x:\n r = Rectangle(13, 20, 17, 8.0)\n self.assertEqual(\"y must be an integer\", str(x.exception))", "def test_float(self):\n htype = h5t.py_create('f')\n self.assertIsInstance(htype, h5t.TypeFloatID)", "def setUp(self):\n\n self.m=Maze()", "def generate(width=20, height=20):\n m = Maze(width, height)\n m.randomize()\n return m", "def test_01_float(self):\n with self.assertRaises(TypeError) as x:\n r = Rectangle(float(1.2), float(2.2), 1)\n self.assertEqual(\"width must be an integer\", str(x.exception))", "def test_creation_float():\n with pytest.raises(ValueError) as __:\n value = 42.30474\n __ = param.Integer(value=value)", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def test_badsizevaluefloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(float(1), 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_badyvaluewithfloats(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, float(1), 3)\n self.assertEqual(str(e.exception), 'y must be an integer')", "def _maze(self):\n try:\n return self.__maze\n except AttributeError:\n pass\n # create and store the maze object\n supershape_name, supershape = random.choice(_supershapes)\n grid = polymaze.PolyGrid(supershape=supershape)\n grid.create_string(self._text, complexity=self._complexity)\n self.__maze = polymaze.Maze(grid)\n return self.__maze", "def init_position():\n __maze.init_position()", "def __init__(self, maze, population_size):\n self.maze = maze\n self.population_size = population_size" ]
[ "0.7523571", "0.6837212", "0.66139513", "0.6609147", "0.6563737", "0.6271226", "0.62683207", "0.6265946", "0.6129293", "0.60552156", "0.5967328", "0.5800113", "0.5795364", "0.5794256", "0.57674694", "0.5756952", "0.57135016", "0.56914604", "0.5667238", "0.5618049", "0.55793035", "0.55682415", "0.5520725", "0.5492916", "0.54896367", "0.54839826", "0.5482868", "0.54563993", "0.54499227", "0.5446894" ]
0.83329034
0
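The two preceding records both expect Maze(...) to raise TypeError when a dimension is a string or a float. A guard like the one below, added at the top of the hypothetical constructor sketched earlier, would make those positive documents pass; the exact check and error message used by the original class are unknown, so this is an assumption.

# Hypothetical validation for the sketched constructor; the real check is unknown.
def _validate_dimensions(row_count, col_count):
    for value in (row_count, col_count):
        # bool is a subclass of int, so exclude it explicitly as a cautious default.
        if not isinstance(value, int) or isinstance(value, bool):
            raise TypeError('maze dimensions must be integers')


# Both failure modes exercised by the records above:
# _validate_dimensions('I am not an integer', 4)  -> TypeError
# _validate_dimensions(4.0, 4)                    -> TypeError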
Test maze creates entrance as list of two integers.
def test_create_entrance_is_list(self):
    maze = Maze(4, 4)
    self.assertTrue(isinstance(maze.entrance[0], int))
    self.assertTrue(isinstance(maze.entrance[1], int))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.exit[0], int))\n self.assertTrue(isinstance(maze.exit[1], int))", "def init_maze(width: int, height: int) -> list[int]:\n return [0] * width * height", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def build_maze(self, start, end, steprange):\n \n self.directions = [self.turn_R, self.turn_L, self.turn_L, self.go_up]\n currentpos = (start, self.height-4)\n list_of_points = [currentpos]\n \n if currentpos[1] == 0 and currentpos[0] == 0:\n return list_of_points\n #Before the end point is reached, move in a random direction\n while currentpos[1] > end or currentpos[0] > end:\n f = random.choice(self.directions)\n currentpos = f(currentpos, random.randint(steprange[0], steprange[1]))\n \n #Check if the vine is out of bounds\n if currentpos[0] < 0:\n currentpos = (0, currentpos[1])\n if currentpos[0] > self.width:\n currentpos = (self.width-4, currentpos[1])\n if currentpos[1] < 0:\n currentpos = (currentpos[0], 0)\n #Add the point to the list of points the vine goes through\n list_of_points.append(currentpos)\n return list_of_points", "def extra(maze):\n # TODO: Write your code here\n return []", "def extra(maze):\n # TODO: Write your code here\n return []", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def tinyMazeSearch(problem):\n print \"ahahaha\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def initialize_maze(self, file):\n with open(file, 'r') as txt_file:\n for y_pos in range(21):\n maze_line = txt_file.readline().split()\n x_pos = 0\n while x_pos < len(maze_line):\n if maze_line[x_pos] == '1':\n self.forbidden_tiles.append((x_pos, y_pos))\n elif maze_line[x_pos] == '0':\n self.allowed_tiles.append((x_pos, y_pos))\n x_pos += 1\n\n for obj in self.forbidden_tiles:\n x_pos = obj[0]\n y_pos = obj[1]\n self.grid[y_pos][x_pos] = 'X'\n\n for position in self.allowed_tiles:\n if position[1] == 0:\n self.exit = position\n elif position[1] == self._len_y-1:\n self.entrance = position\n\n return (\n self.grid,\n self.forbidden_tiles,\n self.allowed_tiles,\n self.entrance,\n self.exit\n )", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n 
print \"I am here!!!\"\n return [s, s, w, s, w, w, s, w]", "def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:\n # Shortcut for accessing maze\n maze_idx = lambda p: p[1] * width + p[0]\n\n # Shortcut funcs for surrounding points\n north = lambda p: (p[0] , p[1] -1)\n east = lambda p: (p[0] +1, p[1] )\n south = lambda p: (p[0] , p[1] +1)\n west = lambda p: (p[0] -1, p[1] )\n\n def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n # Points will be added to this list if they havent been traversed yet\n possible_points = dict()\n\n # -- NORTH\n p_pt = north(pt)\n # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.\n if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"N\"\n\n # -- EAST\n p_pt = east(pt)\n if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"E\"\n\n # -- SOUTH\n p_pt = south(pt)\n if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"S\"\n\n # -- WEST\n p_pt = west(pt)\n if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"W\"\n\n return possible_points\n\n # First, connect to a random neighbour that has been visited.\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n\n step = start\n\n # Walk randomly until out of options\n while possible_n := check_neighbours(step):\n next_step, direction = random.choice(tuple(possible_n.items()))\n\n # Connect the two cells\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n\n # Go to next\n step = next_step", "def __maze_generator(self):\n grid = []\n for row in range(self.__row_count):\n new_row = []\n for col in range(self.__col_count):\n new_row.append(RoomFactory.create_room([row, col]))\n if col > 0:\n new_row[col].left = new_row[col - 1]\n new_row[col - 1].right = new_row[col]\n if row > 0:\n new_row[col].up = grid[row - 1][col]\n grid[row - 1][col].down = new_row[col]\n grid.append(new_row)\n return grid", "def createMazeSet(ndMaze,mazeRowSize,mazeColSize):\n MazeTupleSet = set() \n MazeFirstRow = set()\n MazeLastRow = set() \n FullRow = set()\n FullRow = {s for s in range(mazeColSize)}\n for i,item in enumerate(ndMaze):\n if(i==0): \n pass\n else:\n temp_tuple = tuple()\n temp_tuple = ((ndMaze[i][0],ndMaze[i][1]))\n #get all elements from the first and last row\n if(ndMaze[i][0] == 0):\n MazeFirstRow.add(ndMaze[i][1])\n if(ndMaze[i][0] == mazeRowSize - 1):\n MazeLastRow.add(ndMaze[i][1])\n MazeTupleSet.add(temp_tuple)\n #and then find entrance and exit by subtracting intersection with full\n mazeEntrance = FullRow - MazeFirstRow.intersection(FullRow)\n mazeExit = FullRow - MazeLastRow.intersection(FullRow)\n return MazeTupleSet,mazeEntrance,mazeExit", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n 
walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors 
can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in 
deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def _get_movements_8n():\n s2 = math.sqrt(2)\n return [(1, 0, 1.0),\n (0, 1, 1.0),\n (-1, 0, 1.0),\n (0, -1, 1.0),\n (1, 1, s2),\n (-1, 1, s2),\n (-1, -1, s2),\n (1, -1, s2)]", "def astar(maze, start, end, agents):\r\n\r\n # Create start and end node\r\n start_node = Node(None, start)\r\n end_node = Node(None, end)\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n open_pos = []\r\n closed_pos = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n open_pos.append(start)\r\n\r\n # Loop until you find the end\r\n while len(open_list) > 0:\r\n\r\n # Get the current node\r\n current_node = open_list[0]\r\n current_index = 0\r\n \r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n open_pos.pop(current_index)\r\n closed_pos.append(current_node.position)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n\r\n current = current_node\r\n while current is not None:\r\n path.append(current.position) \r\n current = current.parent\r\n\r\n return path[::-1] # Return reversed path\r\n\r\n # # Generate children\r\n for new_position in [(0, -1), (0, 1), (-1, 0), (1, 0)]: # Adjacent squares\r\n \r\n # Get node position\r\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1])\r\n\r\n # Make sure within range\r\n if node_position[0] > maze.shape[0]-1 or node_position[0] < 0 or node_position[1] > maze.shape[1]-1 or node_position[1] < 0:\r\n continue\r\n\r\n # Make sure walkable terrain\r\n if maze[node_position[0]][node_position[1]] == 0:\r\n continue\r\n\r\n if not validataPath(current_node, node_position, agents):\r\n continue\r\n\r\n # Create new node\r\n child = Node(current_node, node_position)\r\n\r\n if node_position not in closed_pos:\r\n child = Node(current_node, node_position)\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + 1\r\n child.h = ((child.position[0] - 
end_node.position[0]) ** 2) + ((child.position[1] - end_node.position[1]) ** 2)\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the open list\r\n if node_position in open_pos:\r\n index = open_pos.index(node_position)\r\n if open_list[index].g > child.g:\r\n open_list[index] = child\r\n\r\n # Add the child to the open list\r\n else:\r\n open_list.append(child)\r\n open_pos.append(node_position)\r\n\r\n return None", "def state_to_locations(state: list) -> list:\n\n locations = []\n for i in range(0, 16):\n locations.append((0, 0))\n # Each tuple represents a location on the board as (row, column)\n\n \"\"\" \"locations\" keeps track of all fifteen numbers in the given state and the goal \n state. The location of the blank in the state is stored as the tuple at locations[0], \n the location of the number 1 is stored as locations[1], so on and so forth.\"\"\"\n\n \"\"\" Due to the nature of indices on a list, when a location is stored as a tuple \n (row, column), the four rows and four columns are represented as indices from 0 \n to 3, even though the numbers 1 through 15 are represented as indices from 1 to \n 15 on the list.\"\"\"\n\n for i in range(0, 4):\n for j in range(0, 4):\n \"\"\" The loop scans the given state and reads the integer at [i][j]. The number \n is stored at its corresponding index in the list \"locations\". By the time the \n loop finishes, the locations of all fifteen numbers as well as the blank in \n the given state will have been stored in the list.\"\"\"\n num = state[i][j]\n locations[num] = (i, j)\n\n return locations", "def astar(maze, start, end):\n\n # Create start and end \n start_node = node.Node(None, start)\n start_node.g = start_node.h = start_node.f = 0\n end_node = node.Node(None, end)\n end_node.g = end_node.h = end_node.f = 0\n\n # Initialize both open and closed list\n open_list = []\n closed_list = []\n\n # Add the start node\n open_list.append(start_node)\n\n # Loop until you find the end\n while len(open_list) > 0:\n\n # Get the current node\n current_node = open_list[0]\n current_index = 0\n for index, item in enumerate(open_list):\n if item.f < current_node.f:\n current_node = item\n current_index = index\n\n # Pop current off open list, add to closed list\n open_list.pop(current_index)\n closed_list.append(current_node)\n\n # Found the goal\n if current_node == end_node:\n path = []\n current = current_node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path[::-1] # Return reversed path\n\n # Generate children\n children = []\n \n for new_position in [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (1, 0, 0), (-1, 0, 0)]: # Adjacent squares\n \n # Get node position\n node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1], current_node.position[2] + new_position[2])\n\n # Make sure within range\n if node_position[1] > (len(maze[0]) - 1) or node_position[1] < 0 or node_position[2] > (len(maze[0][len(maze)-1]) -1) or node_position[2] < 0 or node_position[0] < 0 or node_position[0] > len(maze) - 1:\n continue\n \n # Make sure walkable terrain\n if node_position == end_node.position:\n new_node = node.Node(current_node, node_position)\n \n # Append\n children.append(new_node)\n continue\n \n if maze[node_position[0]][node_position[1]][node_position[2]] != 0:\n continue\n\n # Create new node\n new_node = node.Node(current_node, node_position)\n \n # Append\n children.append(new_node)\n\n # Loop through children\n for child in children:\n # 
Child is on the closed list\n for closed_child in closed_list:\n if child == closed_child:\n break\n else:\n # Create the f, g, and h values\n child.g = current_node.g + 1\n # H: Manhattan distance to end point\n child.h = abs(child.position[0] - end_node.position[0]) + abs(child.position[1] - end_node.position[1])\n child.f = child.g + child.h\n\n # Child is already in the open list\n for open_node in open_list:\n # check if the new path to children is worst or equal \n # than one already in the open_list (by measuring g)\n if child == open_node and child.g >= open_node.g:\n break\n else:\n # Add the child to the open list\n open_list.append(child)", "def test_traversal(self):\n expected = [\n (0, 0),\n (1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1),\n (2, -1), (2, 0), (2, 1), (2, 2), (1, 2), (0, 2), (-1, 2), (-2, 2),\n (-2, 1), (-2, 0), (-2, -1), (-2, -2), (-1, -2), (0, -2), (1, -2), (2, -2),\n (3, -2)\n ]\n\n num_desired = len(expected)\n actual = list(itertools.islice(iter_coords(), num_desired))\n\n for i in range(0, num_desired):\n message = \"At index '{}', expected '{}' and got '{}'\".format(i, expected[0], actual[0])\n self.assertEqual(expected[i], actual[i], msg=message)", "def test_get_random_indices_in_range(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n position = maze._Maze__get_random_indices()\n self.assertTrue(-1 < position[0] < 10)\n self.assertTrue(-1 < position[1] < 10)", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n n = Directions.NORTH\n\n return [s, s, n, s, w, s, w, w, s, w]", "def tinyMazeSearch(problem):\r\n from game import Directions\r\n s = Directions.SOUTH\r\n w = Directions.WEST\r\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]", "def tinyMazeSearch(problem):\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s,s,w,s,w,w,s,w]" ]
[ "0.76696694", "0.6474169", "0.64016163", "0.63518304", "0.63174134", "0.63174134", "0.62914747", "0.62676936", "0.6032349", "0.59382766", "0.59230834", "0.5911689", "0.58982056", "0.58766043", "0.5766839", "0.57373405", "0.57052636", "0.5697641", "0.5694257", "0.5687101", "0.56777024", "0.56530255", "0.56471545", "0.56413245", "0.56413245", "0.56413245", "0.56413245", "0.56413245", "0.56413245", "0.56413245" ]
0.7438955
1
Test maze creates exit as a list of two integers
def test_create_entrance_is_list(self): maze = Maze(4, 4) self.assertTrue(isinstance(maze.exit[0], int)) self.assertTrue(isinstance(maze.exit[1], int))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_exit_reached(minimum: int, maximum: int) -> list:\n the_exit = [minimum - 1, maximum - 1]\n return the_exit", "def test_list_int(self):\n result = mul(2, 4)\n self.assertEqual(result, 8)", "def test_list_int2(self):\n inp = [(0, 0), (10, 1), (1, 2)]\n expected = 19\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)", "def test_generate_mine_sequence(self):\n mine_list = utils.generate_mine_sequence(1)\n self.assertEqual(len(mine_list), utils.MINE_COUNT)", "def test_list(self):\n self.assertEqual(max_integer([1, 5, -7, 6, -4 , 10]), 10)\n self.assertEqual(max_integer([0]), 0)\n self.assertEqual(max_integer([0, 3 + 2, 7, 9 * 8, 35, 12]), 72)", "def test_list_int(self):\n result = add(2, 4)\n self.assertEqual(result, 6)", "def get_test_index():\n return list(range(305, 435))", "def test_2():\n assert primes(2) == [2, 3]", "def test_end(self):\n lst = [1, 5, 98]\n self.assertEqual(max_integer(lst), 98)", "def test_create_entrance_is_list(self):\n maze = Maze(4, 4)\n self.assertTrue(isinstance(maze.entrance[0], int))\n self.assertTrue(isinstance(maze.entrance[1], int))", "def test_running_sum_two_items(self):\n\n argument = [2,5]\n expected = [2,7]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"The list contains two items \")", "def test_running_sum_multi_zeros(self):\n argument = [0,0,0,0]\n expected = [0,0,0,0]\n sums.running_sum(argument)\n self.assertEqual(expected,argument,\"the list contains only zeros\")", "def test_2_return(self):\n plan = list(astar(self.mapp_1_s,\n lambda s : s == self.mapp_1_s,\n MAPPDistanceSum(self.mapp_1_s)))\n self.assertEqual(len(plan),0)", "def test_list_int3(self):\n inp = [(0, 0), (10, 5), (-1, 0)]\n expected = 21\n actual = get_num_steps(inp)\n self.assertEqual(expected, actual)", "def test_12():\n assert primes(12) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61]", "def test_1():\n assert primes(1) == [2]", "def test_int_list(self):\n \n self.assertEqual(False, \n maps.map_list([1, 2, 3]))", "def test_twoSum(self):\n self.assertEqual(twoSum([2, 7, 11, 15], 9), [0, 1])", "def test_3():\n assert primes(3) == [2, 3, 5]", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2", "def test_5():\n assert primes(5) == [2, 3, 5, 7, 11]", "def test_1_return(self):\n plan = list(astar(self.mapp_1_s,\n lambda s : s == self.mapp_1_g,\n MAPPDistanceSum(self.mapp_1_g)))\n for a in plan:\n self.assertIsInstance(a,Action)", "def test_machine_interface(self) -> Tuple[float, List]:\n # b = self.__math__([1, 3, 5, 2, 5, 3, 2, 15, 6,7, 5])\n return self.__compile__()(0, [])", "def test_max_list_zeros(self):\n int_list = [1]\n self.assertEqual(max_list_iter(int_list),1)", "def test_if_it_outputs_correct_output(self):\n self.assertEquals(prime_numbers(5), [2, 3, 5])", "def test_return_negative_numbers_from_lst():\n assert return_negative_numbers_from_lst([-1, 0, 1, -23, 4]) == [-1, -23]\n assert return_negative_numbers_from_lst([0]) == []\n assert return_negative_numbers_from_lst([2, 3, 17]) == []\n assert return_negative_numbers_from_lst([-2, -3, -17]) == [-2, -3, -17]", "def test_task330_main_logic(number, expected_value):\r\n assert list(algo.Task330.main_logic(number)) == expected_value", "def test_milestone_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('milestone 
list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_void_list(self):\n lst = []\n self.assertIsNone(max_integer(lst))" ]
[ "0.62346864", "0.60159165", "0.57737637", "0.5748641", "0.5747704", "0.57114", "0.56058764", "0.56034714", "0.5573308", "0.5560567", "0.5557499", "0.55546963", "0.55476743", "0.55161655", "0.5508791", "0.5507034", "0.5499264", "0.54853886", "0.5477175", "0.54638946", "0.54615057", "0.54557914", "0.54461366", "0.5441862", "0.53834105", "0.5368768", "0.53478664", "0.534532", "0.53394", "0.5322357" ]
0.6633522
0
Test maze creates random indices between 0 and len(row) and 0 and len(column).
def test_get_random_indices_in_range(self): maze = Maze(10, 10) for test in range(1000): position = maze._Maze__get_random_indices() self.assertTrue(-1 < position[0] < 10) self.assertTrue(-1 < position[1] < 10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def testMazeExists(self):\n\n pass", "def testMazeExists(self):\n pass", "def gen_maze(dim, p):\n maze = []\n for i in range(dim):\n maze.append([])\n for j in range(dim):\n if(random.uniform(0, 1) < p):\n maze[i].append(1)\n else:\n maze[i].append(0)\n\n maze[0][0] = 0\n maze[dim - 1][dim - 1] = 0\n return maze", "def generate(width=20, height=20):\n m = Maze(width, height)\n m.randomize()\n return m", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def generate_random_maze_matrix(size, ambient_size=None):\n maze = np.ones((size, size))\n\n # Start from a random point and recursively open points\n closed_neighbors = [] # Closed points that are neighbors of open points\n \n def _open_point(point):\n # Open a point and add its neighbors to closed_neighbors\n for p in _get_neighbors(size, point):\n if maze[p[0], p[1]] and p not in closed_neighbors:\n closed_neighbors.append(p)\n maze[point[0], point[1]] = 0\n\n def _find_and_open_new_point():\n # Find a closed neighbor that can be opened without creating an open\n # block, open it, and return True. 
If no such point exists, return\n # False.\n np.random.shuffle(closed_neighbors)\n for new_point in closed_neighbors:\n if not maze[new_point[0], new_point[1]]:\n continue\n will_make_open_block = any([\n np.sum(maze[i: i + 2, j: j + 2]) <= 1\n for i, j in _get_containing_blocks(size, new_point)\n ])\n if not will_make_open_block:\n _open_point(new_point)\n return True\n return False\n\n # Seed the maze and iteratively open points\n _open_point(tuple(np.random.randint(0, size, size=(2,))))\n points_to_add = True\n while points_to_add:\n points_to_add = _find_and_open_new_point()\n\n # Remove dead ends\n _remove_dead_ends(maze)\n \n # If maze has no open points, recurse to generate a new one\n if np.sum(1 - maze) == 0:\n return generate_random_maze_matrix(size, ambient_size=ambient_size)\n\n # Add wall border if necessary\n if ambient_size is not None and ambient_size > size:\n maze_with_border = np.ones((ambient_size, ambient_size))\n start_index = (ambient_size - size) // 2\n maze_with_border[start_index: start_index + size,\n start_index: start_index + size] = maze\n maze = maze_with_border\n\n return maze", "def maze(size):\n assert size % 2\n grid = np.random.randint(0, 2, size=(size, size,), dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n \n key = hash(str(grid))\n looped = set()\n yield grid\n \n def alive(i, j):\n n = np.sum(grid[max(0, i-1):i+2, max(0, j-1):j+2]) - grid[i, j]\n return 1 if grid[i, j] and 0 < n < 6 else int(n == 3)\n \n while key not in looped:\n looped.add(key)\n grid = np.array([[alive(i, j) for j in range(size)] \\\n for i in range(size)], dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n key = hash(str(grid))\n yield grid", "def get_random_maze_instance(nrows, ncols, sparseness):\n\n st_row = random.randrange(0, nrows)\n st_col = random.randrange(0, ncols)\n end_row = random.randrange(0, nrows)\n end_col = random.randrange(0, ncols)\n\n maze._allow_creation = True\n m = maze(nrows, ncols, st_row, st_col, end_row, end_col)\n maze._allow_creation = False\n\n # Remove walls\n for i in range(0, nrows):\n for j in range(0, ncols):\n for d in (EAST, NORTH):\n if m.is_cell((i, j), d) and \\\n m.has_wall((i, j), d) and \\\n random.random() < sparseness:\n m.remove_wall((i, j), d)\n\n return m", "def generate_random_maze(w=20, h=20):\n\n if not (5 <= w <= 40):\n raise ValueError(\"width: {0} was not in the appropriate range of\"\n \" 5 - 40\".format(w))\n if not(5 <= h <= 40):\n raise ValueError(\"height: {0} was not in the appropriate range of\"\n \" 5 - 40\".format(h))\n vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)]\n ver = [[\"10\"] * w + ['1'] for _ in range(h)] + [[]]\n hor = [[\"11\"] * w + ['1'] for _ in range(h + 1)]\n\n def walk(x, y):\n vis[y][x] = 1\n\n d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)]\n shuffle(d)\n for (xx, yy) in d:\n if vis[yy][xx]: continue\n if xx == x: hor[max(y, yy)][x] = \"10\"\n if yy == y: ver[y][max(x, xx)] = \"00\"\n walk(xx, yy)\n\n walk(randrange(w), randrange(h))\n maze = []\n for (a, b) in zip(hor, ver):\n if a:\n row1 = ''.join(a)\n row2 = ''.join(b)\n r1_ls = []\n r2_ls = []\n for thing in row1:\n thing = int(thing)\n r1_ls.append(thing)\n for thing in row2:\n thing = int(thing)\n r2_ls.append(thing)\n if r1_ls:\n maze.append(r1_ls)\n if r2_ls:\n maze.append(r2_ls)\n\n return maze", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in 
range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def generate_maze(self):\r\n # reset the grid before generation\r\n self.initialize_grid()\r\n\r\n # choose the first cell to put in the visited list\r\n # see Step 1 of the algorithm.\r\n current = self.unvisited.pop(random.randint(0,len(self.unvisited)-1))\r\n self.visited.append(current)\r\n self.cut(current)\r\n\r\n # loop until all cells have been visited\r\n while len(self.unvisited) > 0:\r\n # choose a random cell to start the walk (Step 2)\r\n first = self.unvisited[random.randint(0,len(self.unvisited)-1)]\r\n current = first\r\n # loop until the random walk reaches a visited cell\r\n while True:\r\n # choose direction to walk (Step 3)\r\n dirNum = random.randint(0,3)\r\n # check if direction is valid. If not, choose new direction\r\n while not self.is_valid_direction(current,dirNum):\r\n dirNum = random.randint(0,3)\r\n # save the cell and direction in the path\r\n self.path[current] = dirNum\r\n # get the next cell in that direction\r\n current = self.get_next_cell(current,dirNum,2)\r\n if (current in self.visited): # visited cell is reached (Step 5)\r\n break\r\n\r\n current = first # go to start of path\r\n # loop until the end of path is reached\r\n while True:\r\n # add cell to visited and cut into the maze\r\n self.visited.append(current)\r\n self.unvisited.remove(current) # (Step 6.b)\r\n self.cut(current)\r\n\r\n # follow the direction to next cell (Step 6.a)\r\n dirNum = self.path[current]\r\n crossed = self.get_next_cell(current,dirNum,1)\r\n self.cut(crossed) # cut crossed edge\r\n\r\n current = self.get_next_cell(current,dirNum,2)\r\n if (current in self.visited): # end of path is reached\r\n self.path = dict() # clear the path\r\n break\r\n \r\n self.generated = True", "def new_tile(self):\r\n # replace with your code\r\n # complete search ....\r\n non_zero_count = 0;\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if self._grid_tile[row][col] == 0:\r\n non_zero_count += 1\r\n random_choice = random.randrange(0, non_zero_count)\r\n count = 0\r\n # another search ....\r\n generated_new_tile = False\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if generated_new_tile == False and self._grid_tile[row][col] == 0:\r\n if count != random_choice:\r\n count += 1 \r\n else:\r\n if random.randrange(0,100) < 10:\r\n self.set_tile(row, col ,4)\r\n else:\r\n self.set_tile(row, col ,2)\r\n generated_new_tile = True", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n 
self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def generate_maze(self):\n complexity = self.maze_complexity\n density = self.maze_density\n width = self.ncols-1\n height = self.nrows-1\n\n \"\"\"Generate a maze using a maze generation algorithm.\"\"\"\n # Only odd shapes\n shape = ((height // 2) * 2 + 1, (width // 2) * 2 + 1)\n # Adjust complexity and density relative to maze size\n complexity = int(complexity * (5 * (shape[0] + shape[1]))) # Number of components\n density = int(density * ((shape[0] // 2) * (shape[1] // 2))) # Size of components\n # Build actual maze\n Z = numpy.zeros(shape, dtype=bool)\n # Fill borders\n Z[0, :] = Z[-1, :] = 1\n Z[:, 0] = Z[:, -1] = 1\n # Make aisles\n for i in range(density):\n x, y = rand(0, shape[1] // 2) * 2, rand(0, shape[0] // 2) * 2 # Pick a random position\n Z[y, x] = 1\n for j in range(complexity):\n neighbours = []\n if x > 1: neighbours.append((y, x - 2))\n if x < shape[1] - 2: neighbours.append((y, x + 2))\n if y > 1: neighbours.append((y - 2, x))\n if y < shape[0] - 2: neighbours.append((y + 2, x))\n if len(neighbours):\n y_, x_ = neighbours[rand(0, len(neighbours) - 1)]\n if Z[y_, x_] == 0:\n Z[y_, x_] = 1\n Z[y_ + (y - y_) // 2, x_ + (x - x_) // 2] = 1\n x, y = x_, y_\n return Z", "def find_random_spot(self):\n random_line_index = []\n random_col = 0\n random_col_index = 100\n\n # -- loops through maze and stops when it finds an open spot\n while random_col != \" \":\n random_line_index = randrange(len(self._grid))\n random_col_index = randrange(len(self._grid[random_line_index]))\n random_col = self._grid[random_line_index][random_col_index]\n # -- returns the coordinates of the open spot\n coordinates = (random_line_index, random_col_index)\n return coordinates", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def test_ctor(self):\r\n cols = 5\r\n rows = 5\r\n maze = Maze(rows, cols)\r\n\r\n self.assertEqual(maze.num_cols, cols)\r\n self.assertEqual(maze.num_rows, rows)\r\n self.assertEqual(maze.id, 0)\r\n self.assertEqual(maze.grid_size, rows*cols)\r\n\r\n id=33\r\n maze2 = Maze(rows, cols, id)\r\n self.assertEqual(maze2.num_cols, cols)\r\n self.assertEqual(maze2.num_rows, rows)\r\n self.assertEqual(maze2.id, id)\r\n self.assertEqual(maze2.grid_size, rows * cols)", "def test_indices_distance(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n self.assertTrue(\n (abs(maze.entrance[0] - maze.exit[0]) +\n abs(maze.entrance[1] - maze.exit[1])) >= 9)", "def random_walker_generator(rows, cols, negative=False):\n attempts = 0\n while True:\n steps = 0\n found_goal = False\n grid = np.zeros((rows, cols))\n # start on bottom row\n current = (rows - 1, random.randint(0, cols - 1))\n grid[current] = 1\n steps += 1\n visited = set(current)\n\n connection = 0\n\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n while len(neighbors) > 0:\n for (neigh_x, neigh_y) in set(neighbors):\n # lookahead for neighbors neighbors\n lookahead = get_neighbors(\n (neigh_x, neigh_y), grid, visited, similar_cells={1})\n if len(lookahead) < 3: # contains neighbors with 1's\n # edge cases\n if neigh_x == 0 and random.random() >= 0.25:\n # chance of reaching goal at top\n continue\n elif ((neigh_y == 0 or neigh_y == rows - 1) and\n len(lookahead) == 2):\n continue\n 
else:\n neighbors.remove((neigh_x, neigh_y))\n\n if len(neighbors) == 0:\n # print (\"no more neighbors to pick\")\n break\n\n # time.sleep(0.15)\n # os.system(\"clear\")\n # draw_grid(grid)\n\n current = random.sample(neighbors, 1)[0] # pick a random neighbor\n # print (\"selected: \", current)\n grid[current] = 1\n steps += 1\n visited.add(current)\n if current[0] == 0: # top row\n # print (\"top row reached\")\n found_goal = True\n break\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n\n if (found_goal and not negative) or (not found_goal and negative):\n # print (\"Succeeded after %d attempts\" % attempts)\n attempts = 0\n grid = apply_noise(grid)\n\n # hack\n # override above step counter, because the random noise\n # might have added more, shorter connections\n # we do this because network was picking up patterns\n # from making random noise not entirely random\n steps, connected = check_connections_length(grid)\n if connected and negative:\n continue\n\n # randomly flip grid upside down\n if random.random() <= 0.5:\n grid = np.flipud(grid)\n\n yield grid, steps, connected\n else:\n attempts += 1", "def new_tile(self):\n \n empty_items = []\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if self.get_tile(row, col) == 0:\n empty_items.append((row, col))\n \n random_row = 0\n random_col = 0\n if len(empty_items) != 0:\n random_empty_tile = random.randrange(0, len(empty_items))\n (random_row, random_col) = empty_items[random_empty_tile]\n else:\n return\n # the % of getting \"4\" from 0~9 is 10%\n random_time = random.randrange(0, 10)\n \n if random_time == 4:\n self._cells[random_row][random_col] = 4\n else:\n self._cells[random_row][random_col] = 2", "def gen_fire_maze(maze):\n maze_f = copy.deepcopy(maze)\n dim = len(maze)\n num_empty = 0\n\n # count the number of empty cells in the maze\n for i in maze_f:\n num_empty += (dim-sum(i))\n\n # chose an empty cell to set on fire\n fire_spawn = random.randrange(num_empty)\n\n # iterate over chosen number of empty cells before setting one on fire\n for i in range(dim):\n for j in range(dim):\n if(maze_f[i][j] == 0 and fire_spawn == 0):\n maze_f[i][j] = 2 # set cell to be on fire\n return (maze_f, (i, j))\n elif(maze_f[i][j] == 0):\n fire_spawn -= 1 # decrement counter\n\n # function should always return before loop is completed\n return -1", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))", "def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in 
range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! Better luck next time!\")", "def randomfill(self):\r\n for r in range(self.rows):\r\n for c in range(self.columns):\r\n self.mat[r][c] = randint(-10, 10)", "def randomized_prims(width=16, height=16) -> Maze:\n maze = Maze(width=width, height=height, algorithm=None)\n visited = [[False for _ in range(maze.width)] for _ in range(maze.height)]\n\n # ensure only one entrance to the center squares\n centerx = maze.width // 2 - 1\n centery = maze.height // 2 - 1\n \n visited[centery][centerx] = True\n visited[centery][centerx+1] = True\n visited[centery+1][centerx+1] = False\n visited[centery+1][centerx] = True\n\n visited[0][0] = True\n boundary = [(0,0,Compass.EAST), (0,0,Compass.SOUTH)]\n\n while boundary:\n x, y, direction = boundary.pop(random.randint(0, len(boundary)-1))\n nx, ny = maze.neighbor(x, y, direction)\n if not visited[ny][nx]:\n maze.break_wall(x, y, direction)\n boundary.extend([(nx,ny,direction) for direction in maze.neighbors(nx, ny)])\n visited[ny][nx] = True\n \n return maze", "def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)" ]
[ "0.76525325", "0.69647396", "0.69635177", "0.6832171", "0.6793682", "0.6792029", "0.6658967", "0.66427785", "0.66094947", "0.65932906", "0.6544248", "0.6461002", "0.6455614", "0.6413605", "0.63796437", "0.63742", "0.6348787", "0.6320797", "0.63205516", "0.6318752", "0.62997806", "0.6257158", "0.6251568", "0.622176", "0.62199897", "0.62081605", "0.6167674", "0.61175656", "0.611102", "0.6087776" ]
0.7503006
1
Test that a maze exit can be found at creation using private method __verify_exit_path.
def test_maze_created_can_be_traversed(self): maze = Maze(100, 100) self.assertTrue(maze._Maze__verify_exit_path())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].right:\n maze.grid[row][col].right.left = None\n if maze.grid[row][col].down:\n maze.grid[row][col].down.up = None\n if maze.grid[row][col].left:\n maze.grid[row][col].left.right = None\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def test_maze_entrance_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n maze.grid[row][col].up = None\n maze.grid[row][col].right = None\n maze.grid[row][col].down = None\n maze.grid[row][col].left = None\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def __verify_exit_path(self, position=None):\n\n # Start from entrance if no position provided.\n if not position:\n position = self.__entrance\n\n # Create search queue, traversed list and return condition.\n search_queue = deque()\n search_queue.append(position)\n traversed = [[False for _ in range(self.__col_count)]\n for _ in range(self.__row_count)]\n can_exit = False\n\n # Add first position to traversed\n row, col = position\n traversed[row][col] = True\n\n count = 0\n # Loop through the queue, adding new positions.\n while search_queue:\n # Grab first position.\n count += 1\n row, col = search_queue.popleft()\n # Check if room is exit.\n if [row, col] == self.__exit:\n can_exit = True\n break\n # Add positions to queue if valid.\n if (self.__grid[row][col].up and\n self.__can_enter([row - 1, col], traversed)):\n search_queue.append([row - 1, col])\n traversed[row - 1][col] = True\n if (self.__grid[row][col].right and\n self.__can_enter([row, col + 1], traversed)):\n search_queue.append([row, col + 1])\n traversed[row][col + 1] = True\n if (self.__grid[row][col].down and\n self.__can_enter([row + 1, col], traversed)):\n search_queue.append([row + 1, col])\n traversed[row + 1][col] = True\n if (self.__grid[row][col].left and\n self.__can_enter([row, col - 1], traversed)):\n search_queue.append([row, col - 1])\n traversed[row][col - 1] = True\n return can_exit", "def 
checkSameEntranceExit(mazeEntrance,mazeExit,solnEntrance,solnExit):\n if((solnEntrance == mazeEntrance) and (solnExit == mazeExit)):\n return True\n else:\n return False", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def go_to_exit(self):\n ys = [self.currY]\n xs = [self.currX]\n options = np.zeros((self.h, self.w), np.uint8)\n visited = np.zeros((self.h, self.w), np.bool_)\n visited[self.currY, self.currX] = True\n distance = 1\n while True:\n while len(ys) > 0:\n cur = (ys.pop(), xs.pop())\n for d, m in enumerate(self.__get_map_offsets()):\n if (m[cur[0], cur[1]] > 1) and (\n not visited[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]]):\n options[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]] = distance\n visited[cur[0] + self.directions[d][0], cur[1] + self.directions[d][1]] = True\n if (cur[0] + self.directions[d][0] == self.exitY) and (\n cur[1] + self.directions[d][1] == self.exitX):\n return self.__convert_to_path_exit(options)\n yTemp, xTemp = np.where(options == distance)\n ys += yTemp.tolist()\n xs += xTemp.tolist()\n distance += 1", "def is_exit(self, x_coordinate, y_coordinate):\n if self.grid[x_coordinate][y_coordinate] == POINT_OF_EXIT:\n return True\n else:\n return False", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def test_indices_distance(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n self.assertTrue(\n (abs(maze.entrance[0] - maze.exit[0]) +\n abs(maze.entrance[1] - maze.exit[1])) >= 9)", "def check_path_tile(self):\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.return_path and self.tile == self.return_path[0]:\n del self.return_path[0]\n if not len(self.return_path) > 0:\n return '*' # signal that the path is complete\n return None", "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_set_path_2(self, sys_exit_mock):\n output = basic.set_path(self.file, kind=\"file\", expect=True)\n self.assertTrue(sys_exit_mock.called)", "def can_reach_exit(self, position):\n return self.__verify_exit_path(position)", "def 
check_if_exit_reached(character_dict: dict) -> bool:\n return (character_dict['x'], character_dict['y']) == (4, 4)", "def test_finish_draw(self):\n game = self.ending(['bbw.wwww'], 8, 1)\n game.man_move(0, 3)\n self.assertTrue(game.finished)", "def set_exit(self, exit_name):\r\n pass", "def assert_clean_exit():\n with pytest.raises(SystemExit) as e:\n yield\n assert e.value.code == 0", "def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)", "def main(self):\n sys.setrecursionlimit(2**20)#10**8)\n self.result=self.Breakwalls() #IS THE INITIAL RESULT OF THE MAZE. THE INITIAL LIST STARTING FROM A RANDOM POINT AND ENDING WHEN ALL CELLS VISITED.\n if self.CurrentCell==[1,1]: #[1,1] is already taken as the start of explore \n self.CurrentCell=[1,randrange(2,self.N+1)]\n self.MazeKey=self.GenerateKey() ##Generates a mazekey\n print(\"Start: \", [1,1])\n print(\"MazeKey: \", self.MazeKey)\n print(\"Exit: \", self.CurrentCell)\n self.Objectives() #Things to do in the maze, find key, find exit.\n print(\"Path to key: \", self.ExploreKey) #Path to the Key\n print()\n print(\"Path from key to exit: \", self.ExploreExit)\n print()\n print(\"Entire Path from Start to Key, and Key to Exit: \", self.EntirePath) \n self.drawmaze()", "def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)", "def test_endofgame(self):\n game = self.ending(['bwwwwww.'], 8, 1)\n game.man_move(0, 7)\n self.assertTrue(game.finished)", "def is_valid_exit(exits, chosen_exit):\r\n return chosen_exit in exits", "def is_in_maze(handler_input):\n # type: (HandlerInput) -> bool\n attr = handler_input.attributes_manager.persistent_attributes\n in_maze = 'NO'\n if 'in_maze' in attr:\n in_maze = attr['in_maze']\n\n return in_maze", "def check_exit(self, position, direction):\n if self.get_room((position[0] + direction[0], position[1] + direction[1])):\n return True\n return False" ]
[ "0.7718954", "0.7711811", "0.7170441", "0.6457077", "0.64257324", "0.63298804", "0.6325129", "0.6186493", "0.6110823", "0.60940695", "0.6087242", "0.5893084", "0.58477277", "0.57537746", "0.56882125", "0.56534946", "0.5650642", "0.5640697", "0.56240755", "0.56226784", "0.5569984", "0.55472773", "0.55343217", "0.55307984", "0.5530738", "0.5518563", "0.55041146", "0.5487127", "0.54687256", "0.54643327" ]
0.7800023
0
Test that a maze exit can be found at creation from random positions.
def test_maze_created_traversed_from_indices(self): maze = Maze(100, 100) for test in range(20): self.assertTrue(maze.can_reach_exit([random.randint(0, 99), random.randint(0, 99)]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def test_get_random_indices_in_range(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n position = maze._Maze__get_random_indices()\n self.assertTrue(-1 < position[0] < 10)\n self.assertTrue(-1 < position[1] < 10)", "def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].right:\n maze.grid[row][col].right.left = None\n if maze.grid[row][col].down:\n maze.grid[row][col].down.up = None\n if maze.grid[row][col].left:\n maze.grid[row][col].left.right = None\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_create_maze(self):\n maze = Maze(4, 4)\n self.assertEqual(maze.row_count, 4)\n self.assertEqual(maze.col_count, 4)\n self.assertEqual(maze.size, 16)\n self.assertTrue(isinstance(maze.entrance, list))\n self.assertTrue(isinstance(maze.exit, list))", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def test_indices_distance(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n self.assertTrue(\n (abs(maze.entrance[0] - maze.exit[0]) +\n abs(maze.entrance[1] - maze.exit[1])) >= 9)", "def mazeTest():\r\n\tmyMaze = Maze()\r\n\tmyMaze.addCoordinate(1,0,0)\r\n\tmyMaze.addCoordinate(1,1,0)\r\n\tmyMaze.addCoordinate(7,1,0)\r\n\tmyMaze.addCoordinate(1,2,0)\r\n\tmyMaze.addCoordinate(2,2,0)\r\n\tmyMaze.addCoordinate(3,2,0)\r\n\tmyMaze.addCoordinate(4,2,0)\r\n\tmyMaze.addCoordinate(6,2,0)\r\n\tmyMaze.addCoordinate(7,2,0)\r\n\tmyMaze.addCoordinate(4,3,0)\r\n\tmyMaze.addCoordinate(7,3,0)\r\n\tmyMaze.addCoordinate(4,4,0)\r\n\tmyMaze.addCoordinate(7,4,0)\r\n\tmyMaze.addCoordinate(3,5,0)\r\n\tmyMaze.addCoordinate(4,5,0)\r\n\tmyMaze.addCoordinate(7,5,0)\r\n\tmyMaze.addCoordinate(1,6,0)\r\n\tmyMaze.addCoordinate(2,6,0)\r\n\tmyMaze.addCoordinate(3,6,0)\r\n\tmyMaze.addCoordinate(4,6,0)\r\n\tmyMaze.addCoordinate(5,6,0)\r\n\tmyMaze.addCoordinate(6,6,0)\r\n\tmyMaze.addCoordinate(7,6,0)\r\n\tmyMaze.addCoordinate(5,7,0)\r\n\tmyMaze.printMaze()\r\n\tprint(myMaze.findRoute(x1=1, y1=0, x2=5, y2=7))", "def __random_entrance_exit(self):\n # Generate random entrance and exit coordinates.\n while True:\n get_random = False\n rand_entrance = self.__get_random_indices()\n rand_exit = self.__get_random_indices()\n # Entrance and exit coordinates must be different.\n if rand_entrance == rand_exit:\n get_random = True\n # Make sure there is some distance between entrance and exit.\n distance = (abs(rand_entrance[0] - rand_exit[0]) +\n abs(rand_entrance[1] - rand_exit[1]))\n\n if distance < round(math.sqrt(self.size) - 1):\n get_random = True\n if not get_random:\n # Set rooms to entrance and exit and return\n self.__grid[rand_entrance[0]][rand_entrance[1]].is_entrance = True\n self.__grid[rand_exit[0]][rand_exit[1]].is_exit = True\n\n return rand_entrance, rand_exit", "def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n 
row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_entrance_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n maze.grid[row][col].up = None\n maze.grid[row][col].right = None\n maze.grid[row][col].down = None\n maze.grid[row][col].left = None\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def maze(size):\n assert size % 2\n grid = np.random.randint(0, 2, size=(size, size,), dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n \n key = hash(str(grid))\n looped = set()\n yield grid\n \n def alive(i, j):\n n = np.sum(grid[max(0, i-1):i+2, max(0, j-1):j+2]) - grid[i, j]\n return 1 if grid[i, j] and 0 < n < 6 else int(n == 3)\n \n while key not in looped:\n looped.add(key)\n grid = np.array([[alive(i, j) for j in range(size)] \\\n for i in range(size)], dtype=bool)\n grid[0, 0:size] = grid[size - 1, 0:size] = True\n grid[0:size, 0] = grid[0:size, size - 1] = True\n key = hash(str(grid))\n yield grid", "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def testCanExploreASmallEmptyRoom(self):\n picture = \"\"\"\n ----------- level z=0 :\n ####\n # #\n # #\n ####\n -----------\n \"\"\"\n max_moves_to_make = 3\n\n points_visited = self.explore_dungeon(picture, max_moves_to_make)\n\n assert_that(points_visited).contains(Point(2, 2, 0))\n assert_that(points_visited).contains(Point(1, 2, 0))\n assert_that(points_visited).contains(Point(2, 1, 0))\n assert_that(points_visited).contains(Point(1, 1, 0))", "def new_tile(self):\r\n # replace with your code\r\n # complete search ....\r\n non_zero_count = 0;\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if self._grid_tile[row][col] == 0:\r\n non_zero_count += 1\r\n random_choice = random.randrange(0, non_zero_count)\r\n count = 0\r\n # another search ....\r\n generated_new_tile = False\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if generated_new_tile == False and self._grid_tile[row][col] == 0:\r\n if count != random_choice:\r\n count += 1 \r\n else:\r\n if random.randrange(0,100) < 10:\r\n self.set_tile(row, col ,4)\r\n else:\r\n self.set_tile(row, col ,2)\r\n generated_new_tile = True", "def get_random_maze_instance(nrows, ncols, sparseness):\n\n st_row = random.randrange(0, nrows)\n st_col = random.randrange(0, ncols)\n end_row = random.randrange(0, nrows)\n end_col = random.randrange(0, ncols)\n\n maze._allow_creation = True\n m = maze(nrows, ncols, st_row, st_col, end_row, end_col)\n maze._allow_creation = False\n\n # Remove 
walls\n for i in range(0, nrows):\n for j in range(0, ncols):\n for d in (EAST, NORTH):\n if m.is_cell((i, j), d) and \\\n m.has_wall((i, j), d) and \\\n random.random() < sparseness:\n m.remove_wall((i, j), d)\n\n return m", "def test_check_user_location_and_goal_location_match_state_and_next_state():\n for _ in range(50):\n env = Four_Rooms_Environment()\n env.reset()\n for _ in range(50):\n move = randint(0, 3)\n env.step(move)\n assert env.state == [env.location_to_state(env.current_user_location), env.location_to_state(env.current_goal_location)]\n assert env.next_state == [env.location_to_state(env.current_user_location), env.location_to_state(env.current_goal_location)]", "def generate_maze(self):\r\n # reset the grid before generation\r\n self.initialize_grid()\r\n\r\n # choose the first cell to put in the visited list\r\n # see Step 1 of the algorithm.\r\n current = self.unvisited.pop(random.randint(0,len(self.unvisited)-1))\r\n self.visited.append(current)\r\n self.cut(current)\r\n\r\n # loop until all cells have been visited\r\n while len(self.unvisited) > 0:\r\n # choose a random cell to start the walk (Step 2)\r\n first = self.unvisited[random.randint(0,len(self.unvisited)-1)]\r\n current = first\r\n # loop until the random walk reaches a visited cell\r\n while True:\r\n # choose direction to walk (Step 3)\r\n dirNum = random.randint(0,3)\r\n # check if direction is valid. If not, choose new direction\r\n while not self.is_valid_direction(current,dirNum):\r\n dirNum = random.randint(0,3)\r\n # save the cell and direction in the path\r\n self.path[current] = dirNum\r\n # get the next cell in that direction\r\n current = self.get_next_cell(current,dirNum,2)\r\n if (current in self.visited): # visited cell is reached (Step 5)\r\n break\r\n\r\n current = first # go to start of path\r\n # loop until the end of path is reached\r\n while True:\r\n # add cell to visited and cut into the maze\r\n self.visited.append(current)\r\n self.unvisited.remove(current) # (Step 6.b)\r\n self.cut(current)\r\n\r\n # follow the direction to next cell (Step 6.a)\r\n dirNum = self.path[current]\r\n crossed = self.get_next_cell(current,dirNum,1)\r\n self.cut(crossed) # cut crossed edge\r\n\r\n current = self.get_next_cell(current,dirNum,2)\r\n if (current in self.visited): # end of path is reached\r\n self.path = dict() # clear the path\r\n break\r\n \r\n self.generated = True", "def generate_random_maze_matrix(size, ambient_size=None):\n maze = np.ones((size, size))\n\n # Start from a random point and recursively open points\n closed_neighbors = [] # Closed points that are neighbors of open points\n \n def _open_point(point):\n # Open a point and add its neighbors to closed_neighbors\n for p in _get_neighbors(size, point):\n if maze[p[0], p[1]] and p not in closed_neighbors:\n closed_neighbors.append(p)\n maze[point[0], point[1]] = 0\n\n def _find_and_open_new_point():\n # Find a closed neighbor that can be opened without creating an open\n # block, open it, and return True. 
If no such point exists, return\n # False.\n np.random.shuffle(closed_neighbors)\n for new_point in closed_neighbors:\n if not maze[new_point[0], new_point[1]]:\n continue\n will_make_open_block = any([\n np.sum(maze[i: i + 2, j: j + 2]) <= 1\n for i, j in _get_containing_blocks(size, new_point)\n ])\n if not will_make_open_block:\n _open_point(new_point)\n return True\n return False\n\n # Seed the maze and iteratively open points\n _open_point(tuple(np.random.randint(0, size, size=(2,))))\n points_to_add = True\n while points_to_add:\n points_to_add = _find_and_open_new_point()\n\n # Remove dead ends\n _remove_dead_ends(maze)\n \n # If maze has no open points, recurse to generate a new one\n if np.sum(1 - maze) == 0:\n return generate_random_maze_matrix(size, ambient_size=ambient_size)\n\n # Add wall border if necessary\n if ambient_size is not None and ambient_size > size:\n maze_with_border = np.ones((ambient_size, ambient_size))\n start_index = (ambient_size - size) // 2\n maze_with_border[start_index: start_index + size,\n start_index: start_index + size] = maze\n maze = maze_with_border\n\n return maze", "def test_spawn(self):\n self.grid.spawn()\n\n self.assertEqual(xyzroom.XYZRoom.objects.all().count(), 6)\n self.assertEqual(xyzroom.XYZExit.objects.all().count(), 10)\n\n room1 = xyzroom.XYZRoom.objects.get_xyz(xyz=(0, 1, \"map12a\"))\n room2 = xyzroom.XYZRoom.objects.get_xyz(xyz=(1, 0, \"map12b\"))\n east_exit = [exi for exi in room1.exits if exi.db_key == \"east\"][0]\n west_exit = [exi for exi in room2.exits if exi.db_key == \"west\"][0]\n\n # make sure exits traverse the maps\n self.assertEqual(east_exit.db_destination, room2)\n self.assertEqual(west_exit.db_destination, room1)", "def test_human_cannot_move_through_grid_wall(mock_random):\n mock_random.randint.return_value = 0\n human = Human()\n\n coordinates = [0, 0]\n dimensions = [4, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n assert new_coordinates == [0, 0]", "def randomized_prims(width=16, height=16) -> Maze:\n maze = Maze(width=width, height=height, algorithm=None)\n visited = [[False for _ in range(maze.width)] for _ in range(maze.height)]\n\n # ensure only one entrance to the center squares\n centerx = maze.width // 2 - 1\n centery = maze.height // 2 - 1\n \n visited[centery][centerx] = True\n visited[centery][centerx+1] = True\n visited[centery+1][centerx+1] = False\n visited[centery+1][centerx] = True\n\n visited[0][0] = True\n boundary = [(0,0,Compass.EAST), (0,0,Compass.SOUTH)]\n\n while boundary:\n x, y, direction = boundary.pop(random.randint(0, len(boundary)-1))\n nx, ny = maze.neighbor(x, y, direction)\n if not visited[ny][nx]:\n maze.break_wall(x, y, direction)\n boundary.extend([(nx,ny,direction) for direction in maze.neighbors(nx, ny)])\n visited[ny][nx] = True\n \n return maze", "def generate_world(world_seed, biome_min, biome_max, w, h):\n\n while True:\n\n try:\n\n # Set the initial seed for the random module (random.seed())\n seed(world_seed)\n\n # Create a blank map (2D list filled with '0' strings\n world = [[0 for y in range(h)] for x in range(w)]\n # Generates the random values for the terrain construction\n terrain = [randrange(20) + 40 for _ in range(w)]\n\n #Empty biome map\n biomes = []\n\n #Generates biomes\n for __ in range(w//biome_min):\n\n #Biome at cursor\n biome_select = choice(list(biome_data))\n\n #Biomes size\n for _ in range(randint(biome_min, biome_max)):\n biomes.append(biome_select)\n\n #World size met\n if len(biomes) >= w:\n biomes = biomes[:w] #Truncate 
selection\n break\n\n\n # ----- Construct the Terrain\n # Counter that changes dynamically to check through all blocks in the terrain list\n cur_pos = 0\n # Runs through all the generated numbers in a while loop\n while cur_pos < w:\n\n # print(\".\", end=\"\")\n\n # Check to see if terrain gap is too large\n\n if abs(terrain[cur_pos] - terrain[cur_pos - 1]) > biome_data[str(biomes[cur_pos])][\"maxh\"]: # if terrain gap is larger than threshhold (too big)\n\n for n in range(randint(biome_data[str(str(biomes[cur_pos]))][\"minx\"], biome_data[str(str(biomes[cur_pos]))][\"maxx\"])):\n # Insert a new value into the terrain list between the values that are too far apart\n terrain.insert(cur_pos, (terrain[cur_pos] + terrain[cur_pos - 1]) // 2)\n\n else: # Difference between the two blocks is not too big\n\n # Check next block\n cur_pos += 1\n\n # ----- Transfer Terrain To Empty World\n # Run through every space in the empty world\n for x in range(len(world)): # runs through each level\n for y in range(len(world[x])): # runs through each individual space\n\n # Generates structures\n if y > terrain[x]:\n\n #Top layer\n if y - terrain[x] == 1:\n\n #Sets the layer with block specified in biome config\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"top\"]]\n\n if randint(0, 10) == 0 and x + 10 < w:\n world = generate_structure(x, y - 1, world, choice(biome_data[biomes[x]][\"structure\"]))\n\n #Middle layer\n elif y - terrain[x] < randint(3, 8):\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"middle\"]]\n\n #Base\n else:\n world[x][y] = block_lookup[biome_data[biomes[x]][\"layer\"][\"lower\"]]\n\n #Generate ores\n # Coal\n if 10 + terrain[x] > y > 5 + terrain[x] and randint(0, 200) == 0:\n for cluster in range(randint(3, 10)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Coal Ore\"]\n\n # Iron\n if 30 + terrain[x] > y > 20 + terrain[x] and randint(0, 200) == 0:\n\n for cluster in range(randint(3, 6)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Iron Ore\"]\n\n # Gold\n if 80 > y > 65 and randint(0, 400) == 0:\n for cluster in range(randint(3, 6)):\n world[x + randint(-4, 4)][y + randint(-4, 4)] = block_lookup[\"Gold Ore\"]\n\n # Diamonds\n if 80 > y > 70 and randint(0, 500) == 0:\n for cluster in range(randint(1, 5)):\n world[x + randint(-3, 3)][y + randint(-3, 3)] = block_lookup[\"Diamond Ore\"]\n\n # Bedrock\n if y > 92 or y > 87 and randint(0, 3) == 0:\n world[x][y] = block_lookup[\"Bed Rock\"]\n\n # Last edit, adding extras to the top of the world to prevent problems\n world = [[0] * 40 + x for x in world]\n\n # Return the world object for use\n return np.array(world)\n\n except:\n world_seed += '1'", "def random_walker_generator(rows, cols, negative=False):\n attempts = 0\n while True:\n steps = 0\n found_goal = False\n grid = np.zeros((rows, cols))\n # start on bottom row\n current = (rows - 1, random.randint(0, cols - 1))\n grid[current] = 1\n steps += 1\n visited = set(current)\n\n connection = 0\n\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n while len(neighbors) > 0:\n for (neigh_x, neigh_y) in set(neighbors):\n # lookahead for neighbors neighbors\n lookahead = get_neighbors(\n (neigh_x, neigh_y), grid, visited, similar_cells={1})\n if len(lookahead) < 3: # contains neighbors with 1's\n # edge cases\n if neigh_x == 0 and random.random() >= 0.25:\n # chance of reaching goal at top\n continue\n elif ((neigh_y == 0 or neigh_y == rows - 1) and\n len(lookahead) == 2):\n continue\n else:\n 
neighbors.remove((neigh_x, neigh_y))\n\n if len(neighbors) == 0:\n # print (\"no more neighbors to pick\")\n break\n\n # time.sleep(0.15)\n # os.system(\"clear\")\n # draw_grid(grid)\n\n current = random.sample(neighbors, 1)[0] # pick a random neighbor\n # print (\"selected: \", current)\n grid[current] = 1\n steps += 1\n visited.add(current)\n if current[0] == 0: # top row\n # print (\"top row reached\")\n found_goal = True\n break\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n\n if (found_goal and not negative) or (not found_goal and negative):\n # print (\"Succeeded after %d attempts\" % attempts)\n attempts = 0\n grid = apply_noise(grid)\n\n # hack\n # override above step counter, because the random noise\n # might have added more, shorter connections\n # we do this because network was picking up patterns\n # from making random noise not entirely random\n steps, connected = check_connections_length(grid)\n if connected and negative:\n continue\n\n # randomly flip grid upside down\n if random.random() <= 0.5:\n grid = np.flipud(grid)\n\n yield grid, steps, connected\n else:\n attempts += 1", "def find_random_spot(self):\n random_line_index = []\n random_col = 0\n random_col_index = 100\n\n # -- loops through maze and stops when it finds an open spot\n while random_col != \" \":\n random_line_index = randrange(len(self._grid))\n random_col_index = randrange(len(self._grid[random_line_index]))\n random_col = self._grid[random_line_index][random_col_index]\n # -- returns the coordinates of the open spot\n coordinates = (random_line_index, random_col_index)\n return coordinates", "def spawn_start_goal(grid, spawn_seed=None):\n\n xs, ys = np.where(grid == 0)\n free_positions = list(zip(xs, ys))\n\n start, goal = random.Random(spawn_seed).sample(free_positions, 2)\n\n return start, goal", "def check_seed():\n np.random.seed(1000)\n standard = [\n {0: -3.0, 1: -5.0, 'index': 0},\n {0: -6.0, 1: -8.0, 'index': 1},\n {0: 5.0, 1: -1.0, 'index': 2},\n {0: 1.0, 1: -7.0, 'index': 3},\n {0: -2.0, 1: -3.0, 'index': 4},\n {0: 7.0, 1: 3.0, 'index': 5},\n {0: -4.0, 1: -2.0, 'index': 6},\n {0: 2.0, 1: 6.0, 'index': 7}\n ]\n\n this_machine = create_points(8)\n\n flag = True\n for i in range(8) :\n flag &= this_machine[i][0] == standard[i][0] \n flag &= this_machine[i][1] == standard[i][1] \n flag &= this_machine[i][\"index\"] == i\n \n if not flag :\n print(\"\"\"\n The Python installation on this machine is odd: it appears to\n use a non-standard random number generator -- run \n this script on the machines in the Otter lab instead.\n If that fails too, send an email to [email protected].\n \"\"\")\n print (\"You got these test points:\", this_machine)\n print (\"You should have got:\", standard)\n exit(-1)\n else :\n print (\"Check passed\")", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. 
Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in 
enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in 
roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")" ]
[ "0.7603047", "0.75861", "0.70993465", "0.70667547", "0.68742085", "0.6755556", "0.67491025", "0.67421407", "0.67201865", "0.6700757", "0.66145056", "0.6428856", "0.630201", "0.61533684", "0.6113033", "0.60697323", "0.6057306", "0.599535", "0.59817874", "0.59677285", "0.59619266", "0.5954314", "0.5935587", "0.59350276", "0.5928011", "0.59012645", "0.5878972", "0.58767843", "0.5875366", "0.5875193" ]
0.81655616
0
Test that maze cannot be exited when entrance pointers are set to None.
def test_maze_entrance_pointers_are_none(self): maze = Maze(100, 100) row, col = maze.entrance maze.grid[row][col].up = None maze.grid[row][col].right = None maze.grid[row][col].down = None maze.grid[row][col].left = None self.assertFalse(maze.can_reach_exit([row, col]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].right:\n maze.grid[row][col].right.left = None\n if maze.grid[row][col].down:\n maze.grid[row][col].down.up = None\n if maze.grid[row][col].left:\n maze.grid[row][col].left.right = None\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def maze_guard():\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard", "def checkSameEntranceExit(mazeEntrance,mazeExit,solnEntrance,solnExit):\n if((solnEntrance == mazeEntrance) and (solnExit == mazeExit)):\n return True\n else:\n return False", "def test_no_moves(self):\n game = self.ending(['bw..wwww'], 8, 1)\n game.man_move(0, 2)\n self.assertEqual(game.finish_state,\n (400, game.first_player, 'No moves'))", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n\n deg_heuristic = MinDegreeHeuristic(graph)\n node = deg_heuristic.best_node(graph)\n if node is None:\n pass\n else:\n assert False", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n next_node = min_fill_in_heuristic(graph)\n if next_node is None:\n pass\n else:\n assert False", "def is_unoccupied(self, row, col):\n 
return self.maze[row][col] is EMPTY", "def test_has_location_with_invalid_states():\n for state in (None, 1, \"hello\", object):\n assert not location.has_location(state)", "def checkAdjacent(self, direction, maze):\n\n y = self.y\n x = self.x\n\n # Shift x or y depending on the given direction.\n if direction in NS:\n y = shift[direction](y)\n elif direction in EW:\n x = shift[direction](x)\n\n # Check new location for obstacle or unwanted direction\n if maze[y][x] == 1 or ([x, y] in self.fullpath()) or (self.moved() is False and direction in self.dead_end_direction()[-1]):\n return False\n else:\n return True", "def test_adjacent_none(graph_with_edges):\n assert graph_with_edges.adjacent('B', 'A') is False", "def checkIntoWall(MazeTupleSet,SolnTupleSet):\n ele = MazeTupleSet.intersection(SolnTupleSet)\n if(len(ele)==0): #if the intersection of wall and solution is zero\n return True #means we do not run into wall\n else:\n return False", "def test_unreachable_pair(self):\n G = DiGraph([(0, 1), (0, 2), (1, 2)])\n assert_false(is_reachable(G, 1, 0))", "def _remove_dead_ends(maze):\n def _fill_maze():\n # Fill in dead ends, return True if the maze has no dead ends, otherwise\n # False.\n size = maze.shape[0]\n for i in range(size):\n for j in range(size):\n if maze[i, j]: # Not an open point\n continue\n num_open_neighbors = np.sum(\n [1 - maze[n[0], n[1]]\n for n in _get_neighbors(size, (i, j))])\n if num_open_neighbors < 2:\n maze[i, j] = 1\n return False\n return True\n\n valid_maze = False\n while not valid_maze:\n valid_maze = _fill_maze()", "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def check_lost (grid):\r\n adjacent = False\r\n zero_value = False\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n zero_value = True\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[i][j] == grid[i][j+1]:\r\n adjacent = True\r\n break\r\n if grid[i][j] == grid[i+1][j]:\r\n adjacent = True\r\n break\r\n if not adjacent and not zero_value:\r\n return True\r\n return False", "def check_move(self, y, x):\n return 0 <= y < len(self.maze) \\\n and 0 <= x < len(self.maze[y]) \\\n and self.maze[y][x] != \"#\"", "def testCanExploreASmallEmptyRoom(self):\n picture = \"\"\"\n ----------- level z=0 :\n ####\n # #\n # #\n ####\n -----------\n \"\"\"\n max_moves_to_make = 3\n\n points_visited = self.explore_dungeon(picture, max_moves_to_make)\n\n assert_that(points_visited).contains(Point(2, 2, 0))\n assert_that(points_visited).contains(Point(1, 2, 0))\n assert_that(points_visited).contains(Point(2, 1, 0))\n assert_that(points_visited).contains(Point(1, 1, 0))", "def __verify_exit_path(self, position=None):\n\n # Start from entrance if no position provided.\n if not position:\n position = self.__entrance\n\n # Create search queue, traversed list and return condition.\n search_queue = deque()\n search_queue.append(position)\n traversed = [[False for _ in range(self.__col_count)]\n for _ in range(self.__row_count)]\n can_exit = False\n\n # Add first position to traversed\n row, col = position\n traversed[row][col] = True\n\n count = 0\n # Loop through the queue, adding new positions.\n while search_queue:\n # Grab first position.\n count += 1\n row, col = search_queue.popleft()\n # Check if room is exit.\n if [row, col] == self.__exit:\n can_exit = True\n break\n # Add positions to queue if valid.\n 
if (self.__grid[row][col].up and\n self.__can_enter([row - 1, col], traversed)):\n search_queue.append([row - 1, col])\n traversed[row - 1][col] = True\n if (self.__grid[row][col].right and\n self.__can_enter([row, col + 1], traversed)):\n search_queue.append([row, col + 1])\n traversed[row][col + 1] = True\n if (self.__grid[row][col].down and\n self.__can_enter([row + 1, col], traversed)):\n search_queue.append([row + 1, col])\n traversed[row + 1][col] = True\n if (self.__grid[row][col].left and\n self.__can_enter([row, col - 1], traversed)):\n search_queue.append([row, col - 1])\n traversed[row][col - 1] = True\n return can_exit", "def has_exited(self):\n agents = self.board[self.agent_locs_idx]\n return agents & (CellTypes.agent | CellTypes.exit) == CellTypes.exit", "def terminal_test(gameState):\n return len(gameState.get_legal_moves()) == 0", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)" ]
[ "0.8649427", "0.7370002", "0.73293865", "0.6856556", "0.68072176", "0.62326103", "0.61708444", "0.61359787", "0.6099684", "0.60358566", "0.5930605", "0.59018624", "0.5899554", "0.587606", "0.5869301", "0.58278805", "0.57988024", "0.5797692", "0.5775867", "0.5741775", "0.57401747", "0.5738312", "0.5674244", "0.565792", "0.5623409", "0.55771416", "0.55506325", "0.55314654", "0.5526284", "0.5516225" ]
0.8637258
1
Test that a maze cannot be exited when rooms adjacent to the exit have their pointers set to None.
def test_maze_exit_pointers_are_none(self): maze = Maze(100, 100) row, col = maze.exit if maze.grid[row][col].up: maze.grid[row][col].up.down = None if maze.grid[row][col].right: maze.grid[row][col].right.left = None if maze.grid[row][col].down: maze.grid[row][col].down.up = None if maze.grid[row][col].left: maze.grid[row][col].left.right = None self.assertFalse(maze.can_reach_exit([maze.entrance[0], maze.entrance[1]]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maze_entrance_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n maze.grid[row][col].up = None\n maze.grid[row][col].right = None\n maze.grid[row][col].down = None\n maze.grid[row][col].left = None\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def maze_guard():\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard", "def check_lost (grid):\r\n adjacent = False\r\n zero_value = False\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n zero_value = True\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[i][j] == grid[i][j+1]:\r\n adjacent = True\r\n break\r\n if grid[i][j] == grid[i+1][j]:\r\n adjacent = True\r\n break\r\n if not adjacent and not zero_value:\r\n return True\r\n return False", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def checkIntoWall(MazeTupleSet,SolnTupleSet):\n ele = MazeTupleSet.intersection(SolnTupleSet)\n if(len(ele)==0): #if the intersection of wall and solution is zero\n return True #means we do not run into wall\n else:\n return False", "def checkSameEntranceExit(mazeEntrance,mazeExit,solnEntrance,solnExit):\n if((solnEntrance == mazeEntrance) and (solnExit == mazeExit)):\n return True\n else:\n return False", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def _remove_dead_ends(maze):\n def _fill_maze():\n # Fill in dead ends, return True if the maze has no dead ends, otherwise\n # False.\n size = maze.shape[0]\n for i in range(size):\n for j in range(size):\n if maze[i, j]: # Not an open point\n continue\n num_open_neighbors = np.sum(\n [1 - maze[n[0], n[1]]\n for n in _get_neighbors(size, (i, j))])\n if num_open_neighbors < 2:\n maze[i, j] = 1\n return False\n return True\n\n valid_maze = False\n while not valid_maze:\n valid_maze = _fill_maze()", "def check_lost(grid):\r\n for i 
in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n return False\r\n elif i+1 < len(grid):\r\n if grid[i][j] == grid[i+1][j]:\r\n return False\r\n elif j+1 < len(grid[i]):\r\n if grid[i][j] == grid[i][j+1]:\r\n return False \r\n return True", "def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def testCanExploreASmallEmptyRoom(self):\n picture = \"\"\"\n ----------- level z=0 :\n ####\n # #\n # #\n ####\n -----------\n \"\"\"\n max_moves_to_make = 3\n\n points_visited = self.explore_dungeon(picture, max_moves_to_make)\n\n assert_that(points_visited).contains(Point(2, 2, 0))\n assert_that(points_visited).contains(Point(1, 2, 0))\n assert_that(points_visited).contains(Point(2, 1, 0))\n assert_that(points_visited).contains(Point(1, 1, 0))", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def checkAdjacent(self, direction, maze):\n\n y = self.y\n x = self.x\n\n # Shift x or y depending on the given direction.\n if direction in NS:\n y = shift[direction](y)\n elif direction in EW:\n x = shift[direction](x)\n\n # Check new location for obstacle or unwanted direction\n if maze[y][x] == 1 or ([x, y] in self.fullpath()) or (self.moved() is False and direction in self.dead_end_direction()[-1]):\n return False\n else:\n return True", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. 
Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in 
enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in 
roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def test_has_location_with_invalid_states():\n for state in (None, 1, \"hello\", object):\n assert not location.has_location(state)", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def test_unreachable_pair(self):\n G = DiGraph([(0, 1), (0, 2), (1, 2)])\n assert_false(is_reachable(G, 1, 0))", "def test_adjacent_none(graph_with_edges):\n assert graph_with_edges.adjacent('B', 'A') is False", "def test_no_moves(self):\n game = self.ending(['bw..wwww'], 8, 1)\n game.man_move(0, 2)\n self.assertEqual(game.finish_state,\n (400, game.first_player, 'No moves'))", "def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True", "def check_lost (grid):\r\n t=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])):\r\n if grid[o][e]==0:\r\n t+=1\r\n else:\r\n ()\r\n r=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])-1):\r\n if grid[o][e]==grid[o][e+1]:\r\n r+=1\r\n elif grid[o][3]==grid[o][2]:\r\n r+=1 \r\n else:\r\n ()\r\n \r\n v=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])-1):\r\n if grid[e][o]==grid[e+1][o]:\r\n v+=1\r\n elif grid[3][o]==grid[2][o]:\r\n v+=1 \r\n else:\r\n () \r\n \r\n if t==0 and r==0 and v==0:\r\n return True\r\n else:\r\n return False", "def test_heuristic_abort(self):\n graph = {}\n for u in self.complete:\n graph[u] = set()\n for v in self.complete[u]:\n if u != v: # ignore self-loop\n graph[u].add(v)\n next_node = min_fill_in_heuristic(graph)\n if next_node is None:\n pass\n else:\n assert False", "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def solve_maze(row0, col0, maze):\n visited = set() # Set of visited cells\n cols, rows = range(len(maze[0])), 
range(len(maze))\n def escapep(r, c): \n \"\"\"True iff is a path of empty, unvisited cells from (R, C) out of maze.\"\"\"\n if r not in rows or c not in cols:\n return True\n elif maze[r][c] or (r, c) in visited:\n return False\n else:\n visited.add((r,c))\n return escapep(r+1, c) or escapep(r-1, c) \\\n or escapep(r, c+1) or escapep(r, c-1)\n return escapep(row0, col0)" ]
[ "0.84921086", "0.7337513", "0.7119611", "0.6680111", "0.6654044", "0.6379823", "0.62804466", "0.61869204", "0.61617595", "0.61408913", "0.6139881", "0.61133355", "0.6019866", "0.5997094", "0.5945427", "0.593739", "0.5920975", "0.5908153", "0.5904515", "0.59022844", "0.58883816", "0.58831626", "0.58802295", "0.58712846", "0.5854417", "0.5851219", "0.58192223", "0.57862914", "0.5771145", "0.57329684" ]
0.86979586
0
Test that a maze cannot be exited when rooms adjacent to the entrance are blocked.
def test_maze_entrance_adjacent_are_blocked(self): maze = Maze(100, 100) row, col = maze.entrance if row - 1 >= 0: maze.grid[row - 1][col].blocked = True if col + 1 < 100: maze.grid[row][col + 1].blocked = True if row + 1 < 100: maze.grid[row + 1][col].blocked = True if col - 1 >= 0: maze.grid[row][col - 1].blocked = True self.assertFalse(maze.can_reach_exit([row, col]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maze_exit_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_entrance_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n maze.grid[row][col].up = None\n maze.grid[row][col].right = None\n maze.grid[row][col].down = None\n maze.grid[row][col].left = None\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].right:\n maze.grid[row][col].right.left = None\n if maze.grid[row][col].down:\n maze.grid[row][col].down.up = None\n if maze.grid[row][col].left:\n maze.grid[row][col].left.right = None\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def maze_guard():\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard", "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def isBlocked(mapObj, gameStateObj, x, y):\n\n if isWall(mapObj, x, y):\n return True\n\n elif x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return True # x and y aren't actually on the map.\n\n elif (x, y) in gameStateObj['stars']:\n return True # a star is blocking\n\n return False", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = []\n # Detect doors and spaces. 
Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n for i, door in 
enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in 
roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def checkAdjacent(self, direction, maze):\n\n y = self.y\n x = self.x\n\n # Shift x or y depending on the given direction.\n if direction in NS:\n y = shift[direction](y)\n elif direction in EW:\n x = shift[direction](x)\n\n # Check new location for obstacle or unwanted direction\n if maze[y][x] == 1 or ([x, y] in self.fullpath()) or (self.moved() is False and direction in self.dead_end_direction()[-1]):\n return False\n else:\n return True", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def testCanExploreASmallEmptyRoom(self):\n picture = \"\"\"\n ----------- level z=0 :\n ####\n # #\n # #\n ####\n -----------\n \"\"\"\n max_moves_to_make = 3\n\n points_visited = self.explore_dungeon(picture, max_moves_to_make)\n\n assert_that(points_visited).contains(Point(2, 2, 0))\n assert_that(points_visited).contains(Point(1, 2, 0))\n assert_that(points_visited).contains(Point(2, 1, 0))\n assert_that(points_visited).contains(Point(1, 1, 0))", "def test_point_not_in_room(rectangle, big_area):\n new_room = Room(rectangle, 0, 1, 1, big_area, 'bathroom')\n point = (0, 15, 15)\n assert new_room.contains_point(point[0], point[1], point[2]) is False", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if 
grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def checkSameEntranceExit(mazeEntrance,mazeExit,solnEntrance,solnExit):\n if((solnEntrance == mazeEntrance) and (solnExit == mazeExit)):\n return True\n else:\n return False", "def test_move_knight_legally_blocked(self):\n for piece in [('N', True), ('N', False)]:\n self.c.board = \\\n [[('K', piece[1]) for i in range(8)] for i in range(8)]\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n for dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def checkIntoWall(MazeTupleSet,SolnTupleSet):\n ele = MazeTupleSet.intersection(SolnTupleSet)\n if(len(ele)==0): #if the intersection of wall and solution is zero\n return True #means we do not run into wall\n else:\n return False", "def check_lost (grid):\r\n adjacent = False\r\n zero_value = False\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n zero_value = True\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[i][j] == grid[i][j+1]:\r\n adjacent = True\r\n break\r\n if grid[i][j] == grid[i+1][j]:\r\n adjacent = True\r\n break\r\n if not adjacent and not zero_value:\r\n return True\r\n return False", "def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True", "def test_areas_locked_ok(self):", "def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)", "def _remove_dead_ends(maze):\n def _fill_maze():\n # Fill in dead ends, return True if the maze has no dead ends, otherwise\n # False.\n size = maze.shape[0]\n for i in range(size):\n for j in range(size):\n if maze[i, j]: # Not an open point\n continue\n num_open_neighbors = np.sum(\n [1 - maze[n[0], n[1]]\n for n in _get_neighbors(size, (i, j))])\n if num_open_neighbors < 2:\n maze[i, j] = 1\n return False\n return True\n\n valid_maze = False\n while not valid_maze:\n valid_maze = _fill_maze()", "def check_lost(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n return False\r\n elif i+1 < len(grid):\r\n if grid[i][j] == grid[i+1][j]:\r\n return False\r\n elif j+1 < len(grid[i]):\r\n if grid[i][j] == grid[i][j+1]:\r\n return False \r\n return True", "def check_move(self, y, x):\n return 0 <= y < len(self.maze) 
\\\n and 0 <= x < len(self.maze[y]) \\\n and self.maze[y][x] != \"#\"", "def _validate(self) -> None:\n for box in self.boxes:\n if any(box[0] == s[0] and box[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Box should not be inside wall.')\n if box[0] == self.current_location[0] and box[1] == self.current_location[1]:\n raise RuntimeError('In illegal state. Box should not be inside player.')\n if any(self.current_location[0] == s[0] and self.current_location[1] == s[1] for s in self.wall_squares):\n raise RuntimeError('In illegal state. Player should not be inside wall.')", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny" ]
[ "0.81660235", "0.73107517", "0.7274399", "0.69842875", "0.68054783", "0.656764", "0.65411395", "0.63449854", "0.61716986", "0.6112959", "0.60910326", "0.60790956", "0.6052811", "0.600987", "0.6005713", "0.6005683", "0.5965321", "0.5943916", "0.59438556", "0.5941174", "0.59009296", "0.58905005", "0.5878643", "0.58763963", "0.5875243", "0.5853533", "0.58437455", "0.58306396", "0.58077294", "0.57901174" ]
0.81951046
0
Test that a maze cannot be exited when rooms adjacent to the exit are blocked.
def test_maze_exit_adjacent_are_blocked(self):
    maze = Maze(100, 100)

    row, col = maze.exit
    if row - 1 >= 0:
        maze.grid[row - 1][col].blocked = True
    if col + 1 < 100:
        maze.grid[row][col + 1].blocked = True
    if row + 1 < 100:
        maze.grid[row + 1][col].blocked = True
    if col - 1 >= 0:
        maze.grid[row][col - 1].blocked = True

    self.assertFalse(maze.can_reach_exit([maze.entrance[0],
                                          maze.entrance[1]]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_maze_exit_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.exit\n if maze.grid[row][col].up:\n maze.grid[row][col].up.down = None\n if maze.grid[row][col].right:\n maze.grid[row][col].right.left = None\n if maze.grid[row][col].down:\n maze.grid[row][col].down.up = None\n if maze.grid[row][col].left:\n maze.grid[row][col].left.right = None\n\n self.assertFalse(maze.can_reach_exit([maze.entrance[0],\n maze.entrance[1]]))", "def test_maze_entrance_pointers_are_none(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n maze.grid[row][col].up = None\n maze.grid[row][col].right = None\n maze.grid[row][col].down = None\n maze.grid[row][col].left = None\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def test_out_of_maze_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n game.move(GameMoves.DOWN)\n for i in range(5):\n state = game.move(GameMoves.PASS)\n self.assertEquals(state, LevelState.RUNNING)\n state = game.move(GameMoves.PASS)\n self.assertEqual(state, LevelState.LOSE)", "def maze_guard():\n cell_guard = all([1 <= len(row) <= 100 for row in maze])\n row_guard = 1 <= len(maze) <= 100\n return cell_guard and row_guard", "def testMazeExists(self):\n pass", "def testMazeExists(self):\n\n pass", "def test_maze_created_traversed_from_indices(self):\n maze = Maze(100, 100)\n\n for test in range(20):\n self.assertTrue(maze.can_reach_exit([random.randint(0, 99),\n random.randint(0, 99)]))", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)", "def test_maze_created_can_be_traversed(self):\n maze = Maze(100, 100)\n\n self.assertTrue(maze._Maze__verify_exit_path())", "def checkSameEntranceExit(mazeEntrance,mazeExit,solnEntrance,solnExit):\n if((solnEntrance == mazeEntrance) and (solnExit == mazeExit)):\n return True\n else:\n return False", "def checkAdjacent(self, direction, maze):\n\n y = self.y\n x = self.x\n\n # Shift x or y depending on the given direction.\n if direction in NS:\n y = shift[direction](y)\n elif direction in EW:\n x = shift[direction](x)\n\n # Check new location for obstacle or unwanted direction\n if maze[y][x] == 1 or ([x, y] in self.fullpath()) or (self.moved() is False and direction in self.dead_end_direction()[-1]):\n return False\n else:\n return True", "def examineMaze(self, gameState):\n w = self.walls.width\n h = self.walls.height\n walls = self.walls.deepCopy()\n food1 = self.getFoodYouAreDefending(gameState)\n food2 = self.getFood(gameState)\n\n # Save map as 0, 1, 2 and 3 (0:walls, 1:spaces, 2:babies, 3:food)\n for x in range(w):\n for y in range(h):\n if walls[x][y]:\n walls[x][y] = 0\n elif food1[x][y]:\n walls[x][y] = 2\n elif food2[x][y]:\n walls[x][y] = 2\n else:\n walls[x][y] = 1\n\n roomsDisplay = 
[]\n # Detect doors and spaces. Spaces are now negative\n for x in range(w):\n for y in range(h):\n if walls[x][y] > 0:\n exitsNum = 0\n if walls[x][y - 1] != 0:\n exitsNum += 1\n if walls[x][y + 1] != 0:\n exitsNum += 1\n if walls[x - 1][y] != 0:\n exitsNum += 1\n if walls[x + 1][y] != 0:\n exitsNum += 1\n if exitsNum == 1 or exitsNum == 2:\n walls[x][y] = -1 * walls[x][y]\n roomsDisplay.append((x, y))\n elif exitsNum == 0:\n # We erase unaccessible cells\n walls[x][y] = 0\n else:\n # These are doors or big rooms, we leave them positive\n pass\n\n # Create roomsGraph: every room has a number, some cells and some doors\n roomsGraph = []\n doorsGraph = []\n for x in range(1, w - 1):\n for y in range(1, h - 1):\n if walls[x][y] < 0:\n spacesNum = 0\n if walls[x][y - 1] < 0:\n spacesNum += 1\n if walls[x][y + 1] < 0:\n spacesNum += 1\n if walls[x - 1][y] < 0:\n spacesNum += 1\n if walls[x + 1][y] < 0:\n spacesNum += 1\n if spacesNum < 2:\n endOfPath = False\n graphNode = {\"path\": [], \"doors\": [], \"food\": 0, \"isBig\": False}\n auxx = x\n auxy = y\n while not endOfPath:\n graphNode[\"path\"].append((x, y))\n graphNode[\"food\"] += -walls[x][y] - 1\n walls[x][y] = 0\n xx = x\n yy = y\n if walls[x][y - 1] < 0:\n yy = y - 1\n elif walls[x][y + 1] < 0:\n yy = y + 1\n elif walls[x - 1][y] < 0:\n xx = x - 1\n elif walls[x + 1][y] < 0:\n xx = x + 1\n else:\n endOfPath = True\n if walls[x][y - 1] > 0:\n if [(x, y - 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y - 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y - 1), []]))\n if walls[x][y + 1] > 0:\n if [(x, y + 1), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x, y + 1), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x, y + 1), []]))\n if walls[x - 1][y] > 0:\n if [(x - 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x - 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x - 1, y), []]))\n if walls[x + 1][y] > 0:\n if [(x + 1, y), []] not in doorsGraph:\n graphNode[\"doors\"].append(len(doorsGraph))\n doorsGraph.append([(x + 1, y), []])\n else:\n graphNode[\"doors\"].append(doorsGraph.index([(x + 1, y), []]))\n x = xx\n y = yy\n roomsGraph.append(graphNode)\n x = auxx\n y = auxy\n\n # Create doorsGraph: every door has a number, and goes to other rooms or other doors\n for j, door in enumerate(doorsGraph):\n for i, room in enumerate(roomsGraph):\n for aDoor in room[\"doors\"]:\n if aDoor == j:\n doorsGraph[j][1] = doorsGraph[j][1] + [i]\n (x, y) = doorsGraph[j][0]\n adjacentCells = [(x+1, y), (x-1, y), (x, y+1), (x, y-1)]\n adjacentDoors = []\n # Check adjacent doors and add them to the current door (door structure is [pos, adjRooms, adjDoors]\n for p in adjacentCells:\n # Skip if wall\n if self.walls[p[0]][p[1]]:\n continue\n # Skip if door\n isRoom = False\n for room in doorsGraph[j][1]:\n if p in roomsGraph[room][\"path\"]:\n isRoom = True\n break\n if not isRoom:\n # Add if existing door\n doorFound = False\n for i, neighborDoor in enumerate(doorsGraph):\n if neighborDoor[0] == p:\n adjacentDoors.append(i)\n doorFound = True\n break\n # Create if non existing door and add\n if not doorFound:\n adjacentDoors.append(len(doorsGraph))\n doorsGraph.append([p, []])\n doorsGraph[j].append(adjacentDoors)\n\n # Create doorsDistance: maps what doors can be accessed from other doors\n roomsMapper = {}\n doorsMapper = {}\n isRoom = util.Counter()\n 
for i, door in enumerate(doorsGraph):\n doorsMapper[door[0]] = i\n isRoom[door[0]] = 0\n for i, room in enumerate(roomsGraph):\n for p in room[\"path\"]:\n roomsMapper[p] = i\n isRoom[p] = 1\n\n # Create self variables\n self.doorsGraph = doorsGraph\n self.roomsGraph = roomsGraph\n self.roomsMapper = roomsMapper\n self.doorsMapper = doorsMapper\n self.isRoom = isRoom\n\n # # Find dead ends (rooms with only one door)\n # deadRooms = {}\n # deadDoors = {}\n # # deaderDoors = {}\n # # deaderRooms = {}\n # for i, room in enumerate(roomsGraph):\n # if len(room[\"doors\"]) == 1:\n # deadRooms[i] = room[\"doors\"][0]\n # deadDoors[room[\"doors\"][0]] = 1\n # numdR = 0\n # aliveR = -1\n # for adjRoom in doorsGraph[room[\"doors\"][0]][1]:\n # if adjRoom not in deadRooms:\n # numdR += 1\n # aliveR = adjRoom\n # if numdR + len(doorsGraph[room[\"doors\"][0]][2]) == 1:\n # if aliveR >= 0:\n # deaderRooms[aliveR] = room[\"doors\"][0]\n # for adjDoor in roomsGraph[aliveR][\"doors\"]:\n # if adjDoor == room[\"doors\"][0]:\n # continue\n # deaderDoors[adjDoor] = 1.0\n # else:\n # deaderDoors[doorsGraph[room[\"doors\"][0]][2][0]] = 1.0\n\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deaderRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deaderDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, room in enumerate(roomsGraph):\n # numAliveDoors = 0\n # aliveDoor = 0\n # deadDoor = []\n # for door in room[\"doors\"]:\n # if door not in deadDoors:\n # numAliveDoors += 1\n # aliveDoor = door\n # else:\n # deadDoor.append(door)\n # if numAliveDoors == 1:\n # aliveRoom = 0\n # aliveNeighborDoor = 0\n # for door in deadDoor:\n # # aliveNeighborDoor += len(doorsGraph[door][2])\n # for neighborRoom in doorsGraph[door][1]:\n # if neighborRoom not in deadRooms:\n # aliveRoom += 1\n # if aliveRoom == len(deadDoor):\n # deadRooms[i] = aliveDoor\n # deadDoors[aliveDoor] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][doorsGraph[aliveDoor][0]] = 1\n # for p in room[\"path\"]:\n # roomsCounter[1][p] = 1\n # for p in deadDoor:\n # roomsCounter[2][doorsGraph[p][0]] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n\n # Find dead ends (rooms with doors that only go to other dead ends, except one)\n # Danger, it is theoretically possible to have a map only with dead ends, which may make this crash\n # deadEndsChanged = True\n # while deadEndsChanged:\n # deadEndsChanged = False\n # for i, door in enumerate(doorsGraph):\n # if i not in deadDoors:\n # numOpenRooms = 0\n # openRoom = 0\n # for j, room in enumerate(door[1]):\n # if room not in deadRooms:\n # numOpenRooms += 1\n # openRoom = j\n # if numOpenRooms + len(door[2]) == 1:\n # for room in door[1]:\n # if room != openRoom:\n # deadRooms[room] = i\n # deadDoors[j] = 1\n # deadEndsChanged = True\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[0][door[0]] = 1\n # for rr in door[1]:\n # print rr\n # if rr in deadRooms:\n # for p in roomsGraph[rr][\"path\"]:\n # roomsCounter[1][p] = 1\n # else:\n # for p in 
roomsGraph[rr][\"path\"]:\n # roomsCounter[3][p] = 1\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # print deadDoors\n # print\n # print deadRooms\n # print \"-----------------\"\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # roomsCounter[1][(6, 9)] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n # for r in deadRooms:\n # for p in roomsGraph[r][\"path\"]:\n # roomsCounter[0][p] = 0.4\n # for d in deadDoors:\n # roomsCounter[1][doorsGraph[d][0]] = 0.4\n # self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")\n\n # Show every room\n roomsCounter = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for room in roomsGraph:\n for p in room[\"path\"]:\n if len(room[\"doors\"]) > 1:\n roomsCounter[0][p] = 0.4\n else:\n roomsCounter[2][p] = 0.4\n # Show every door\n for door in doorsGraph:\n roomsCounter[1][door[0]] = 0.4\n # Display rooms and doors (red: rooms with at least one exit; orange: rooms with 1 exit; blue: doors\n self.displayDistributionsOverPositions(roomsCounter)\n # raw_input(\"Press Enter to continue ...\")", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def isBlocked(mapObj, gameStateObj, x, y):\n\n if isWall(mapObj, x, y):\n return True\n\n elif x < 0 or x >= len(mapObj) or y < 0 or y >= len(mapObj[x]):\n return True # x and y aren't actually on the map.\n\n elif (x, y) in gameStateObj['stars']:\n return True # a star is blocking\n\n return False", "def checkIntoWall(MazeTupleSet,SolnTupleSet):\n ele = MazeTupleSet.intersection(SolnTupleSet)\n if(len(ele)==0): #if the intersection of wall and solution is zero\n return True #means we do not run into wall\n else:\n return False", "def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if 
(x,y) in occupied:\n return False \n break\n else:\n return True", "def check_lost (grid):\r\n adjacent = False\r\n zero_value = False\r\n for i in range(4): \r\n for j in range(4):\r\n if grid[i][j] == 0:\r\n zero_value = True\r\n break\r\n for i in range(3):\r\n for j in range(3):\r\n if grid[i][j] == grid[i][j+1]:\r\n adjacent = True\r\n break\r\n if grid[i][j] == grid[i+1][j]:\r\n adjacent = True\r\n break\r\n if not adjacent and not zero_value:\r\n return True\r\n return False", "def check_lost(grid):\r\n for i in range(len(grid)):\r\n for j in range(len(grid[i])):\r\n if grid[i][j] == 0:\r\n return False\r\n elif i+1 < len(grid):\r\n if grid[i][j] == grid[i+1][j]:\r\n return False\r\n elif j+1 < len(grid[i]):\r\n if grid[i][j] == grid[i][j+1]:\r\n return False \r\n return True", "def testCanExploreASmallEmptyRoom(self):\n picture = \"\"\"\n ----------- level z=0 :\n ####\n # #\n # #\n ####\n -----------\n \"\"\"\n max_moves_to_make = 3\n\n points_visited = self.explore_dungeon(picture, max_moves_to_make)\n\n assert_that(points_visited).contains(Point(2, 2, 0))\n assert_that(points_visited).contains(Point(1, 2, 0))\n assert_that(points_visited).contains(Point(2, 1, 0))\n assert_that(points_visited).contains(Point(1, 1, 0))", "def check_move(self, y, x):\n return 0 <= y < len(self.maze) \\\n and 0 <= x < len(self.maze[y]) \\\n and self.maze[y][x] != \"#\"", "def _remove_dead_ends(maze):\n def _fill_maze():\n # Fill in dead ends, return True if the maze has no dead ends, otherwise\n # False.\n size = maze.shape[0]\n for i in range(size):\n for j in range(size):\n if maze[i, j]: # Not an open point\n continue\n num_open_neighbors = np.sum(\n [1 - maze[n[0], n[1]]\n for n in _get_neighbors(size, (i, j))])\n if num_open_neighbors < 2:\n maze[i, j] = 1\n return False\n return True\n\n valid_maze = False\n while not valid_maze:\n valid_maze = _fill_maze()", "def test_move_knight_legally_blocked(self):\n for piece in [('N', True), ('N', False)]:\n self.c.board = \\\n [[('K', piece[1]) for i in range(8)] for i in range(8)]\n self.c.turn = piece[1]\n self.c.board[4][4] = piece\n for dest in ['d6', 'f6', 'c5', 'g5', 'c3', 'g3', 'd2', 'f2']:\n self.groups['dest'] = dest\n self.assertRaises(\n MoveNotLegalError, self.c._knight_evaluator, self.groups)", "def check_lost (grid):\r\n t=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])):\r\n if grid[o][e]==0:\r\n t+=1\r\n else:\r\n ()\r\n r=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])-1):\r\n if grid[o][e]==grid[o][e+1]:\r\n r+=1\r\n elif grid[o][3]==grid[o][2]:\r\n r+=1 \r\n else:\r\n ()\r\n \r\n v=0\r\n for o in range(len(grid)):\r\n for e in range(len(grid[o])-1):\r\n if grid[e][o]==grid[e+1][o]:\r\n v+=1\r\n elif grid[3][o]==grid[2][o]:\r\n v+=1 \r\n else:\r\n () \r\n \r\n if t==0 and r==0 and v==0:\r\n return True\r\n else:\r\n return False", "def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def test_point_not_in_room(rectangle, big_area):\n new_room = Room(rectangle, 0, 1, 1, big_area, 'bathroom')\n point = (0, 15, 15)\n assert new_room.contains_point(point[0], point[1], point[2]) is False", "def has_exited(self):\n agents = self.board[self.agent_locs_idx]\n return agents & (CellTypes.agent | CellTypes.exit) == CellTypes.exit", "def test_areas_locked_ok(self):" ]
[ "0.80039895", "0.7448992", "0.7195284", "0.6965521", "0.6831383", "0.66254824", "0.659395", "0.6501339", "0.6198514", "0.61660945", "0.61389726", "0.60900736", "0.60867363", "0.60384566", "0.6005758", "0.59558624", "0.59533626", "0.59515226", "0.593887", "0.5937304", "0.59134567", "0.5904037", "0.58753234", "0.5847926", "0.57977027", "0.57888275", "0.5783148", "0.57544696", "0.57488954", "0.5747478" ]
0.82839954
0
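To make the blocking idea in the record above concrete, here is a minimal, self-contained sketch. It is not the project's Maze class: the grid, coordinates, and breadth-first-search helper are illustrative assumptions, used only to show that walling off every cell adjacent to the exit makes it unreachable from the entrance.

from collections import deque

def reachable(blocked, start, goal):
    # breadth-first search over 4-connected, unblocked cells
    rows, cols = len(blocked), len(blocked[0])
    seen, queue = {start}, deque([start])
    while queue:
        r, c = queue.popleft()
        if (r, c) == goal:
            return True
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and not blocked[nr][nc] and (nr, nc) not in seen:
                seen.add((nr, nc))
                queue.append((nr, nc))
    return False

blocked = [[False] * 5 for _ in range(5)]   # 5x5 open grid (assumed layout)
entrance, exit_cell = (0, 0), (2, 2)
for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
    blocked[exit_cell[0] + dr][exit_cell[1] + dc] = True   # wall off every neighbour of the exit

assert not reachable(blocked, entrance, exit_cell)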
Returns a 3xn numpy array describing motion accelerations. These accelerations are calculated analytically and are not susceptible to numerical errors.
def get_analytical_accelerations(self):
    # create empty numpy array for accelerations
    accelerations = np.zeros((3, len(self.times)))
    # radial acceleration is equal to angular velocity^2 / radius, but the radius is unitary in this trajectory
    radial_acceleration = self.wz ** 2
    # decompose radial acceleration into x and y components
    accelerations[0, :] = radial_acceleration * -cos(self.th[:, 2])
    accelerations[1, :] = radial_acceleration * -sin(self.th[:, 2])
    # acceleration along the x axis is constant
    accelerations[2, :] = self.ax
    return accelerations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def acceleration(R,M,G):\r\n N = R.shape[0]\r\n a = np.zeros((N,3)) # initialize accelerations\r\n for n in range(N):\r\n for nn in range(N):\r\n if n!=nn:\r\n a[n,:] += G*M[nn] * (R[nn,:]-R[n,:]) / util.enod(R[nn,:],R[n,:])**(3) \r\n return a", "def calculate_acceleration(self) -> np.array:\n F = self.calculate_net_force()\n m = self.mass\n a = F / m\n\n return a", "def target_acceleration(self, time):\n x_a = -self.w**2*self.r*sin(self.w*time)\n y_a = -self.w**2*self.r*cos(self.w*time)\n z_a = 0\n # raise NotImplementedError\n return np.array([x_a,y_a,z_a])", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def _acceleration(data):\n\n # intermediate values\n sumcube_resids = ((data.mean() - data)**3).sum()\n\n # dodge the ZeroDivision error\n sumsqr_resids = max(((data.mean() - data)**2).sum(), 1e-12)\n\n # compute and return the acceleration\n return sumcube_resids / (6 * sumsqr_resids**1.5)", "def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64,\n softening: dc.float64):\n # positions r = [x,y,z] for all particles\n x = pos[:, 0:1]\n y = pos[:, 1:2]\n z = pos[:, 2:3]\n\n # matrix that stores all pairwise particle separations: r_j - r_i\n # dx = x.T - x\n # dy = y.T - y\n # dz = z.T - z\n # dx = np.transpose(x) - x\n # dy = np.transpose(y) - y\n # dz = np.transpose(z) - z\n dx = np.add.outer(-x, x)\n dy = np.add.outer(-y, y)\n dz = np.add.outer(-z, z)\n\n # matrix that stores 1/r^3 for all particle pairwise particle separations\n inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2)\n # inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5)\n I = inv_r3 > 0\n np.power(inv_r3, -1.5, out=inv_r3, where=I)\n\n ax = G * (dx * inv_r3) @ mass\n ay = G * (dy * inv_r3) @ mass\n az = G * (dz * inv_r3) @ mass\n\n # pack together the acceleration components\n # a = np.hstack((ax,ay,az))\n a = np.ndarray((N, 3), dtype=np.float64)\n # hstack(a, ax, ay, az)\n a[:, 0] = ax\n a[:, 1] = ay\n a[:, 2] = az\n\n return a", "def acceleration(data_array, time=1):\n speed = DataOperation.speed(data_array)\n acc_values = np.zeros(speed.size)\n count = 1\n acc_values[0] = 0\n for d in speed[1:]:\n acc_values[count] = (d - speed[count-1])/3.6/time\n count += 1\n return acc_values", "def accelerometer(self):\n accel = [0,0,0]\n accel[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_A) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_A), 16)\n accel[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_A) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_A), 16)\n accel[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_A) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_A), 16)\n\n for i in range(X, Z+1):\n self._accel[i] = accel[i] / math.pow(2, 15) * ACCEL_SCALE\n\n return vector(self._accel)", "def compute_acceleration(self, x, y):\n a_x = G * self.m / (x*x + y*y) * -x/(np.sqrt(x*x + y*y))\n a_y = G * self.m / (x*x + y*y) * -y/(np.sqrt(x*x + y*y))\n return np.array([a_x, a_y])", "def jacdelta_xi_tot(xis, cco2, n_alts = 40):\n\n J = np.empty((len(allatms), len(xis)))\n jacall = jacdelta_xi_all_x0s_fast(xis, cco2)\n delta = delta_xi_tot(xis, cco2)\n alldeltas = []\n for ialt in range(n_alts):\n alldeltas.append(delta_xi_at_x0(xis, cco2, ialt))\n\n for i in range(len(allatms)):\n for k in range(len(xis)):\n #print(i,k)\n J[i,k] = 
1/(delta[i]) * np.sum([alldeltas[ialt][i]*jacall[i,k,ialt] for ialt in range(n_alts)])\n\n #print(np.mean(J))\n return J", "def target_acceleration(self, time):\n return np.array([0, 0, 0])", "def get_accel_data(self):\n x = self.read_i2c_word(self.ACCEL_XOUT0)\n y = self.read_i2c_word(self.ACCEL_YOUT0)\n z = self.read_i2c_word(self.ACCEL_ZOUT0)\n\n accel_scale_modifier = None\n accel_range = self.read_accel_range(True)\n\n if accel_range == self.ACCEL_RANGE_2G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n elif accel_range == self.ACCEL_RANGE_4G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_4G\n elif accel_range == self.ACCEL_RANGE_8G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_8G\n elif accel_range == self.ACCEL_RANGE_16G:\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_16G\n else:\n print(\"Unkown range - accel_scale_modifier set to self.ACCEL_SCALE_MODIFIER_2G\")\n accel_scale_modifier = self.ACCEL_SCALE_MODIFIER_2G\n\n x = x / accel_scale_modifier\n y = y / accel_scale_modifier\n z = z / accel_scale_modifier\n\n x = x * self.GRAVITIY_MS2\n y = y * self.GRAVITIY_MS2\n z = z * self.GRAVITIY_MS2\n return [x, y, z]", "def j3_accel(state_vec, mu=MU, j2=J3, j3=J3):\n mu = set_mu(state_vec)\n j2 = set_j2(state_vec)\n j3 = set_j3(state_vec)\n x, y, z = state_vec[0:3]\n r = sqrt(x**2 + y**2 + z**2)\n xddot = -5 * j3 * mu * R_E**3 * x / (2 * r**7) * (3 * z - 7 * z**3 / r**2)\n yddot = -5 * j3 * mu * R_E**3 * y / (2 * r**7) * (3 * z - 7 * z**3 / r**2)\n zddot = -5 * j3 * mu * R_E**3 / (2 * r**7) * (6 * z**2 - 7 * z**4 / r**2 - 3 / 5 * r**2)\n\n return [xddot, yddot, zddot]", "def acceleration( x, u, m, rho, P, b, h):\n\t\n n = x.size\n a = np.zeros((n,1))\n\n for i in range(0, n):\n \n # damping & harmonic potential (0.5 x^2)\n a[i] = a[i] - u[i]*b - x[i]\n\n # quantum pressure (pairwise calculation)\n x_js = np.delete(x,i)\n P_js = np.delete(P,i)\n rho_js = np.delete(rho,i)\n # first, calculate vector between two particles\n uij = x[i] - x_js\n # calculate acceleration due to pressure\n fac = -m * (P[i]/rho[i]**2 + P_js/rho_js**2)\n pressure_a = fac * kernel( uij, h, '1' )\n # accumulate contributions to the acceleration\n a[i] = a[i] + np.sum(pressure_a)\n\n return a", "def tidal_acc(self, xyz):\r\n assert(xyz.shape[1] == 3)\r\n\r\n # create solar mass variable, if not already extant\r\n try:\r\n self.M\r\n except AttributeError:\r\n self.M = Constant(2e33) # solar mass in g\r\n\r\n # get the vector of x-values\r\n x = xyz[:, 0]\r\n\r\n # create an all-zero force vector\r\n acc = np.zeros_like(xyz)\r\n\r\n # get the heliocentric distance at the current time\r\n dist = self.trajectory(self.t)\r\n\r\n # get the acceleration as the difference between the acceleration at\r\n # the points and the acceleration at the c.o.m.\r\n acc[:, 0] = float(self.G)*float(self.M)*(1/(dist-x)**2-1/(dist)**2)\r\n return(acc)", "def acceleration(self):\n ux,uy = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n vx,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n \n ax = self._obj['u']*ux + self._obj['v']*uy\n ay = self._obj['u']*vx + self._obj['v']*vy\n\n self._obj['w'] = xr.DataArray(np.sqrt(ax**2+ay**2), dims=['x', 'y','t'])\n\n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append(f'{vel_units}^2')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = (f'{vel_units}^2')\n\n\n return self._obj", "def calc_observables(X):\n \n n = X.shape[1]\n Y = 
np.zeros((len(X), n*3+n*(n-1)//2))\n \n # average orientation (magnetization)\n counter = 0\n for i in range(3*n):\n Y[:,counter] = X[:,i]\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]*X[:,j]\n counter += 1\n \n return Y", "def acceleration(self, Y):\n acc = np.array([0.0, 0.0])\n for part in self.particles:\n d3 = ((part.x-Y[0])**2 + (part.y-Y[1])**2)**1.5\n if d3 < 1e5: # When particles are too close, divergences occur\n continue\n acc += (part.mass/d3)*G*np.array([part.x-Y[0], part.y-Y[1]])\n return acc", "def read_acceleration(self):\n data = self.ag.read_bytes(Register.OUT_X_XL, 6)\n return lsm9ds1.to_vector_left_to_right_hand_rule(data)", "def C3D(self, temperature: float = None) -> ArrayLike:\n t = temperature\n n1 = self.nu(t) / (1 - self.nu(t))\n n2 = (1 - 2 * self.nu(t)) / (2 * (1 - self.nu(t)))\n\n C = np.array([[ 1, n1, n1, 0, 0, 0],\n [n1, 1, n1, 0, 0, 0],\n [n1, n1, 1, 0, 0, 0],\n [ 0, 0, 0, n2, 0, 0],\n [ 0, 0, 0, 0, n2, 0],\n [ 0, 0, 0, 0, 0, n2]], dtype=float)\n C *= self.E(t) * (1 - self.nu(t)) / ((1 + self.nu(t)) * (1 - 2 * self.nu(t)))\n\n return C", "def LEIsotropic3D(self):\n const = self.ymod / ((1+self.Nu) * (1-(2*self.Nu)))\n a = const * self.Nu\n b = const * (1-self.Nu)\n c = (a-b)/2\n Cmat = np.array(\n [\n [b, a, a, 0, 0, 0],\n [a, b, a, 0, 0, 0],\n [a, a, b, 0, 0, 0],\n [0, 0, 0, c, 0, 0],\n [0, 0, 0, 0, c, 0],\n [0, 0, 0, 0, 0, c]\n ], dtype=float)\n stress_el = Cmat @ self.eps\n return stress_el, Cmat", "def acceleration(self):\n return self.__accel", "def forward_kinematics(self):\n temp_T = Matrix.eye(3)\n for i in range(len(self.lengths)):\n angle_mat = self.T_a.subs(self.q,self.angles[i]).evalf()\n len_mat = self.T_x.subs(self.l,self.lengths[i]).evalf()\n temp_T = temp_T * angle_mat * len_mat\n \n self.final_T = np.array(temp_T,dtype=float)\n \n return self.final_T", "def get_analytical_velocities(self):\n # create empty numpy array for accelerations\n velocities = np.zeros((3, len(self.times)))\n # tangential velocity is angular velocity multiplied by radius but radius is one\n vt = self.wz\n # decompose tangential velocity in x and y components\n velocities[0, :] = vt * -sin(self.th[:, 2])\n velocities[1, :] = vt * cos(self.th[:, 2])\n # linear velocity along z axis\n velocities[2, :] = self.v0x + self.ax * self.times\n return velocities", "def _compute_all(self, acc: np.ndarray, mag: np.ndarray) -> np.ndarray:\n _assert_numerical_iterable(acc, 'Gravitational acceleration vector')\n _assert_numerical_iterable(mag, 'Geomagnetic field vector')\n acc, mag = np.copy(acc), np.copy(mag)\n self._assert_observations(acc, mag)\n if acc.ndim < 2:\n return self.estimate(acc, mag)\n # Normalize measurements (eq. 1)\n ax, ay, az = np.transpose(acc/np.linalg.norm(acc, axis=1)[:, None])\n mx, my, mz = np.transpose(mag/np.linalg.norm(mag, axis=1)[:, None])\n # Dynamic magnetometer reference vector (eq. 12)\n mD = ax*mx + ay*my + az*mz\n mN = np.sqrt(1-mD**2)\n # Quaternion components (eq. 16)\n qw = ax*my - ay*(mN+mx)\n qx = (az-1)*(mN+mx) + ax*(mD-mz)\n qy = (az-1)*my + ay*(mD-mz)\n qz = az*mD - ax*mN-mz\n # Final quaternion (eq. 
18)\n Q = np.c_[-qw, qx, qy, qz]\n return Q/np.linalg.norm(Q, axis=1)[:, None]", "def get_acceleration(self):\n return self.acceleration", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])", "def read_data(self):\n self.data = self.i2c.readfrom_mem(accel_address, x_data, 6)\n data_xyz = []\n for i in range(3):\n value = (self.data[2*i + 1] << 8) | self.data[2*i]\n data_xyz.append(self.get_acceleration(value) - self.offset[i])\n data_xyz.append(utime.ticks_ms() - self.time)\n return data_xyz", "def c(self) -> np.ndarray:\n return self._vector[10:12]", "def get_variables(self) -> np.array:\n return np.array([self.m, self.c])" ]
[ "0.67607427", "0.6659853", "0.65132993", "0.6360973", "0.6349496", "0.6343993", "0.62899894", "0.62222946", "0.6102571", "0.6017444", "0.6007366", "0.599949", "0.59906644", "0.5926159", "0.5886498", "0.5861928", "0.5837955", "0.5786055", "0.5779665", "0.57683545", "0.5741493", "0.57157284", "0.56872135", "0.5679788", "0.5646559", "0.5630116", "0.5616995", "0.5595825", "0.55863476", "0.55822396" ]
0.74758273
0
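The decomposition used in the record above can be sanity-checked in isolation. In the sketch below, the angular velocity, sample times, and constant linear acceleration are made-up stand-ins for the class attributes wz, times, and ax; the point is only that on a unit-radius circle the in-plane acceleration magnitude equals wz**2 everywhere.

import numpy as np

wz, ax = 0.5, 0.2                       # assumed angular velocity [rad/s] and constant linear acceleration
times = np.linspace(0.0, 10.0, 101)     # assumed sample times
th = wz * times                         # heading angle for a constant angular velocity

accelerations = np.zeros((3, len(times)))
accelerations[0, :] = wz ** 2 * -np.cos(th)     # centripetal component along x
accelerations[1, :] = wz ** 2 * -np.sin(th)     # centripetal component along y
accelerations[2, :] = ax                        # constant linear component

# on a unit-radius circle the in-plane magnitude must equal wz**2 at every sample
assert np.allclose(np.hypot(accelerations[0, :], accelerations[1, :]), wz ** 2)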
Returns a 3xn numpy array describing motion velocities. These velocities are calculated analytically and are not susceptible to numerical errors.
def get_analytical_velocities(self):
    # create empty numpy array for velocities
    velocities = np.zeros((3, len(self.times)))
    # tangential velocity is angular velocity multiplied by radius, but the radius is one
    vt = self.wz
    # decompose tangential velocity into x and y components
    velocities[0, :] = vt * -sin(self.th[:, 2])
    velocities[1, :] = vt * cos(self.th[:, 2])
    # linear velocity along the z axis
    velocities[2, :] = self.v0x + self.ax * self.times
    return velocities
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def motor_velocities(self):\n return np.asarray(self._robot_state.velocity)", "def get_velocities(self):\n\n return np.array([p.velocity for p in self.particles])", "def velocities(self, return_np=False):\n if return_np:\n return self.si_values()[3:]\n return [self.v_x, self.v_y, self.v_z]", "def velocity(self) -> np.ndarray:\n return self._state[3:5]", "def velocity_trajectory(self):\n return self._read(MX_VELOCITY_TRAJECTORY)", "def velocities(self, session):\n velocities = session.query(\n Timepoint.velocity_x,\n Timepoint.velocity_y,\n Timepoint.velocity_z).filter(\n Timepoint.id.between(self.start_timepoint_id, self.end_timepoint_id))\n return np.array(velocities.all())", "def velocities(self, return_np=False):\n if return_np:\n return self.si_values()[3:6]\n return [self.v_r, self.v_t, self.v_p]", "def angular_velocities(self, dt: float) -> np.ndarray:\n if not isinstance(dt, float):\n raise TypeError(f\"dt must be a float. Got {type(dt)}.\")\n if dt <= 0:\n raise ValueError(f\"dt must be greater than zero. Got {dt}.\")\n w = np.c_[\n self.w[:-1]*self.x[1:] - self.x[:-1]*self.w[1:] - self.y[:-1]*self.z[1:] + self.z[:-1]*self.y[1:],\n self.w[:-1]*self.y[1:] + self.x[:-1]*self.z[1:] - self.y[:-1]*self.w[1:] - self.z[:-1]*self.x[1:],\n self.w[:-1]*self.z[1:] - self.x[:-1]*self.y[1:] + self.y[:-1]*self.x[1:] - self.z[:-1]*self.w[1:]]\n return 2.0 * w / dt", "def velocities(self, return_np=False):\n\n if return_np:\n return self.si_values()[3:]\n return [self.v_r, self.v_t, self.v_p]", "def _create_velocities(self):\n velocities = []\n for boid in self.boids:\n neighbouring = self.get_neighbours(boid)\n boid_v_x = boid.v * math.cos(boid.direction)\n boid_v_y = boid.v * math.sin(boid.direction)\n if neighbouring:\n coh_x, coh_y = ReynoldsModel.calculate_cohesion(neighbouring)\n coh_x -= boid.x\n coh_y -= boid.y\n align_x, align_y = ReynoldsModel.calculate_alignment(neighbouring)\n sep_x, sep_y = self.calculate_separation(boid, neighbouring)\n velocities.append((\n boid_v_x + self.coh_coef * coh_x + self.align_coef * align_x + self.sep_coef * sep_x,\n boid_v_y + self.coh_coef * coh_y + self.align_coef * align_y + self.sep_coef * sep_y\n ))\n else:\n velocities.append((boid_v_x, boid_v_y))\n return velocities", "def target_velocity(self, time):\n\n x_v = self.w*self.r*cos(self.w*time)\n y_v = -self.w*self.r*sin(self.w*time)\n z_v = 0\n # raise NotImplementedError\n return np.array([x_v,y_v,z_v])", "def compute_velocities(self):\n Ddemo_trajs = []\n\n for demo_traj in self._demo_trajs:\n d_traj = np.diff(demo_traj, axis=0)/self._dt\n #append last element to adjust the length\n d_traj = np.hstack([d_traj, d_traj[-1]])\n #add it to the list\n Ddemo_trajs.append(d_traj)", "def obtain_vel_timestep(self, tstep):\n return np.swapaxes(np.array([np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][\"VelocityX\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][\"VelocityY\"][\" data\"][:, :, :]),\n np.ndarray.flatten(self.data[\"Base\"][\"Zone1\"][\"FlowSolution_%04d\" % tstep][\"VelocityZ\"][\" data\"][:, :, :])]), 0, 1)", "def differentiater(trajectories, time_interval=0.01):\r\n number_of_array_axis = len(np.shape(trajectories))\r\n if number_of_array_axis == 2:\r\n velocities = (trajectories[:, 1:] - trajectories[:, :-1])/time_interval\r\n elif number_of_array_axis == 3:\r\n velocities = (trajectories[:, :, 1:] - trajectories[:, :, :-1]) / time_interval\r\n else:\r\n raise ValueError(\"The size of the 
trajectories array was not correct.\")\r\n return velocities", "def _calc_frame_vels(self):\n num_frames = self.get_num_frames()\n frame_vel_size = self.get_frame_vel_size()\n dt = self.get_frame_duration()\n frame_vels = np.zeros([num_frames, frame_vel_size])\n\n for f in range(num_frames - 1):\n frame0 = self.get_frame(f)\n frame1 = self.get_frame(f + 1)\n\n root_pos0 = self.get_frame_root_pos(frame0)\n root_pos1 = self.get_frame_root_pos(frame1)\n\n root_rot0 = self.get_frame_root_rot(frame0)\n root_rot1 = self.get_frame_root_rot(frame1)\n\n joints0 = self.get_frame_joints(frame0)\n joints1 = self.get_frame_joints(frame1)\n\n root_vel = (root_pos1 - root_pos0) / dt\n\n root_rot_diff = transformations.quaternion_multiply(\n root_rot1, transformations.quaternion_conjugate(root_rot0))\n root_rot_diff_axis, root_rot_diff_angle = \\\n pose3d.QuaternionToAxisAngle(root_rot_diff)\n root_ang_vel = (root_rot_diff_angle / dt) * root_rot_diff_axis\n\n joints_vel = (joints1 - joints0) / dt\n\n curr_frame_vel = np.zeros(frame_vel_size)\n self.set_frame_root_vel(root_vel, curr_frame_vel)\n self.set_frame_root_ang_vel(root_ang_vel, curr_frame_vel)\n self.set_frame_joints_vel(joints_vel, curr_frame_vel)\n\n frame_vels[f, :] = curr_frame_vel\n\n # replicate the velocity at the last frame\n if num_frames > 1:\n frame_vels[-1, :] = frame_vels[-2, :]\n\n return frame_vels", "def get_voltages(self):\n if self.v is None or self.dirty is True:\n v = self.simulator.get_voltages()\n n_compartments = self.neuron_collection.total_compartments()\n self.v = np.array(v).reshape([len(v) / n_compartments, n_compartments])\n\n self.dirty = False\n t = int(self.T / self.dt)\n return self.v[:t, :]", "def velocity_field(self):\n return scipy.dstack((self._u_int, self._v_int))", "def vel2acc(timeseries, dt):\n return np.diff(np.hstack(([0], timeseries)) * (1.0 / dt))", "def velocityVerlet(XY, yh, yt, h, n):\n for l in range(n):\n yt_temp = yt + (0.5 * h * XY.grad_log_density(yh))\n yhp1 = yh + (h * yt_temp)\n ytp1 = yt_temp + (0.5 * h * XY.grad_log_density(yhp1))\n yh = yhp1\n yt = ytp1\n\n return [yhp1, ytp1]", "def velocity(self):\n return self._vel.to_list()", "def calc_vel(position, step_size):\n start_frame = step_size + 1\n end_frame = len(position)\n com_vel = []\n for i in range(start_frame, end_frame + 1):\n com_vel.append(calc_vel_frame(position, step_size, i))\n return start_frame, end_frame, com_vel", "def getVelocity(grid=None, ppar=None):\n\n vel = np.zeros([grid.nx, grid.ny, grid.nz, 3], dtype=np.float64)\n return vel", "def calc_vel(pos_data, dt):\n\n vx = np.gradient(pos_data[:, 0], dt)\n vy = np.gradient(pos_data[:, 1], dt)\n\n return np.c_[vx, vy]", "def motor_angles(self):\n return np.asarray(self._robot_state.position)", "def get_velocity(self):\n\n vs = []\n pairs = [(-2, -1), (-3, -1), (-3, -1)]\n\n for i1, i2 in pairs:\n f1 = self.files[i1]\n p1 = Profile(os.path.join(self.name, f1))\n\n f2 = self.files[i2]\n p2 = Profile(os.path.join(self.name, f2))\n\n # we'll do this by looking at 3 different temperature\n # thresholds and averaging\n T_ref = [2.e9, 3.e9, 4.e9]\n\n for T0 in T_ref:\n x1 = p1.find_x_for_T(T0)\n x2 = p2.find_x_for_T(T0)\n vs.append((x1 - x2)/(p1.time - p2.time))\n\n vs = np.array(vs)\n v = np.mean(vs)\n v_sigma = np.std(vs)\n return v, v_sigma", "def carla_angular_velocity_to_numpy_vector(carla_angular_velocity):\n return numpy.array([math.radians(carla_angular_velocity.x), \n -math.radians(carla_angular_velocity.y), \n -math.radians(carla_angular_velocity.z)])", "def 
carla_velocity_to_numpy_vector(carla_velocity):\n\n return numpy.array([\n carla_velocity.x,\n -carla_velocity.y,\n carla_velocity.z\n ])", "def _get_observation_np(self) -> np.ndarray: # need this for baselines\n observation = []\n observation.extend(self.rex.GetMotorAngles().tolist())\n observation.extend(self.rex.GetMotorVelocities().tolist())\n observation.extend(self.rex.GetMotorTorques().tolist())\n observation.extend(list(self.rex.GetBaseOrientation()))\n\n # in addition to state, will need ratio, clock_variables, and desired speed\n observation.extend([self.ratio]) # only 1\n observation.extend(self.get_clock()) # 4 variables (1 per leg)\n observation.extend(self.speed_des) # [vx_des, vy_des]\n self._observation = observation\n return np.array(self._observation)", "def get_analytical_accelerations(self):\n # create empty numpy array for accelerations\n accelerations = np.zeros((3, len(self.times)))\n # radial accelerations is equal to angular velocity^2 / radius but radius is unitary is this trajectory\n radial_acceleration = self.wz ** 2\n # decompose radial accelerations in x and y components\n accelerations[0, :] = radial_acceleration * -cos(self.th[:, 2])\n accelerations[1, :] = radial_acceleration * -sin(self.th[:, 2])\n # accelerations along x axis is constant\n accelerations[2, :] = self.ax\n return accelerations", "def get_variables(self) -> np.array:\n return np.array([self.m, self.c])" ]
[ "0.7477564", "0.71334136", "0.6991451", "0.6933057", "0.67952305", "0.66930515", "0.66805524", "0.6625498", "0.6615499", "0.6569754", "0.6533391", "0.64655375", "0.6296626", "0.6216558", "0.6187872", "0.6160401", "0.6115366", "0.6099247", "0.6098212", "0.6074818", "0.6048958", "0.60220486", "0.60212773", "0.60022205", "0.5994286", "0.5974519", "0.5972259", "0.5893929", "0.5887984", "0.58819836" ]
0.7648973
0
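A further hedged cross-check of the two records above: numerically differentiating the analytical velocity expressions should approximately reproduce the analytical accelerations. The parameters below are again illustrative stand-ins, not values taken from the original class.

import numpy as np

wz, ax, v0x = 0.5, 0.2, 1.0             # assumed trajectory parameters
times = np.linspace(0.0, 10.0, 1001)
th = wz * times

velocities = np.zeros((3, len(times)))
velocities[0, :] = wz * -np.sin(th)     # tangential velocity, x component (unit radius)
velocities[1, :] = wz * np.cos(th)      # tangential velocity, y component
velocities[2, :] = v0x + ax * times     # linear velocity along the remaining axis

acc_numeric = np.gradient(velocities, times, axis=1)
acc_analytic = np.vstack((wz ** 2 * -np.cos(th),
                          wz ** 2 * -np.sin(th),
                          np.full_like(times, ax)))

# finite differences agree with the closed-form accelerations to well within the tolerance
assert np.allclose(acc_numeric, acc_analytic, atol=1e-2)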
Check an externally generated trajectory against the internally generated one
def check_trajectory(self, external_trajectory):
    # create empty array for the error measure
    error = np.zeros((3, external_trajectory.shape[1]))
    # loop over the external trajectory
    for i, external_x in enumerate(external_trajectory.T):
        # get trajectory coordinates at step i
        real_x = self.trajectory[:, i]
        # calculate the difference from the external trajectory
        error[:, i] = abs(real_x - external_x)
    # return the average error over all axes
    return error.mean(axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_trajectory(self):\n raise NotImplementedError", "def test_object_with_trajectory() -> None:\n system_name = \"Octanol2\"\n system = database.system(system_name)\n parser = FlatfileParser()\n simulation_data = parser.get_simulation_data(\n units=system.units,\n ensemble=system.ensemble(\"GasPhase\"),\n system=system.system_data,\n position_file=system.trajectory_flat_file(\"GasPhase\", \"position\"),\n velocity_file=system.trajectory_flat_file(\"GasPhase\", \"velocity\"),\n )\n simulation_data_copy = parser.get_simulation_data(\n units=system.units,\n ensemble=system.ensemble(\"GasPhase\"),\n system=system.system_data,\n position_file=system.trajectory_flat_file(\"GasPhase\", \"position\"),\n velocity_file=system.trajectory_flat_file(\"GasPhase\", \"velocity\"),\n )\n\n assert simulation_data == simulation_data_copy\n\n # Change a position entry\n simulation_data_copy.trajectory.position[0][0] += 1\n # Suppressed LGTM warning: See https://github.com/github/codeql/issues/5777\n assert simulation_data != simulation_data_copy # lgtm [py/redundant-comparison]", "def test_run_time_validation(run_time, jptl_ref, has_to_from, expected):\n if run_time:\n run_time = \"<RunTime>{0}</RunTime>\".format(run_time)\n\n if jptl_ref:\n jptl_ref = (\n \"<JourneyPatternTimingLinkRef>{0}</JourneyPatternTimingLinkRef>\".format(\n jptl_ref\n )\n )\n\n if has_to_from:\n to_from = \"\"\"\n <From>\n <StopPointRef>9990000001</StopPointRef>\n <TimingStatus>PTP</TimingStatus>\n </From>\n <To>\n <StopPointRef>9990000002</StopPointRef>\n <TimingStatus>PTP</TimingStatus>\n </To>\n \"\"\"\n else:\n to_from = \"\"\n\n xml = \"\"\"\n <JourneyPatternSections>\n <JourneyPatternSection id=\"JPS1\">\n <JourneyPatternTimingLink id=\"JPTL1\">\n <Direction>clockwise</Direction>\n {0}\n </JourneyPatternTimingLink>\n </JourneyPatternSection>\n </JourneyPatternSections>\n <VehicleJourneys>\n <VehicleJourney>\n <VehicleJourneyCode>VJ1</VehicleJourneyCode>\n <ServiceRef>S1</ServiceRef>\n <LineRef>L1</LineRef>\n <JourneyPatternRef>JP1</JourneyPatternRef>\n <DepartureTime>10:29:00</DepartureTime>\n <VehicleJourneyTimingLink id=\"VJTL5\">\n {1}\n {2}\n </VehicleJourneyTimingLink>\n </VehicleJourney>\n </VehicleJourneys>\n \"\"\"\n xml = xml.format(run_time, jptl_ref, to_from)\n\n OBSERVATION_ID = 34\n schema = Schema.from_path(PTI_PATH)\n observations = [o for o in schema.observations if o.number == OBSERVATION_ID]\n schema = SchemaFactory(observations=observations)\n json_file = JSONFile(schema.json())\n pti = PTIValidator(json_file)\n txc = TXCFile(xml)\n is_valid = pti.is_valid(txc)\n assert is_valid == expected", "def _check_saved_trajectory(saved_pdb_file_path, reference_trajectory, indices=None):\n import MDAnalysis.coordinates\n\n if indices is None:\n indices = range(len(reference_trajectory))\n\n with MDAnalysis.coordinates.PDB.PDBReader(saved_pdb_file_path) as saved_trajectory:\n for saved_idx, reference_idx in enumerate(indices):\n saved_positions = saved_trajectory[saved_idx].positions\n\n # Check if the reference is a trajectory or custom positions.\n if isinstance(reference_trajectory, np.ndarray):\n reference_positions = reference_trajectory[reference_idx]\n else:\n reference_positions = reference_trajectory.get_ts(reference_idx).positions\n\n assert np.allclose(saved_positions, reference_positions, atol=1e-2, rtol=0.0)", "def verify_trajectory_safety(env, p_0, k_fb, k_ff, p_ctrl, h_mat_safe, h_safe,\n h_mat_obs=None, h_obs=None):\n n, _ = np.shape(k_ff)\n x_all = simulate_trajectory(env, p_0, k_fb, k_ff, 
p_ctrl)\n\n in_all = True\n if not h_mat_obs is None:\n for i in range(1, n):\n in_all = in_all & sample_inside_polytope(x_all[None, i, :], h_mat_obs,\n h_obs)\n\n in_all = in_all & sample_inside_polytope(x_all[None, -1, :], h_mat_safe, h_safe)\n\n return in_all, x_all", "def test_h2_trajectory(h2_trajectory):\n traj = h2_trajectory\n mol = traj.mol\n k = mol.energy_model.params.k\n period = 2*u.pi*np.sqrt(mol.atoms[0].mass/k)\n for frame in traj.frames:\n period_progress = (frame.time % period) / period\n if period_progress < 0.1 or period_progress > 0.9:\n # check for expected peaks of sine wave\n assert frame.positions[0, 0] > 0.1 * u.angstrom\n elif 0.4 < period_progress < 0.6:\n # check for expected troughs of sine wave\n assert frame.positions[0, 0] < -0.1 * u.angstrom", "def compute_trajectory():\n pass", "def check(self):\n self.init()\n self.calculate_output()\n self.compare_outputs_with_expects()", "def test_t(self):\n assert np.isclose(self.stepper.t, self.final_t)", "def validate(self):\n variables = ['bottomDepth', 'layerThickness', 'maxLevelCell',\n 'temperature', 'salinity']\n compare_variables(\n test_case=self, variables=variables,\n filename1='initial_state/initial_state.nc')\n\n variables = ['temperature', 'layerThickness']\n compare_variables(\n test_case=self, variables=variables,\n filename1='forward/output/output.0001-01-01_00.00.00.nc')\n\n if self.with_particles:\n # just do particle validation at coarse res\n variables = [\n 'xParticle', 'yParticle', 'zParticle', 'zLevelParticle',\n 'buoyancyParticle', 'indexToParticleID', 'currentCell',\n 'transfered', 'numTimesReset']\n compare_variables(test_case=self, variables=variables,\n filename1='forward/analysis_members/'\n 'lagrPartTrack.0001-01-01_00.00.00.nc')\n\n timers = ['init_lagrPartTrack', 'compute_lagrPartTrack',\n 'write_lagrPartTrack', 'restart_lagrPartTrack',\n 'finalize_lagrPartTrack']\n compare_timers(self, timers, rundir1='forward')", "def validate_ts(self):\n try:\n self.get_log_file()\n\n self.parse_vibrations()\n\n self.obtain_geometries()\n\n self.percent_changes = self.obtain_percent_changes()\n\n\n center_values = np.log(\n self.percent_changes[self.percent_changes.center].percent_change.mean())\n shell_values = np.log(\n self.percent_changes[self.percent_changes.center != True].percent_change.mean())\n\n if center_values > shell_values + 1:\n logging.info(\"Vibrational analysis was successful\")\n return True\n else:\n logging.info(\n \"Cannot reasonably say that we have arrived at a TS through vibrational analysis.\")\n return False\n except AssertionError:\n logging.info(\"Something went wrong when attempting vibrational analysis...\")\n logging.info(\"Cannot verify via vibrational analysis\")\n return False", "def testUsedFlag(self):\n self.exposure.setWcs(self.tanWcs)\n loadRes = self.refObjLoader.loadPixelBox(bbox=self.bbox, wcs=self.tanWcs, filterName=\"r\")\n refCat = loadRes.refCat\n refCentroidKey = afwTable.Point2DKey(refCat.schema[\"centroid\"])\n refFluxRKey = refCat.schema[\"r_flux\"].asKey()\n\n sourceSchema = afwTable.SourceTable.makeMinimalSchema()\n measBase.SingleFrameMeasurementTask(schema=sourceSchema) # expand the schema\n config = AstrometryTask.ConfigClass()\n config.wcsFitter.order = 2\n config.wcsFitter.numRejIter = 0\n # schema must be passed to the solver task constructor\n solver = AstrometryTask(config=config, refObjLoader=self.refObjLoader, schema=sourceSchema)\n sourceCat = afwTable.SourceCatalog(sourceSchema)\n sourceCat.reserve(len(refCat))\n sourceCentroidKey = 
afwTable.Point2DKey(sourceSchema[\"slot_Centroid\"])\n sourceInstFluxKey = sourceSchema[\"slot_ApFlux_instFlux\"].asKey()\n sourceInstFluxErrKey = sourceSchema[\"slot_ApFlux_instFluxErr\"].asKey()\n\n for refObj in refCat:\n src = sourceCat.addNew()\n src.set(sourceCentroidKey, refObj.get(refCentroidKey))\n src.set(sourceInstFluxKey, refObj.get(refFluxRKey))\n src.set(sourceInstFluxErrKey, refObj.get(refFluxRKey)/100)\n\n results = solver.run(\n sourceCat=sourceCat,\n exposure=self.exposure,\n )\n # check that the used flag is set the right number of times\n count = 0\n for source in sourceCat:\n if source.get('calib_astrometry_used'):\n count += 1\n self.assertEqual(count, len(results.matches))", "def arrived(self):\n \"\"\" Responsible for transformations \"\"\"\n \n if self.phase == 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, array([0,0]))\n else: \n return array_equal(self.destination, array([0,0]))\n elif self.phase > 1:\n if self.closest_i_could_get is not None:\n return array_equal(self.closest_i_could_get, self.position)\n else: \n return array_equal(self.destination, self.position)", "def have_trains_crashed(first_train_speed, second_train_speed, sidetrack=4, path=10):\n first_train_road_time = sidetrack / first_train_speed\n second_train_road_time = (path - sidetrack) / second_train_speed\n if first_train_road_time == second_train_road_time or second_train_road_time < first_train_road_time:\n return True\n elif first_train_road_time < second_train_road_time:\n return False", "def _one_system_changed_phase(self, thermo, ref_values):\n singlet_array = self._get_singlet_array(thermo)\n for cur_array, ref_array in zip(singlet_array, ref_values):\n for cur_val, ref_val in zip(cur_array, ref_array):\n if self._system_changed_phase(cur_val, ref_val):\n return True\n return False", "def check():", "def is_done(self, observations):\n ####################################################################\n # Plan0: init #\n ####################################################################\n # done = False\n # done_reward = 0\n # reward_reached_goal = 2000\n # reward_crashing = -200\n # reward_no_motion_plan = -50\n # reward_joint_range = -150\n\n ####################################################################################\n # Plan1: Reach a point in 3D space (usually right above the target object) #\n # Reward only dependent on distance. Nu punishment for crashing or joint_limits #\n ####################################################################################\n done = False\n done_reward = 0\n reward_reached_goal = 100\n reward_crashing = 0\n reward_no_motion_plan = 0\n reward_joint_range = 0\n\n\n # Check if there are invalid collisions\n invalid_collision = self.get_collisions()\n\n # print(\"##################{}: {}\".format(self.moveit_action_feedback.header.seq, self.moveit_action_feedback.status.text))\n if self.moveit_action_feedback.status.text == \"No motion plan found. No execution attempted.\" or \\\n self.moveit_action_feedback.status.text == \"Solution found but controller failed during execution\" or \\\n self.moveit_action_feedback.status.text == \"Motion plan was found but it seems to be invalid (possibly due to postprocessing).Not executing.\":\n\n print(\">>>>>>>>>>>> NO MOTION PLAN!!! 
<<<<<<<<<<<<<<<\")\n done = True\n done_reward = reward_no_motion_plan\n\n # Successfully reached goal: Contact with at least one contact sensor and there is no invalid contact\n if observations[7] != 0 and observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get two contacts <<<<<<<<<<<<<<<<<<')\n done_reward = reward_reached_goal\n # save state in csv file\n U.append_to_csv(self.csv_success_exp, observations)\n self.success_2_contacts += 1\n print(\"Successful 2 contacts so far: {} attempts\".format(self.success_2_contacts))\n\n if observations[7] != 0 or observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get one contacts <<<<<<<<<<<<<<<<<<')\n self.success_1_contact += 1\n print(\"Successful 1 contact so far: {} attempts\".format(self.success_1_contact))\n\n # Check if the box has been moved compared to the last observation\n target_pos = U.get_target_position()\n if not np.allclose(self.object_position, target_pos, rtol=0.0, atol=0.0001):\n print(\">>>>>>>>>>>>>>>>>>> Target moved <<<<<<<<<<<<<<<<<<<<<<<\")\n done = True\n\n # Crashing with itself, shelf, base\n if invalid_collision:\n done = True\n print('>>>>>>>>>>>>>>>>>>>> crashing <<<<<<<<<<<<<<<<<<<<<<<')\n done_reward = reward_crashing\n\n joint_exceeds_limits = False\n for joint_pos in self.joints_state.position:\n joint_correction = []\n if joint_pos < -math.pi or joint_pos > math.pi:\n joint_exceeds_limits = True\n done = True\n done_reward = reward_joint_range\n print('>>>>>>>>>>>>>>>>>>>> joint exceeds limit <<<<<<<<<<<<<<<<<<<<<<<')\n joint_correction.append(-joint_pos)\n else:\n joint_correction.append(0.0)\n\n if joint_exceeds_limits:\n print(\"is_done: Joints: {}\".format(np.round(self.joints_state.position, decimals=3)))\n self.publisher_to_moveit_object.pub_joints_to_moveit([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n while not self.movement_complete.data:\n pass\n self.publisher_to_moveit_object.pub_relative_joints_to_moveit(joint_correction)\n while not self.movement_complete.data:\n pass\n print('>>>>>>>>>>>>>>>> joint corrected <<<<<<<<<<<<<<<<<')\n\n return done, done_reward, invalid_collision", "def test_input_target_different():\n for day in range(len(departure_cameras)):\n which_targets_day = which_targets[day]\n when_targets_day = when_targets[day]\n where_targets_day = where_targets[day]\n departure_cameras_day = departure_cameras[day]\n # Which\n for departure_camera, target in zip(departure_cameras_day, which_targets_day):\n entrance_cameras = np.argwhere(target == 1) + 1\n assert departure_camera not in entrance_cameras\n # When\n for departure_camera, when_target in zip(departure_cameras_day, when_targets_day):\n target = when_target.sum(axis=1) > 1\n entrance_cameras = np.argwhere(target == 1) + 1\n assert departure_camera not in entrance_cameras\n # Where\n for departure_camera, where_target in zip(departure_cameras_day, where_targets_day):\n target = where_target.sum(axis=3).sum(axis=2).sum(axis=1) > 1\n entrance_cameras = np.argwhere(target == 1) + 1\n assert departure_camera not in entrance_cameras", "def match_based_on_spatial_temperal_prior_test_2(tracker_record_1, tracker_record_2, pt_obj_1, pt_obj_2, associate_dict, t_interval=30):\n print(\"===== Get in the match_based_on_spatial_temperal_prior_test_2! 
===== \")\n \n # file path\n device_id_1 = 0\n device_id_2 = 1\n img_root_1 = data_path[device_id_1]\n img_root_2 = data_path[device_id_2]\n # save_root =\n \n obj_single_camera_stp_cam_1 = SingleCameraSTP(tracker_record_1, pt_obj_1)\n obj_single_camera_stp_cam_2 = SingleCameraSTP(tracker_record_2, pt_obj_2)\n \n print(obj_single_camera_stp_cam_1.perspective_trace)\n print(obj_single_camera_stp_cam_1.motion_params_4_each)\n obj_multi_cameras_stp_c1c2 = MultiCamerasSTP(\n obj_single_camera_stp_cam_1,\n obj_single_camera_stp_cam_2,\n associate_dict)\n\n # # ===== TEST:coord_transformer_test =====\n # coord_transformer_test(obj_multi_cameras_stp_c1c2)\n # obj_multi_cameras_stp_c1c2.get_start_point_transform()\n \n pt_box_info_1 = obj_multi_cameras_stp_c1c2.obj_single_camera_stp_cam_1.perspective_trace\n pt_box_info_2 = obj_multi_cameras_stp_c1c2.obj_single_camera_stp_cam_2.perspective_trace\n \n # Test on object id '1'\n object_id = '0'\n \n for i in range(np.min([len(pt_box_info_1[object_id]), len(pt_box_info_2[object_id])])):\n f1 = i\n f2 = i\n fname_1 = str(pt_box_info_1[object_id][f1][1])+'.jpg'\n fname_2 = str(pt_box_info_2[object_id][f2][1])+'.jpg'\n \n img_1 = cv2.imread(os.path.join(img_root_1, fname_1))\n img_2 = cv2.imread(os.path.join(img_root_2, fname_2))\n \n cam_1_x = pt_box_info_1[object_id][f1][0][0]\n cam_1_y = pt_box_info_1[object_id][f1][0][1]\n \n cam_2_x = pt_box_info_2[object_id][f2][0][0]\n cam_2_y = pt_box_info_2[object_id][f2][0][1]\n \n t_interval = pt_box_info_2[object_id][f2][1]-pt_box_info_1[object_id][f1][1]\n \n print(cam_1_x, cam_1_y)\n print(cam_2_x, cam_2_y)\n print(t_interval)\n # print(obj_multi_cameras_stp_c1c2.starting_point)\n \n p_map = obj_multi_cameras_stp_c1c2.get_probability_map(cam_1_x, cam_1_y, t_interval, height=210, width=80)\n p_map = cv2.applyColorMap(p_map, cv2.COLORMAP_JET)\n p = obj_multi_cameras_stp_c1c2.get_probability(cam_2_x, cam_2_y, cam_1_x, cam_1_y, t_interval)\n print(p)\n # dist = obj_multi_cameras_stp_c1c2.get_distance(cam_2_x,cam_2_y,cam_1_x,cam_1_y,t_interval)\n p_map = cv2.resize(p_map, (int(pt_obj_2.transformed_width_for_disp), int(pt_obj_2.transformed_height_for_disp)))\n p_map = cv2.flip(p_map, 0) # 0:vertical flip\n pt_color_p_map = pt_obj_2.get_inverse_disp_transform(p_map)\n \n alpha = 0.5\n img_3 = cv2.addWeighted(img_2, alpha, pt_color_p_map, 1-alpha, 0)\n \n img_4 = np.zeros((int(img_2.shape[0]), int(img_2.shape[1]*2), 3), np.uint8)\n img_4[:, :img_1.shape[1], :] = img_1\n img_4[:, img_1.shape[1]:, :] = img_3\n\n # cv2.namedWindow('img_1',cv2.WINDOW_NORMAL)\n # cv2.namedWindow('img_2',cv2.WINDOW_NORMAL)\n cv2.namedWindow('img_4', cv2.WINDOW_NORMAL)\n \n # cv2.imshow('img_1',img_1)\n # cv2.imshow('img_2',img_2)\n cv2.imshow('img_4', img_4)\n \n cv2.imwrite(os.path.join(save_root, fname_1), img_4)\n \n cv2.waitKey()\n return", "def test_guarded_approach_pose(self):\n # Note: This has to match the .test file\n desired_twist = geometry_msgs.msg.TwistStamped()\n desired_twist.header.frame_id = 'arm_link_5'\n desired_twist.twist.linear.z = 1.0\n\n current_joints = sensor_msgs.msg.JointState()\n current_joints.name = ['arm_joint_2', 'arm_joint_3', 'arm_joint_4']\n current_joints.position = [0.2, 1.8, 1.7]\n current_joints.effort = [0.2, 1.8, 1.7]\n\n while not self.wait_for_result:\n self.pub_current_joints.publish(current_joints)\n self.event_out.publish('e_start')\n\n self.assertEqual(self.result.header.frame_id, desired_twist.header.frame_id)\n self.assertEqual(\n self.result.twist, desired_twist.twist,\n msg=\"Result: 
{0}\\nDesired: {1}\".format(self.result, desired_twist)\n )\n\n self.result = None\n self.wait_for_result = None\n\n while not self.wait_for_result:\n self.event_out.publish('e_collision')\n\n # the twist should be zero after a collision is detected\n desired_twist.twist.linear.z = 0.0\n self.assertEqual(\n self.result.twist, desired_twist.twist,\n msg=\"Result: {0}\\nDesired: {1}\".format(self.result, desired_twist)\n )", "def check_position_from_array(self, t0):\n\tposition_array=np.load('pos.npy')\n\tx_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n\tfor k in range(self.no_planets-1):\n r1=position_array[-1]\n\t r2=position_array[k]\n r3=position_array[k+1]\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n \tmin_index=np.argmin(np.abs(diff))\n\tprint \"Lowest x:\", x_y[min_index, 0]\n\tprint \"Lowest y:\", x_y[min_index, 1]", "def find_trajectory(self):\n\n translation,_ = self.trans_listener.lookupTransform(\"/map\", \"/base_footprint\", rospy.Time(0))\n self.x = translation[0]\n self.y = translation[1]\n \n cell_x = int(np.floor(self.x / self.metadata.resolution) + self.w / 2) - self.x_offset\n cell_y = int(np.floor(self.y / self.metadata.resolution) + self.h / 2) - self.y_offset\n\n visited = np.zeros(self.costmap.shape)\n visited[cell_y,cell_x] = 1\n\n to_explore = self.add_neighbors(visited, Node(cell_x,cell_y,0,None))\n to_explore.sort(key=operator.attrgetter('cost'))\n\n # Run modified Dijkstra algorithm\n while to_explore: \n next_node = to_explore.pop(0)\n if next_node.cost == -1:\n print(\"Found goal!\")\n\t\tself.send_final_pose(next_node)\n self.number_of_fails = 0\n self.get_trajectory(next_node)\n return\n \n to_explore = to_explore + self.add_neighbors(visited, next_node)\n to_explore.sort(key=operator.attrgetter('cost'))\n\n self.number_of_fails += 1\n print(\"Failed: %d times % self.number_of_fails\")\n\n if self.number_of_fails >= NUMBER_OF_FAILS:\n print(\"Exiting!\")\n msg = Bool()\n msg.data = True\n self.exp_complete_pub.publish(msg)", "def test_online_reads_checkpoint():\n current_log_level = logger.level\n logger.setLevel(logging.ERROR) # Temporarily suppress some of the logging output\n raw_template_script = get_template_script(systems=['explicit-system'])\n\n # Pair down the processing\n allowed_molecules = [raw_template_script['systems']['explicit-system']['receptor'],\n raw_template_script['systems']['explicit-system']['ligand']]\n popables = []\n for molecule in raw_template_script['molecules'].keys():\n if molecule not in allowed_molecules:\n popables.append(molecule)\n for popable in popables:\n raw_template_script['molecules'].pop(popable)\n\n raw_template_script.pop('samplers')\n sampler_entry = {'type': 'SAMSSampler'}\n sampler = {'samplers': {'sams': sampler_entry}}\n base_template_script = {**raw_template_script, **sampler}\n base_template_script['experiments']['sampler'] = 'sams'\n\n def spinup_sampler(script):\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script['options']['output_dir'] = tmp_dir\n exp_builder = ExperimentBuilder(script)\n experiment = [ex for ex in exp_builder.build_experiments()][0]\n sampler = experiment.phases[0].sampler\n return sampler\n\n # Testing Note: All 
the test numbers for the checkpoint_interval below are different from the default and each\n # other to ensure making changes actually has the intended effect odd settings get carried over between tests\n # respectively.\n\n # Test that setting \"checkpoint\" for online analysis gets the checkpoint interval default\n template_script = copy.deepcopy(base_template_script)\n template_script['options'].pop(\"checkpoint_interval\", None)\n template_script['samplers']['sams']['online_analysis_interval'] = \"checkpoint\"\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == AlchemicalPhaseFactory.DEFAULT_OPTIONS['checkpoint_interval']\n\n # Test that setting \"checkpoint\" for online analysis gets the checkpoint interval that is set\n template_script['options'][\"checkpoint_interval\"] = 10\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == 10\n\n # Test that not setting online analysis gets the checkpoint interval default\n template_script = copy.deepcopy(base_template_script)\n template_script['options'].pop(\"checkpoint_interval\", None)\n template_script['samplers']['sams'].pop('online_analysis_interval', None)\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == AlchemicalPhaseFactory.DEFAULT_OPTIONS['checkpoint_interval']\n\n # Test that not setting online analysis gets the checkpoint interval that is set\n template_script['options'][\"checkpoint_interval\"] = 100\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == 100\n\n # Test that setting online analysis still returns the set value\n template_script = copy.deepcopy(base_template_script)\n template_script['options'][\"checkpoint_interval\"] = 70\n template_script['samplers']['sams']['online_analysis_interval'] = 13\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == 13\n\n # Test that setting online analysis to None keeps online analysis None\n template_script = copy.deepcopy(base_template_script)\n template_script['options'][\"checkpoint_interval\"] = 80\n template_script['samplers']['sams']['online_analysis_interval'] = None\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval is None\n\n # Test that not setting a sampler gets a the checkpoint interval for online analysis\n template_script = copy.deepcopy(base_template_script)\n template_script['options'][\"checkpoint_interval\"] = 90\n template_script.pop('samplers', None)\n template_script['experiments'].pop('sampler', None)\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == 90\n\n # Test that setting the checkpoint_interval in *experiments:options* block correctly sets the checkpoint interval\n template_script = copy.deepcopy(base_template_script)\n template_script['options'][\"checkpoint_interval\"] = 110\n template_script.pop('samplers', None)\n opts = {'checkpoint_interval': 120}\n template_script['experiments'].pop('sampler', None)\n template_script['experiments']['options'] = opts\n sampler = spinup_sampler(template_script)\n assert sampler.online_analysis_interval == 120\n\n logger.setLevel(current_log_level) # Reset logging to normal", "def _check_result(self, tesselation, orig_gdf, unique_id):\n # check against input layer\n ids_original = list(orig_gdf[unique_id])\n ids_generated = list(tesselation[unique_id])\n if len(ids_original) != len(ids_generated):\n\n self.collapsed = set(ids_original).difference(ids_generated)\n 
warnings.warn(\n f\"Tessellation does not fully match buildings. \"\n f\"{len(self.collapsed)} element(s) collapsed \"\n f\"during generation - unique_id: {self.collapsed}\"\n )\n\n # check MultiPolygons - usually caused by error in input geometry\n self.multipolygons = tesselation[tesselation.geometry.type == \"MultiPolygon\"][\n unique_id\n ]\n if len(self.multipolygons) > 0:\n warnings.warn(\n \"Tessellation contains MultiPolygon elements. Initial objects should \"\n f\"be edited. unique_id of affected elements: {list(self.multipolygons)}\"\n )", "def trajcheck(file, lat_diff=1., lon_diff=1., search=None, path = '/home/ollie/muali/Data/winter_all/'):\n \n \n \n \n file_name = path + file\n \n df_IGR = pd.read_excel('IGR_25N_v2.xlsx')\n #f = open(file_name,'rb+')\n \n # getting launch date\n #for i, file_ in enumerate(f):\n # if i ==4: # reads 6th line\n # date_str = f.read(25)\n # f.close()\n #l_date = parse(date_str)\n \n df = pd.read_csv(file_name, skiprows=7, header=None, delim_whitespace=True) \n \n # dict to rename columns\n cols_renames = {2: 'year', 3: 'month', 4: 'day', 5: 'hour', 6: 'minute'}\n\n # converting year to 4 digits\n df.iloc[:,2] = df.iloc[:,2] + 1900\n\n # df_subset is used to merge yy, mm, dd, hh values into BT_time column\n df_subset = df.loc[:, list(cols_renames.keys())].rename(columns=cols_renames)\n # df_subset is extracting the time from the original dataframe to be used to make a time index for the final dataframe\n dt_series = pd.to_datetime(df_subset) # converts the df into a time series which we will add as extra column\n dt_series.head()\n # adding a new backtrajectory column\n df['BT_time'] = dt_series # adding the back trajectory time column to df\n \n # dropping the not required columns\n df.drop([0,1,2,3,4,5,6,7], axis =1, inplace=True)\n df.head()\n \n col_rename2 = {8: 'back_hr', 9:'Lat', 10: 'Lon', 11:'AGL', 12:'Pressure'}\n df.rename(columns=col_rename2, inplace=True)\n\n del df_subset\n del dt_series\n \n count = 0\n # count will be used to tell how many times our condition is met\n \n string_list =[] # to store the output of print to be used by regex\n for row_IRG in df_IGR.itertuples():\n for row in df.itertuples():\n # lat_diff lon_diff provided by the user\n if ((-lat_diff < (row_IRG.Lat - row.Lat) < lat_diff) & (-lon_diff < (row_IRG.Lon - row.Lon) < lon_diff)):\n # print(row.back_hr, row.BT_time, row_IRG.ID, row_IRG.Lat, row_IRG.Lon ,row_IRG.Lat - row.Lat, row_IRG.Lon - row.Lon)\n# print(row.back_hr, row.BT_time.strftime('%d-%m-%Y %H:%M'), row_IRG.ID, row_IRG.Lat, row_IRG.Lon , row.Lat - row_IRG.Lat, row.Lon - row_IRG.Lon, geodesic((row.Lat, row.Lon), (row_IRG.Lat, row_IRG.Lon)).km )\n string_list.append((row.back_hr, row.BT_time.strftime('%d-%m-%Y %H:%M'), row_IRG.ID, row_IRG.Lat, row_IRG.Lon , row.Lat - row_IRG.Lat, row.Lon - row_IRG.Lon, geodesic((row.Lat, row.Lon), (row_IRG.Lat, row_IRG.Lon)).km ))\n count = count + 1\n \n print(\"The following are the counts:\")\n print(count)\n \n if search is not None:\n for item in string_list:\n if search in item:\n print(item,'\\n')\n \n else:\n for item in string_list:\n print(item,'\\n')", "def check(self, runtime):", "def task_validate_spiketrain_file(pk):\n\n # init and checks\n valid = False\n logger = Logger.get_logger(StringIO())\n try:\n df = Data.objects.get(id=pk)\n assert df.kind == 'st_file'\n tr = df.content_object\n except:\n logger.log('ERROR')\n return valid\n\n try:\n logger.log('looking at spiketrain file with pk: %s' % df.id)\n sts = read_gdf_sts(df.file.path)\n logger.log('found 
st_file: %s' % df.name)\n for st in sts:\n if not isinstance(sts[st], sp.ndarray):\n raise TypeError('spike train %s not ndarray' % st)\n if not sts[st].ndim == 1:\n raise ValueError('spike trains have to be ndim==1')\n\n # TODO: more checks?\n\n logger.log('st_file passed all checks')\n valid = True\n except Exception, ex:\n logger.log('ERROR: spiketrain file check: %s' % str(ex))\n finally:\n df.save()\n tr.valid_gt_log = logger.get_content()\n tr.save()\n return valid", "def isInternal(self):\n if self.data.depend_er_job == self.data.depend_on_job:\n return True\n return False", "def sanity_check(self):\n res = True\n res = res and self.detected\n res = res and np.sum(self.diffs) < 30000 # experimental value\n return res", "def test_check_source_2(self):\n self.eval_flags[\"check_id_typo\"] = False\n import_genome.check_source(self.src1, self.eval_flags,\n host_genus=\"Mycobacterium\")\n self.assertEqual(len(self.src1.evaluations), 3)" ]
[ "0.6346247", "0.61815774", "0.5947306", "0.58746684", "0.58349323", "0.5784224", "0.575642", "0.5685432", "0.56115633", "0.56095415", "0.5555707", "0.5527303", "0.54907143", "0.5460359", "0.5445118", "0.5434387", "0.5405169", "0.53977865", "0.5394035", "0.53772044", "0.5369217", "0.5338532", "0.53190887", "0.53160536", "0.5310499", "0.5305002", "0.53024834", "0.52968323", "0.52944475", "0.52867156" ]
0.65443593
0
For a given liaison and set of DLCs, update all unsent EmailMessages associated with those DLCs to have that Liaison. We can't make this part of, e.g., the save() method on DLC, because the liaison.dlc_set.update() commands used in views.py go straight to SQL, bypassing the ORM, so save() doesn't get hit, and neither do pre/post-save signals. Therefore we make it a standalone function, so it can be used in cases where save() is unavailable, but also connect it to the post_save signal.
def update_emails_with_dlcs(dlcs, liaison=None):
    for dlc in dlcs:
        EmailMessage.objects.filter(
            record__author__dlc=dlc,
            date_sent__isnull=True).update(_liaison=liaison)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_attributes_by_domains(etl, update_kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_domain_code,\r\n )\r\n tuple(func(**kwargs) for kwargs in update_kwargs)", "def update_data(self):\n staff = Staff.objects.all()\n orgs = Organization.objects.all()\n depts = Department.objects.all()\n\n existing = self.all()\n if existing.count():\n existing.delete()\n\n if staff.count():\n for s in staff:\n record = CombinedTeledata(\n id=s.id,\n alpha=s.alpha,\n name=s.name,\n first_name=s.first_name,\n last_name=s.last_name,\n sort_name=s.sort_name,\n email=s.email,\n phone=s.phone,\n postal=s.postal,\n job_position=s.job_position,\n department=s.dept.name,\n dept_id=s.dept.id,\n organization=s.dept.org.name,\n org_id=s.dept.org.id,\n building=s.bldg.name,\n bldg_id=s.bldg.import_id,\n room=s.room,\n from_table='staff'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(s.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if orgs.count():\n for o in orgs:\n record = CombinedTeledata(\n id=o.id,\n name=o.name,\n sort_name=o.name,\n phone=o.phone,\n fax=o.fax,\n building=o.bldg.name,\n bldg_id=o.bldg.import_id,\n room=o.room,\n from_table='organizations'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(o.keywords.all())\n except Exception as e:\n logger.error(str(e))\n\n if depts.count():\n for d in depts:\n record = CombinedTeledata(\n id=d.id,\n name=d.name,\n sort_name=d.name,\n phone=d.phone,\n fax=d.fax,\n organization=d.org.name,\n org_id=d.org.id,\n building=d.bldg.name,\n bldg_id=d.bldg.import_id,\n room=d.room,\n from_table='departments'\n )\n\n try:\n record.save(doing_import=True)\n record.keywords_combined.set(d.keywords.all())\n except Exception as e:\n logger.error(str(e))", "def on_update(self):\n\t\tfor email_account in frappe.get_all(\"Email Account\", filters={\"domain\": self.name}):\n\t\t\ttry:\n\t\t\t\temail_account = frappe.get_doc(\"Email Account\", email_account.name)\n\t\t\t\tfor attr in [\"email_server\", \"use_imap\", \"use_ssl\", \"use_tls\", \"attachment_limit\", \"smtp_server\", \"smtp_port\", \"use_ssl_for_outgoing\", \"append_emails_to_sent_folder\", \"incoming_port\"]:\n\t\t\t\t\temail_account.set(attr, self.get(attr, default=0))\n\t\t\t\temail_account.save()\n\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.msgprint(_(\"Error has occurred in {0}\").format(email_account.name), raise_exception=e.__class__)", "def update_existing_entry(client, list_id, mail_addr, merge_fields, l_tags):\n # hash mail address \n mail_h = hash_string(mail_addr)\n # send entry\n try:\n response = client.lists.set_list_member(list_id, mail_h,\n {\"email_address\": mail_addr, \"status_if_new\": \"subscribed\",\n \"status\": \"subscribed\", \"merge_fields\": merge_fields})\n print(response)\n except ApiClientError as error:\n print(\"Error on mail address {}: {}\".format(mail_addr, error.text))\n for tag in l_tags:\n try:\n response = client.lists.update_list_member_tags(list_id, mail_h, \n {\"tags\": [{\"name\": tag, \"status\": \"active\"}]})\n print(response)\n except ApiClientError as error:\n print(\"Error on updating tag '{}' for mail address {}: {}\".format(tag, mail_addr, error.text))", "def update_fields(self, request):\n message = request.message\n objects = message.model.objects\n if message.filter is not None and len(message.filter) > 0:\n objects.filter(**message.filter).update(**message.kwargs)\n\n else:\n 
objects.all().update(**message.kwargs)\n\n return SuccessReply()", "def update_data_from_requests(cls,\n user_request,\n model,\n fields_to_update=None):\n\n err_dict = cls._generate_for_errors_object_when_updating(user_request)\n if len(err_dict) > 0:\n raise_error(serialization_errors['many_invalid_fields'],\n err_dict=err_dict)\n return super().update_data_from_requests(user_request, model,\n fields_to_update)", "def _update_event(klass, event, *data_dicts):\n # Create d\n d = {}\n for data_dict in data_dicts:\n d.update(data_dict)\n d = klass._remove_none_fields(d)\n d = dict((\"set__\" + k, v) for k, v in d.iteritems())\n\n # Update and save.\n event.update(**d)\n event.save()", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n pass", "def update( id=UNDEFINED, identifier=UNDEFINED, prioritya=UNDEFINED, priorityb=UNDEFINED, priorityc=UNDEFINED, priorityd=UNDEFINED, prioritye=UNDEFINED, created_at=UNDEFINED, updated_at=UNDEFINED, published_at=UNDEFINED, processed=UNDEFINED):\n errors = []\n params = dict( filter( lambda i: not isinstance( i, SV ), locals().items() ) )\n if 'id' not in params:\n return None, [(\"'id' is a required parameter to update\", None, None)]\n n, errors = note.update( **params )\n try: \n return db_to_model(n), errors\n except:\n return None, [sys.exc_info()] + errors", "def update_fields(self, *fieldnames, objects=None, batch_size=None, send_signal=True,\n concurrent=False, max_concurrent_workers=None, return_queryset=False):\n if not fieldnames:\n fieldnames = [\n i.name for i in self.model._meta.fields\n ]\n\n if objects is not None:\n if not isinstance(objects, collections.Iterable):\n raise TypeError('objects must be iterable')\n\n self.populate_queryset_values(objects, *fieldnames)\n\n concurrent_write = self._get_concurrent(concurrent)\n\n if send_signal:\n pre_update_fields.send(\n self.model,\n instances = self,\n field_names = fieldnames,\n batch_size = batch_size\n )\n\n # TODO: ensure connected each time an update happens within the loop\n self.model.objects.ensure_connected()\n\n n = 0\n\n if concurrent_write:\n n_concurrent_writers = self._get_n_concurrent_workers(max_concurrent_workers)\n chunks = self.get_chunks(batch_size, n_concurrent_writers)\n\n jobs = [(BulkModelQuerySet._cased_update_chunk, self, chunk, fieldnames,) for chunk in chunks if chunk]\n executor = ConcurrentExecutor(jobs)\n results = executor.run_async()\n n = sum(results)\n\n else:\n chunks = self.get_chunks(batch_size)\n\n for chunk in chunks:\n if not chunk:\n # skip empty chunks (only happens in the case of an empty queryset)\n continue\n\n result = self._cased_update_chunk(chunk, fieldnames)\n n += result\n\n\n if return_queryset:\n _ids = []\n for obj in self:\n _id = getattr(obj, 'id') or getattr(obj, 'pk')\n if _id is not None:\n _ids.append(_id)\n\n qs = self.filter(id__in = _ids)\n else:\n qs = self.none()\n\n\n if send_signal:\n post_update_fields.send(\n self.model,\n instances = self,\n queryset = qs,\n field_names = fieldnames,\n batch_size = batch_size,\n n = n\n )\n\n if return_queryset:\n return qs\n\n return n", "def _update_bulk(self, iterable):\n self.cursor.executemany(self.UPDATE, iterable)", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n\n to_set = [\n \"{0} = %({0})s\".format(f) for f in fields_to_modify\n if f in self.editable_fields\n ]\n if len(to_set) == 0:\n print('Warning: No data to set', data)\n return\n\n query = \"UPDATE {} SET {} WHERE {}\".format(\n self._NAME,\n ', '.join(to_set),\n ' AND 
'.join(\"{0} = %({0})s\".format(f) for f in self.primary_fields),\n )\n self._execute(query, data)", "def test_update_all_with_ally_code(db):\n unit = UnitFactory()\n for x in range(7):\n StatMedalRuleFactory(unit=unit, stat=\"health\", value=x * 1000)\n\n p1, p2 = PlayerFactory.create_batch(2)\n PlayerUnitFactory(player=p1, unit=unit, health=3500)\n pu2 = PlayerUnitFactory(player=p2, unit=unit, health=3500)\n\n Medal.objects.update_all(ally_codes=[p1.ally_code])\n assert Medal.objects.count() == 4\n assert Medal.objects.filter(player_unit=pu2).count() == 0\n\n Medal.objects.update_all()\n assert Medal.objects.count() == 8", "def bulk_update(self, iterable):\n inserted, updated = [], []\n for d, h in iterable:\n if d in self:\n updated.append((d, h))\n else:\n inserted.append((d, h))\n self._update_bulk(updated)\n self._insert_bulk(inserted)", "def _update_all(self, criteria: Q, *args, **kwargs):\n raise NotImplementedError", "def bulk_update(self, iterable):\n inserted, updated = [], []\n for d, h in iterable:\n if -d in self:\n updated.append((-d, h))\n else:\n inserted.append((-d, h))\n self._update_bulk(updated)\n self._insert_bulk(inserted)", "def modify_eml(old_eml_file_list, url_sha1_list, url_sha1_dict):\n i = 0\n flag = True\n j = 0\n count = 0\n tmp_url = ''\n\n for sha1 in url_sha1_list:\n tmp_url = tmp_url + url_sha1_dict[sha1] + '\\n' # Construct the URLs\n if i == 2:\n try:\n fp = open(old_eml_file_list[j], 'r')\n except Exception as err:\n print \"Read message fail for %s\" % err\n raise\n\n msg = em.message_from_file(fp)\n fp.close()\n\n # Modify the eml body\n if msg.is_multipart():\n # email has multiple parts\n ret_msg = modify_multipart_eml(old_eml_file_list[j], tmp_url)\n else:\n # email hasn't multiple parts\n ret_msg = modify_nonmultipart_eml(old_eml_file_list[j], tmp_url)\n\n # Write to a new email\n new_eml = '/home/develop/backup/email/samples/10Per_with_mal_aft_mod/' + os.path.basename(old_eml_file_list[j])\n fp = open(new_eml, 'wb')\n fp.write(ret_msg.as_string())\n fp.close()\n\n # Re-initialize the variables\n i = 0\n j = j + 1\n tmp_url = ''\n else:\n i = i + 1\n continue", "def _update_subscribers(self):\n try:\n campaign = self.campaigns.latest('when')\n except StudyGuideCampaign.DoesNotExist:\n pass\n else:\n for student in utils.students_for_event(self.event):\n subscriber, created = StudyGuideCampaignSubscriber.objects.get_or_create(\n campaign=campaign,\n user=student.user)\n if created: # only add if it's not there already\n campaign.subscribers.add(subscriber)", "def hook_add_campaign_data(user, objects_list):\n message, success, title = \"\", 0, \"error\"\n\n if SetDefault.objects.count():\n set_defaults_data = SetDefault.objects.filter(user=user)\n\n if(set_defaults_data):\n for set_default in set_defaults_data:\n field_name = set_default.field\n field_value = set_default.values\n\n for obj in objects_list:\n # check is mapping object has current field\n if hasattr(obj, field_name):\n is_attr_set = setattr(\n obj, field_name, field_value) # f.foo=bar\n obj.save() # this will update only\n\n else:\n message += \"No set as default data exists for user\"\n else:\n message += \"Set as default table is empty so skipping cloning of data\"\n print(message)\n return True", "def updateMessages(self, parameters):\n\t\treturn", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, 
parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return", "def updateMessages(self, parameters):\n return" ]
[ "0.52795607", "0.5235836", "0.5187958", "0.50904024", "0.50034666", "0.48836243", "0.48716804", "0.48288625", "0.47948357", "0.4792678", "0.47558114", "0.47519177", "0.47414127", "0.47377956", "0.4734942", "0.46482447", "0.46457776", "0.4619367", "0.4618994", "0.46182483", "0.45937487", "0.45937487", "0.45937487", "0.45937487", "0.45937487", "0.45937487", "0.45937487", "0.45937487", "0.45937487", "0.45937487" ]
0.7852708
0
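The row above describes wiring update_emails_with_dlcs() to Django's post_save signal in addition to calling it directly from views. A minimal sketch of that hookup, assuming DLC has a liaison foreign key (implied by liaison.dlc_set in the query) and that the model and helper are importable from the owning app; the receiver name is made up for illustration:

```python
from django.db.models.signals import post_save
from django.dispatch import receiver

# DLC and update_emails_with_dlcs are assumed to be importable from the app
# that owns them; the exact module paths are not given in the row.
@receiver(post_save, sender=DLC)
def sync_unsent_emails(sender, instance, **kwargs):
    # When a DLC is saved through the ORM, re-point its unsent
    # EmailMessages at the DLC's current liaison.
    update_emails_with_dlcs([instance], instance.liaison)
```

This only covers saves that go through the ORM; the bulk liaison.dlc_set.update() path still has to call the helper explicitly, which is exactly why the row keeps it as a standalone function.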
Analytic expression for the normalized inverse cumulative mass function. The argument ms is the normalized mass fraction in [0, 1].
def _icmf(self, ms): return self._pot.a * numpy.sqrt(ms) / (1 - numpy.sqrt(ms))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mass(query, ts):\n\n m = len(query)\n q_mean = np.mean(query)\n q_std = np.std(query)\n mean, std = mov_mean_std(ts, m)\n dot = sliding_dot_product(query, ts)\n return 2 * m * (1 - (dot - m * mean * q_mean) / (m * std * q_std))", "def normalize(X, m, s):\n return (X - m) / s", "def normalize(X, m, s):\n return (X - m)/s", "def normalize(X, m, s):\n return ((X - m) / s)", "def inverse_normal_transform(M):\n R = stats.mstats.rankdata(M, axis=1) # ties are averaged\n if isinstance(M, pd.DataFrame):\n Q = pd.DataFrame(stats.norm.ppf(R/(M.shape[1]+1)), index=M.index, columns=M.columns)\n else:\n Q = stats.norm.ppf(R/(M.shape[1]+1))\n return Q", "def Ncen(self, m):\n result = np.log10(m) - self.log10mMin\n result /= self.sLog10m\n result = 0.5 * (1. + special.erf(result))\n result *= self.fInc(m)\n return result", "def normalize(X, m, s):\n normX = (X - m) / s\n return (normX)", "def Ncen(self, m):\n result = np.log10(m) - np.log10(self.mMinHod)\n result /= self.sLogM\n result = 0.5 * (1. + special.erf(result))\n return result", "def reduced_normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x[mean] = 0.\n x /= x.sum()\n return x", "def inv_ms(self):\n for i in self.indices:\n i.inv_m()\n continue\n\n return _NEG_UNITY ** self._total_j", "def Mi(m):\n return 700 * (numpy.exp(m/1127.0) - 1)", "def inv_efunc(z):\n return 1. / sqrt(omega_m * (1. + z)**3 + omega_lam)", "def dPdlogM_internal(self, mass):\n res = 0.0 * mass\n res[np.digitize(mass, self.mass_edges) == self.i0] = 1.0 / self.deltalogm\n return res", "def normal_pmf(x: np.array, mean: float, sigma: float) -> np.array:\n x = np.exp(-1 / 2 * ((x - mean) / sigma) ** 2)\n x /= np.sqrt(2 * np.pi * sigma ** 2)\n x /= x.sum()\n return x", "def periodicity_metric(light_curve_rms, sm_phase_rms):\n return (sm_phase_rms ** 2) / (light_curve_rms ** 2)", "def SM2m(sm):\n return sm * 1609.344", "def _flux_unc_as_mags(fluxes, uncs):\n uncs_mag = np.empty(len(fluxes))\n\n # fluxes-uncs case\n indxs, = np.where(fluxes - uncs <= 0)\n if len(indxs) > 0:\n uncs_mag[indxs] = -2.5*np.log10(fluxes[indxs]\n / (fluxes[indxs] + uncs[indxs]))\n\n # normal case\n indxs, = np.where(fluxes - uncs > 0)\n if len(indxs) > 0:\n uncs_mag[indxs] = -2.5*np.log10((fluxes[indxs] - uncs[indxs])\n / (fluxes[indxs] + uncs[indxs]))\n\n return uncs_mag", "def central_first_moment(self, mass):\n if self.sigma <= 0.0:\n log_mass = numpy.log10(mass)\n return numpy.where(log_mass > self.log_M_min, 1.0, 0.0)\n return 0.5*(1+special.erf((numpy.log10(mass) - \n self.log_M_min)/self.sigma))", "def inverse_normal_transformation(x, c=3/8):\n r = scipy.stats.rankdata(x, \"average\")\n return scipy.stats.norm.ppf((r - c) / (len(x) - 2 * c + 1))", "def convert_dop_cmm1(n,fracme=0.067,epsinf=11):\n eps0 =8.854e-12\n me =9.109e-31\n e =1.60218e-19\n #epsinf = 11\n n = n*1e6 #convertion en m-3\n c = 3e8\n return 1e-2*((np.sqrt((n*e**2)/(eps0*fracme*me*epsinf))))/(2*np.pi*c)", "def Mo96(self,dc,nu):\n return 1. 
+ (nu**2.-1.)/dc", "def dPdlogM_internal(self, mass):\n return self.gamma*mass**self.gamma/(self.mmax**self.gamma-self.mmin**self.gamma)", "def _mult_inverse(self, a, m):\n g, x, y = self._egcd(a, m)\n if g != 1:\n raise Exception('modular inverse does not exist')\n else:\n return x % m", "def mc2ms(mc,eta):\n root = np.sqrt(0.25-eta)\n fraction = (0.5+root) / (0.5-root)\n invfraction = 1/fraction\n\n m2= mc * np.power((1+fraction),0.2) / np.power(fraction,0.6)\n\n m1= mc* np.power(1+invfraction,0.2) / np.power(invfraction,0.6)\n return (m1,m2)", "def convert_mev_inv_cm(toto):\n hb=1.05458e-34\n ev=1.60218e-19\n c= 3e8\n return toto*ev/(1e5*hb*2*np.pi*c)", "def f_multivariate_normal(x,M):\n return .5*np.dot(np.dot(x,M),x)", "def mass_energy():\n c2 = _si.c.value**2\n return Equivalency(\n [\n (si.kg, si.J, lambda x: x * c2, lambda x: x / c2),\n (si.kg / si.m**2, si.J / si.m**2, lambda x: x * c2, lambda x: x / c2),\n (si.kg / si.m**3, si.J / si.m**3, lambda x: x * c2, lambda x: x / c2),\n (si.kg / si.s, si.J / si.s, lambda x: x * c2, lambda x: x / c2),\n ],\n \"mass_energy\",\n )", "def pm_mag(pmdec, pmra):\n ra_index = ~np.isnan(pmra)\n pmdec = pmdec[ra_index]\n pmra = pmra[ra_index]\n dec_index = ~np.isnan(pmdec)\n pmdec = pmdec[dec_index]\n pmra = pmra[dec_index]\n\n return np.array([np.sqrt(dec**2 + ra**2) for (dec, ra) in zip(pmdec, pmra)])", "def second_moment(self, mass, z=None):\n return 1.0", "def fnu(self, m):\n return 10**(-0.4*(m -23.9))" ]
[ "0.6188651", "0.61184984", "0.6113977", "0.6110041", "0.60532564", "0.60418516", "0.6002625", "0.59949887", "0.59357876", "0.5919308", "0.5904984", "0.5893741", "0.5891592", "0.5873965", "0.58559597", "0.5819413", "0.58134836", "0.5808183", "0.5798541", "0.57581913", "0.5741526", "0.5721421", "0.57086426", "0.57070994", "0.56753254", "0.5672176", "0.56477314", "0.56377715", "0.5637609", "0.5606828" ]
0.68552166
0
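For the _icmf row above, the returned expression is the inverse of a Hernquist-style normalized cumulative mass profile; that profile is an inference from the code (self._pot.a being the scale length a), not something stated in the row. The inversion works out as:

```latex
% Assuming the normalized cumulative mass m(r) = r^2 / (r + a)^2 (Hernquist profile):
%   sqrt(m) = r / (r + a)  =>  r (1 - sqrt(m)) = a sqrt(m)
\[
  m(r) = \frac{r^{2}}{(r+a)^{2}}
  \quad\Longrightarrow\quad
  r(m) = \frac{a\sqrt{m}}{1-\sqrt{m}}, \qquad m \in [0, 1),
\]
```

which is exactly what the one-liner computes, with ms playing the role of m.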
Split up seq into pieces of the given size.
def split_seq(seq,size): return [seq[i:i+size] for i in range(0, len(seq), size)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_seq(seq, size):\n return [seq[ii:ii + size] for ii in range(0, len(seq), size)]", "def split_seq(seq,size):\n for i in range(0,len(seq),size):\n if i+size<len(seq) and seq[i+size] - seq[i] == size:\n yield seq[i:i+size]", "def _chunker(self, seq, size):\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))", "def chunker(seq, size):\n\n return (seq[pos : pos + size] for pos in range(0, len(seq), size))", "def chunks(seq, size):\n for i in range(0, len(seq), size):\n yield seq[i:i + size]", "def chunk(size, seq):\n if not isinstance(size, int) or size <= 0: # pragma: no cover\n raise ValueError(\"size must be an integer greater than zero\")\n\n group = []\n\n for item in seq:\n if len(group) >= size:\n yield group\n group = []\n group.append(item)\n\n if group:\n yield group", "def chunk(seq, size, groupByList=True):\n func = tuple\n if groupByList:\n func = list\n return [func(seq[i:i + size]) for i in range(0, len(seq), size)]", "def get_chunks(sequence, chunk_size):\n segments = []\n for i in range(0, len(sequence), chunk_size):\n tmp = sequence[i:chunk_size+i]\n if len(tmp) == chunk_size:\n segments.append(tmp)\n return segments", "def _chunker(self, seq, size):\n return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))", "def chunker(seq: list, size: int) -> list:\n if isinstance(seq, list) == False:\n raise ValueError(\"`seq` must be a list\")\n return list(seq[pos:pos + size] for pos in range(0, len(seq), size))", "def get_chunks(sequence, ck_size):\n \n list_chunk = []\n i=1\n l = len(sequence)\n if l < 4*ck_size:\n raise ValueError(\"Chunk size should be of 4 at least \")\n for i in range(1, l):\n if i*ck_size < l:\n list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #while(i*ck_size < l):\n #list_chunk.append(sequence[i*ck_size-ck_size:i*ck_size])\n #i += 1\n return list_chunk", "def get_chunks(sequence, chunk_size):\n seq_length = len(sequence)\n seq_list = []\n treshold = int(seq_length) // int(chunk_size)\n if treshold <4:\n raise ValueError(\"Change chunk size\")\n for i in range(treshold):\n seq = sequence[i*chunk_size:(i+1)*chunk_size]\n seq_list.append(seq)\n return seq_list", "def chunk_sequence(sequence, chunk_size=10, n_leave=0):\r\n \r\n \r\n chunks=[]\r\n for i in range(0,len(sequence)-chunk_size+1-n_leave):\r\n chunks.append(sequence[i:i+chunk_size])\r\n \r\n return torch.stack(chunks)", "def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]", "def Split(ar, size):\r\n return [ar[i:i + size] for i in range(0, len(ar), size)]", "def _chunkify(sequence, num_chunks):\n quo, rem = divmod(len(sequence), num_chunks)\n return (sequence[i * quo + min(i, rem):(i + 1) * quo + min(i + 1, rem)] for i in range(num_chunks))", "def chunks(sequence, chunk_size):\r\n\r\n # YOUR CODE HERE\r", "def split_on_chunks(sequence, length: int, no_rest=False):\n\n if not isinstance(sequence, (list, tuple)):\n raise TypeError('Support only an instance of list or tuple')\n\n sequence_len = len(sequence)\n\n if length < 1:\n raise ValueError(\"Length of a chunk must be at least 1\")\n elif length >= sequence_len:\n return sequence\n\n chuncks = list()\n for i in range(0, sequence_len, length):\n chuncks.append(tuple(sequence[i: i + length]))\n\n if no_rest is True and len(chuncks[-1]) < length and len(chuncks) > 1:\n chuncks[-2] = list(chuncks[-2])\n chuncks[-2].extend(chuncks[-1])\n chuncks[-2] = tuple(chuncks[-2])\n chuncks = chuncks[:-1]\n\n return chuncks", "def 
split_len(seq, length):\n return [seq[i:i+length] for i in range(0, len(seq), length)]", "def chunks(sequence: Iterable[T], chunk_size: int = 2) -> Iterable[List[T]]:\n lsequence = list(sequence)\n while lsequence:\n size = min(len(lsequence), chunk_size)\n yield lsequence[:size]\n lsequence = lsequence[size:]", "def get_chunks(vals, size):\n for i in range(0, len(vals), size):\n yield vals[i:i + size]", "def chunks(items, size):\n return [items[i:i+size] for i in range(0, len(items), size)]", "def chunk( seq, size, pad=None ):\n n = len(seq)\n mod = n % size\n for i in xrange(0, n-mod, size):\n yield seq[i:i+size]\n if mod:\n padding = [pad] * (size-mod)\n yield seq[-mod:] + padding", "def _chunk(iterable, size, fillvalue=None):\n\t\targs = [iter(iterable)] * size\n\t\treturn [''.join(x) for x in itertools.izip_longest(*args, fillvalue=fillvalue)]", "def iter_chunks(sequence, chunk_size) :\n res = []\n for item in sequence :\n res.append(item)\n if len(res) >= chunk_size :\n yield res\n res = []\n if res : yield res", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def _split_on_chunks(self, iterable, size):\n return utils.split_on_chunks(iterable, size)", "def chunks(cipher, size):\n\treturn [cipher[i*size:(i+1)*size] for i in range(int(math.ceil(len(cipher)*1.0/size)))]", "def chunk_seq(iseq: ISeq, maxlen: int) -> Iterable[ISeq]:\n return (iseq[i : i + maxlen] for i in range(0, len(iseq), maxlen))" ]
[ "0.83308667", "0.80712384", "0.7818045", "0.77790713", "0.7623721", "0.7519637", "0.74383765", "0.7353186", "0.7353124", "0.731081", "0.72468454", "0.7198315", "0.713888", "0.71279585", "0.705325", "0.7039897", "0.7039241", "0.70042735", "0.69883114", "0.69418406", "0.694126", "0.6880825", "0.6860806", "0.6838044", "0.6817919", "0.6808697", "0.676538", "0.6730113", "0.6718483", "0.6713017" ]
0.8370639
0
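A quick usage sketch for the split_seq row above; the example values are illustrative, not taken from the dataset. The last chunk simply comes out shorter when len(seq) is not a multiple of size:

```python
def split_seq(seq, size):
    # same one-liner as in the row, reproduced here so the snippet is self-contained
    return [seq[i:i + size] for i in range(0, len(seq), size)]

print(split_seq([1, 2, 3, 4, 5, 6, 7], 3))  # [[1, 2, 3], [4, 5, 6], [7]]
print(split_seq("abcdef", 2))               # ['ab', 'cd', 'ef']
```

Because it slices, it works on any sequence type (lists, strings, tuples) and returns pieces of the same type.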
Returns the parent/enclosing tag (instance of PythonTag()) from the specified tag list. If no such parent tag exists, returns None.
def getParentTag(self, tagsStack):
    # DOC {{{
    # }}}

    # CODE {{{
    # determine the parent tag {{{
    if (len(tagsStack)):
        parentTag = tagsStack[-1]
    else:
        parentTag = None
    # }}}

    # return the tag
    return parentTag
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findTypeParent(element, tag):\n \n p = element\n while True:\n p = p.getparent()\n if p.tag == tag:\n return p\n \n # Not found\n return None", "def parent(self):\n if not self._parents:\n return None\n elif len(self._parents) == 1:\n return tuple(self._parents)[0]\n else:\n raise RuntimeError('Ambiguous parent: there are multiple parents.')", "def find_non_exec_parent(tag):\n no_exec_parent = \"\"\n for parent in tag.parents:\n if parent and parent.name in NONEXEC_PARENTS:\n no_exec_parent = parent.name\n\n return no_exec_parent", "def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None", "def parent(self, node):\r\n return self.find_node(node).parent.content", "def parent(self) -> Union[\"ExpressionNode\", None]:\n return self.__parent", "def wordclass_parent(self):\n if self.wordclass is None:\n return None\n for ancestor in self.ancestors_descending():\n if ancestor.wordclass is not None:\n return ancestor", "def extends(self):\n p = re.compile(r\"^{%\\s+(extends\\s*(?P<parent>.*))\\s+%}\")\n e = p.match(self.source)\n # extends detact must be done at the begining of parsing.\n # return None if there is no extends tag.\n if not e:\n return None\n self.cur += len(e.group())\n return e.group(\"parent\")", "def parent(self):\n if self._parent is not None:\n return self._parent()\n else:\n return None", "def lookup_element(self, name: str) -> ElementNSEntry:\n for i, scope in enumerate(reversed(self.element_ns_stack)):\n if name in scope:\n el, parent_def = scope[name]\n if i == 0:\n # Return anything from local namespace\n return (el, parent_def)\n elif isinstance(el, comp.Signal):\n # Signals are allowed to be found in parent namespaces\n return (el, parent_def)\n elif self.parent_parameters_visible and isinstance(el, Parameter):\n # Parameters are allowed to be found in parent namespaces,\n # except in some contexts\n return (el, parent_def)\n return (None, None)", "def find_parent_of(self, *args):\n return _ida_hexrays.citem_t_find_parent_of(self, *args)", "def wordclass_parent_plus_one(self):\n if self.wordclass is None:\n return None\n for i, ancestor in enumerate(self.ancestors_descending()):\n if ancestor.wordclass is not None:\n try:\n return self.ancestors_descending()[i + 1]\n except IndexError:\n return None", "def get_parent_type(self) -> Optional['XsdType']:\n component = self.parent\n while component is not self and component is not None:\n if isinstance(component, XsdType):\n return component\n component = component.parent\n return None", "def elm_parent(root, elm):\n if lxml:\n return elm.getparent()\n else:\n def find_parent(cur, elm):\n for o in cur:\n if o == elm:\n return cur\n parent = find_parent(o, elm)\n if parent is not None:\n return parent\n return None\n return find_parent(root, elm)", "def get_parent(self):\n return BinaryNode.or_none(self.parent)", "def wordclass_parent_minus_one(self):\n if self.wordclass is None:\n return None\n for i, ancestor in enumerate(self.ancestors_descending()):\n if self.ancestors_descending()[i + 1].wordclass is not None:\n return ancestor", "def find_parents(self, tagname):\n res = []\n if self._tagname == tagname:\n res = [self]\n if self._parent is not None:\n res += self._parent.find_parents(tagname)\n return res", "def _determine_parent(self, caller):\n self.msgin(4, \"determine_parent\", caller)\n\n parent = None\n if caller:\n pname = caller.identifier\n\n if isinstance(caller, Package):\n parent = caller\n\n elif '.' 
in pname:\n pname = pname[:pname.rfind('.')]\n parent = self.findNode(pname)\n\n elif caller.packagepath:\n # XXX: I have no idea why this line\n # is necessary.\n parent = self.findNode(pname)\n\n self.msgout(4, \"determine_parent ->\", parent)\n return parent", "def get_parent(self):\n return self._find_by_locator().parent", "def find_parent(self):\n pass", "def getAncestorOfType(self, *args):\n return _libsbml.SBase_getAncestorOfType(self, *args)", "def parent( self, selector = None ): \n tmpList = []\n for node in self.nodeList:\n if node.parentNode:\n tmpList += self.getUniqueNodes( tmpList, [ node.parentNode ] )\n if selector:\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList, self ).filter( selector )\n else:\n tmpList = sorted( tmpList, key = lambda x: x.pos )\n return HtmlNodeList( tmpList, self.htmlDom, self.nodeList,self)", "def getPythonTag(self, tagsStack, lineNumber, indentChars, tagName, tagTypeDecidingMethod):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # compute the indentation level\n indentLevel = self.computeIndentationLevel(indentChars)\n # get the parent tag\n parentTag = self.getParentTag(tagsStack)\n\n # handle an enclosed tag {{{\n while (parentTag):\n # if the indent level of the parent tag is greater than of the current tag, use parent tag of the parent tag {{{\n if (parentTag.indentLevel >= indentLevel):\n del tagsStack[-1]\n # }}}\n # otherwise we have all information on the current tag and can return it {{{\n else:\n # create the tag\n tag = PythonTag(tagTypeDecidingMethod(parentTag.type), tagName, \"%s.%s\" % (parentTag.fullName, tagName,), lineNumber, indentLevel)\n\n # break the loop\n break\n # }}}\n\n # use parent tag of the parent tag\n parentTag = self.getParentTag(tagsStack)\n # }}}\n # handle a top-indent level tag {{{\n else:\n # create the tag\n tag = PythonTag(tagTypeDecidingMethod(None), tagName, tagName, lineNumber, indentLevel)\n # }}}\n\n # add the tag to the list of tags\n tagsStack.append(tag)\n\n # return the tag\n return tag\n # }}}", "def findParent(self, name=None, attrs={}, **kwargs):\r\n # NOTE: We can't use _findOne because findParents takes a different\r\n # set of arguments.\r\n r = None\r\n l = self.findParents(name, attrs, 1)\r\n if l:\r\n r = l[0]\r\n return r", "def get_parent(root_node: ast.AST, node: ast.AST):\n try:\n return node.parent\n except AttributeError:\n add_parent_info(root_node)\n return node.parent", "def parent(self, u):\n return self._ll_tree.get_parent(u)", "def get_parent(self) -> Optional[\"BaseSegment\"]:\n if not self._parent:\n return None\n _parent = self._parent()\n if not _parent or self not in _parent.segments:\n return None\n return _parent", "def parent(self) -> Optional[Heirarchical]:\n return None", "def get_parent(self, element):\n return element.find_elements_by_class_name(\"wrap-text\")[2].get_attribute(\"innerHTML\").strip()", "def get_top_parent(node):\n\n\ttop_node = cmds.listRelatives(node, p=True)\n\twhile top_node:\n\t\tnode = top_node[0]\n\t\ttop_node = cmds.listRelatives(node, p=True)\n\treturn node" ]
[ "0.66215205", "0.62002236", "0.61979115", "0.5927018", "0.58290815", "0.5811783", "0.57534534", "0.566354", "0.5655644", "0.5645672", "0.5637724", "0.56282586", "0.5618066", "0.5561548", "0.5546117", "0.553552", "0.5524999", "0.5514182", "0.55125684", "0.5498307", "0.5488604", "0.5478035", "0.547505", "0.5463625", "0.5443401", "0.5417668", "0.5409191", "0.53764325", "0.5371912", "0.53692245" ]
0.72540027
0
Returns an instance of PythonTag() based on the specified data.
def getPythonTag(self, tagsStack, lineNumber, indentChars, tagName, tagTypeDecidingMethod):
    # DOC {{{
    # }}}

    # CODE {{{
    # compute the indentation level
    indentLevel = self.computeIndentationLevel(indentChars)
    # get the parent tag
    parentTag = self.getParentTag(tagsStack)

    # handle an enclosed tag {{{
    while (parentTag):
        # if the indent level of the parent tag is greater than of the current tag, use parent tag of the parent tag {{{
        if (parentTag.indentLevel >= indentLevel):
            del tagsStack[-1]
        # }}}
        # otherwise we have all information on the current tag and can return it {{{
        else:
            # create the tag
            tag = PythonTag(tagTypeDecidingMethod(parentTag.type), tagName,
                            "%s.%s" % (parentTag.fullName, tagName,),
                            lineNumber, indentLevel)

            # break the loop
            break
        # }}}

        # use parent tag of the parent tag
        parentTag = self.getParentTag(tagsStack)
    # }}}
    # handle a top-indent level tag {{{
    else:
        # create the tag
        tag = PythonTag(tagTypeDecidingMethod(None), tagName, tagName, lineNumber, indentLevel)
    # }}}

    # add the tag to the list of tags
    tagsStack.append(tag)

    # return the tag
    return tag
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNodeClassFromData(self, data: OrderedDict):\n return Node if self.node_class_selector is None else self.node_class_selector(data)", "def tag(self) -> 'Tag':\n # project/lineage must exist so let's fetch it outside of try-except\n project = self.project.key\n lineage = self.lineage.key\n try:\n generation = self.key\n except self.Listing.Empty: # generation doesn't exist\n LOGGER.debug('No previous generations found - using a null tag')\n return NOTAG\n return TAGS(self.registry, project, lineage, generation)", "def get_tag(tag):\r\n from tagging.models import Tag\r\n if isinstance(tag, Tag):\r\n return tag\r\n\r\n try:\r\n if isinstance(tag, types.StringTypes):\r\n return Tag.objects.get(name=tag)\r\n elif isinstance(tag, (types.IntType, types.LongType)):\r\n return Tag.objects.get(id=tag)\r\n except Tag.DoesNotExist:\r\n pass\r\n\r\n return None", "def generic_tag_compiler(parser, token, params, varargs, varkw, defaults,\n name, takes_context, node_class):\n bits = token.split_contents()[1:]\n args, kwargs = parse_bits(parser, bits, params, varargs, varkw,\n defaults, takes_context, name)\n return node_class(takes_context, args, kwargs)", "def openTag ( x ):\n assert str(type(x)) == \"<type 'str'>\"\n tag = \"<\" + str ( x ) + \">\"\n assert str ( type ( tag ) ) == \"<type 'str'>\"\n return tag", "def tag(self, tag_name):\r\n return Tag(self, tag_name)", "def getTagData(tagname,data):\n tags = rhevGet(\"/api/tags\")\n doc = libxml2.parseDoc(tags)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/tags/tag[name[position()=1]= '\" + tagname + \"']\")\n return res[0].prop(data)", "def pop_tag(data):\n if data and is_tag(data[0]):\n return data.pop(0)", "def from_etree(self, data):\r\n if data.tag == 'request':\r\n # if \"object\" or \"objects\" exists, return deserialized forms.\r\n elements = data.getchildren()\r\n for element in elements:\r\n if element.tag in ('object', 'objects'):\r\n return self.from_etree(element)\r\n return dict((element.tag, self.from_etree(element)) for element in elements)\r\n elif data.tag == 'object' or data.get('type') == 'hash':\r\n return dict((element.tag, self.from_etree(element)) for element in data.getchildren())\r\n elif data.tag == 'objects' or data.get('type') == 'list':\r\n return [self.from_etree(element) for element in data.getchildren()]\r\n else:\r\n type_string = data.get('type')\r\n if type_string in ('string', None):\r\n return data.text\r\n elif type_string == 'integer':\r\n return int(data.text)\r\n elif type_string == 'float':\r\n return float(data.text)\r\n elif type_string == 'boolean':\r\n if data.text == 'True':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return None", "def get_tag_template(self, name):\n return self.__datacatalog.get_tag_template(name=name)", "def process_python(data, code):\n\tx=data\n\treturn eval(code)", "def tag(self, *arguments, **kwargs):\n return self.get_output('tag', *arguments, **kwargs)", "def tag(self) -> 'genmod.Tag':\n return self._generation.tag", "def get_instance_from_words(data):\n inst = Dataset.get_instance_template()\n inst[\"words\"] = data\n return inst", "def domToPyObj(domNode, keepContainers=0, objPattern=None, objParentClass=None):\n\n objPattern = objPattern or '_XO_'\n objParentClass = objParentClass or objPattern\n # does the tag-named class exist, or should we create it?\n # klass = '_XO_'+py_name(domNode.nodeName)\n klass = objPattern + py_name(domNode.nodeName)\n\n try:\n safe_eval(klass)\n except NameError:\n # exec ('class %s(_XO_): pass' % klass)\n 
exec('class %s(%s): pass' % (klass, objParentClass))\n # create an instance of the tag-named class\n pyObj = eval('%s()' % klass)\n\n # attach any tag attributes as instance attributes\n attr_dict = domNode.attributes\n if attr_dict is None:\n attr_dict = {}\n for key in attr_dict.keys():\n setattr(pyObj, py_name(key), attr_dict[key].value)\n\n # for nodes with character markup, might want the literal XML\n dom_node_xml = ''\n intro_PCDATA, subtag, exit_PCDATA = (0, 0, 0)\n\n # now look at the actual tag contents (subtags and PCDATA)\n for node in domNode.childNodes:\n node_name = py_name(node.nodeName)\n if keepContainers > KeepContainers.NEVER:\n dom_node_xml += node.toxml()\n\n # PCDATA is a kind of node, but not a new subtag\n # print \"Node name: %s\" % node.nodeName\n if node.nodeName == '#text':\n # if hasattr(pyObj, 'PCDATA'):\n if 'PCDATA' in pyObj.__dict__.keys():\n pyObj.PCDATA += node.nodeValue\n elif string.strip(node.nodeValue): # only use \"real\" node contents\n pyObj.PCDATA = node.nodeValue # (not bare whitespace)\n if not subtag:\n intro_PCDATA = 1\n else:\n exit_PCDATA = 1\n\n # does a pyObj attribute corresponding to the subtag already exist?\n # elif hasattr(pyObj, node_name):\n elif node_name in pyObj.__dict__.keys():\n # convert a single child object into a list of children\n if type(getattr(pyObj, node_name)) is not ListType:\n setattr(pyObj, node_name, [getattr(pyObj, node_name)])\n # add the new subtag to the list of children\n getattr(pyObj, node_name).append(domToPyObj(node, keepContainers, objPattern))\n\n # start out by creating a child object as attribute value\n else:\n setattr(pyObj, node_name, domToPyObj(node, keepContainers, objPattern))\n subtag = 1\n\n # See if we want to save the literal character string of element\n if keepContainers <= KeepContainers.NEVER:\n pass\n elif keepContainers >= KeepContainers.ALWAYS:\n pyObj._XML = dom_node_xml\n else: # if domNode appears to contain char markup, save _XML\n if subtag and (intro_PCDATA or exit_PCDATA):\n pyObj._XML = dom_node_xml\n\n return pyObj", "def from_xml(cls, xml_data, system, id_generator):\r\n\r\n xml_object = etree.fromstring(xml_data)\r\n system.error_tracker('WARNING: the <{tag}> tag is deprecated. '\r\n 'Instead, use <customtag impl=\"{tag}\" attr1=\"...\" attr2=\"...\"/>. 
'\r\n .format(tag=xml_object.tag))\r\n\r\n tag = xml_object.tag\r\n xml_object.tag = 'customtag'\r\n xml_object.attrib['impl'] = tag\r\n\r\n return system.process_xml(etree.tostring(xml_object))", "def get_template_arg(self, tag, the_args, for_class):\n if the_args:\n args = the_args.split(\",\")\n args = [x.strip() for x in args]\n else:\n args = []\n\n class_name = \"\"\n if for_class:\n class_name = for_class.name\n\n bad_arg = False\n\n tag = tag.upper();\n if tag == \"NAME\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return i.name\n bad_arg = True\n\n if tag == \"STRUCT\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return i.class_struct_name\n bad_arg = True\n\n if tag == \"TYPE_STRUCT\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return i.type_struct_name\n bad_arg = True\n\n if tag == \"NEW\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"%s()\" % i.class_new_func_name\n bad_arg = True\n\n if tag == \"IS_INSTANCE\":\n if len(args) > 1:\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"%s(%s)\" % (i.class_is_instance_func_name, args[0])\n bad_arg = True\n\n if tag == \"CAST\":\n if len(args) > 1:\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"reinterpret_cast<%s*>(%s)\" % (i.class_struct_name, args[0])\n bad_arg = True\n\n if tag == \"COPY\":\n if len(args) > 1:\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"%s(%s)\" % (i.class_copy_func_name, args[0])\n bad_arg = True\n\n if bad_arg:\n raise ValueError(\"Bad arguments '%s' to template tag '%s' (object:%s)\" % (the_args, tag, class_name))\n raise ValueError(\"Unknown template tag '%s'\" % tag)", "def get_tag(self, name: str) -> Tag:\n if not name:\n raise TypeError(\"The given tag name is illegal\")\n\n try:\n tag = next(self._generate_tags(name))\n except StopIteration as error:\n raise ResourceNotExistError(resource=\"tag\", identification=name) from error\n\n return tag", "def create_or_get_tag(self, tag_name: str, *args, **kwargs):\n\n tag_data = api.create_or_get_tag(\n tag_name,\n *args,\n api_key=self.__creds.api_key_v2, \n **kwargs)\n return en.Tag(tag_data)", "def from_data(cls, data):\n return object.__new__(cls)", "def _get_tag(self):\n return self.__tag", "def real_tag(x):\n if _debug: real_tag._debug(\"real_tag %r\", x)\n\n b = xtob(x)\n tag = Tag(Tag.applicationTagClass, Tag.realAppTag, len(b), b)\n if _debug: real_tag._debug(\" - tag: %r\", tag)\n\n return tag", "def init_data_item(self, data):\n construct_str = 'nodeitem.NodeItem('\n attrs = list()\n if data is not None:\n for attr, value in data.iteritems():\n if attr != 'data':\n attrs.append('%s=%s' % (attr, value))\n construct_str += str(attrs).strip('[]').replace('u\\'', '').replace('\\'', '')\n construct_str += ')'\n return eval(construct_str)", "def createContainer(tag, data={}): #@NoSelf", "def tag(self, text):\n\t\tpass", "def _data_tag_element(dataarray, encoding, datatype, ordering):\n import zlib\n ord = array_index_order_codes.npcode[ordering]\n enclabel = gifti_encoding_codes.label[encoding]\n if enclabel == 'ASCII':\n da = _arr2txt(dataarray, datatype)\n elif enclabel in ('B64BIN', 'B64GZ'):\n out = dataarray.tostring(ord)\n if enclabel == 'B64GZ':\n out = zlib.compress(out)\n da = base64.b64encode(out).decode()\n elif enclabel == 
'External':\n raise NotImplementedError(\"In what format are the external files?\")\n else:\n da = ''\n\n data = xml.Element('Data')\n data.text = da\n return data", "def __new__(cls, data=None,\n customization=None,\n ignore_nonstandard_types=True,\n homogenise_fields=True):\n\n if data is None:\n return super(BibTexParser, cls).__new__(cls)\n else:\n # For backwards compatibility: if data is given, parse and return the `BibDatabase` object instead of the\n # parser.\n parser = BibTexParser()\n parser.customization = customization\n parser.ignore_nonstandard_types = ignore_nonstandard_types\n parser.homogenise_fields = homogenise_fields\n return parser.parse(data)", "def tag(tag_name, parser):\n return parser >> (lambda x: Tag(tag_name, x))", "def get(self, name):\n # type: (str) -> Optional[Tag]\n tagname = name[3:] if name.startswith('end') else name # type: str\n\n if tagname not in self.tags:\n return None\n return self.tags[tagname if tagname == name else 'END']", "def create_tags():\n\n INPUT = \"\"\"\n \"Python general\",Python\n R,\"Other Programming Languages\"\n Java,\"Other Programming Languages\"\n C-Languages,\"Other Programming Languages\"\n Analytics,\"Data Science\"\n Visualization,\"Data Science\"\n \"Big Data\",\"Data Science\"\n Predictions,\"Data Science\"\n MongoDB,Databases\n \"Web Servers and MicroFWs (Flask/Tornado/Nginx/...)\",Web\n Ipython,Python\n \"Web General\",Web\n Socket,DevOps\n Django,\"Application Frameworks\"\n Docker,DevOps\n Security,Security\n Privacy,Security\n Odoo,\"Application Frameworks\"\n \"Scientific Libraries (Numpy/Pandas/SciKit/...)\",\"Data Science\"\n Pyramid,\"Application Frameworks\"\n Plone,\"Application Frameworks\"\n \"Data Science\",\"Data Science\"\n Machine-Learning,\"Data Science\"\n PostgreSQL,Databases\n Django-Girls,Community\n Agile,\"Development Methods\"\n Documentation,Programming\n \"DevOps general\",DevOps\n Community,Community\n \"Natural Language Processing\",\"Data Science\"\n PyPy,Python\n Open-Source,\"Open Source\"\n Linux,\"Operating Systems\"\n \"SQL Alchemy\",Databases\n Communication,Community\n Tooling,Programming\n \"Test Libraries (pyTest/node/...)\",Testing\n MySQL,Databases\n Packaging,Python\n \"JavaScript Web Frameworks (AngularJS/ReactJS/...)\",Web\n \"Internet of Things (IoT)\",Hardware\n Performance,Programming\n Saltstack,DevOps\n Management,\"Development Methods\"\n Scrum,\"Development Methods\"\n Kanban,\"Development Methods\"\n Internationalization,Programming\n \"Behavior Driven Development (BDD)\",\"Development Methods\"\n HTML5,Web\n NoSQL,Databases\n OpenGL,Web\n \"Test Driven Development (TDD)\",Testing\n Education,Educational\n CPython,Python\n APIs,Web\n \"Python 3\",Python\n \"Best Practice\",\"Best Practice and Use Cases\"\n Development,Programming\n Testing,Testing\n Beginners,Educational\n Programming,Programming\n Cython,Python\n \"Deep Learning\",\"Data Science\"\n Unix,\"Operating Systems\"\n \"Case Study\",\"Case Study\"\n E-Commerce,Web\n \"Distributed Systems\",DevOps\n \"Functional Programming\",Programming\n Architecture,Programming\n OpenStack,DevOps\n \"Raspberry PI\",Hardware\n Teaching,\"Everything Else\"\n \"Meta Classes\",Programming\n \"Public Cloud (AWS/Google/...)\",DevOps\n \"Augmented Reality\",\"Everything Else\"\n Engineering,\"Everything Else\"\n Physics,Sciences\n \"Clean Code\",Educational\n \"System Administration\",DevOps\n Mix-Ins,Programming\n \"Static Analysis\",\"Everything Else\"\n \"Compiler and Interpreters\",Python\n Type-Hinting,Programming\n \"Web 
Crawling\",Web\n JavaScript,\"Other Programming Languages\"\n NodeJS,Web\n \"Conferences and Meet-Ups\",Community\n Databases,Databases\n Infrastructure,DevOps\n \"Elastic Search\",Databases\n Go-Lang,\"Other Programming Languages\"\n HTTP,Web\n Operations,DevOps\n \"Configuration Management (Ansible/Fabric/Chef/...)\",DevOps\n \"Deployment/Continuous Integration and Delivery\",DevOps\n Jenkins,Testing\n Science,Sciences\n Authentication,Security\n 3D,\"Everything Else\"\n Blender,\"Everything Else\"\n Diversity,Community\n Robotics,Hardware\n Human-Machine-Interaction,Hardware\n Debugging,Testing\n \"Euro Python and EPS\",Community\n LaTeX,\"Other Programming Languages\"\n Game-Development,\"Everything Else\"\n Kivy,Python\n Cross-Platform-Development,Python\n Git,DevOps\n PyQt,Programming\n Virtualization,DevOps\n \"Software Design\",Programming\n Multi-Processing,Programming\n Multi-Threading,Programming\n Windows,\"Operating Systems\"\n \"Messaging and Job Queues (RabbitMQ/Redis/...)\",DevOps\n \"Fun and Humor\",\"Everything Else\"\n Command-Line,Programming\n CMS,Web\n \"GEO and GIS\",\"Everything Else\"\n \"Graph Databases\",Databases\n Abstractions,\"Everything Else\"\n \"Code Analysis\",Programming\n Wearables,Hardware\n Mobile,Web\n \"Jupyter/iPython Notebook\",Python\n RESTful,Web\n Cryptography,Security\n OpenCV,Hardware\n \"ASYNC / Concurreny\",Programming\n \"Virtual Env\",Programming\n PyPi,Python\n Micro-Computers,Hardware\n Microservices,Programming\n Scaling,DevOps\n \"Python Software Foundation (PSF)\",Community\n workforce,Business\n DIY,\"Everything Else\"\n \"Image Processing\",\"Everything Else\"\n \"Mac OS X\",\"Operating Systems\"\n \"Data Structures\",Programming\n \"System Architecture\",DevOps\n Algorithms,\"Data Science\"\n PyLadies,Community\n \"The Answer to Life the Universe and Everything Else\",\"Everything Else\"\n Gadgets,Hardware\n \"All Other Programming Languages\",\"Other Programming Languages\"\n \"Use Case\",\"Best Practice and Use Cases\"\n Sensors,Hardware\n \"Other Hardware\",Hardware\n failures/mistakes,\"Best Practice and Use Cases\"\n clients,Business\n freelancing,Business\n \"Mind Bending\",\"Everything Else\"\n Templating,Web\n legacy-code,Programming\n MicroPython,Python\n \"Python 2\",Python\n python,Python\n Data,\"Data Science\"\n Structures,\"Data Science\"\n Web,Web\n Business,Business\n Notebook,\"Data Science\"\n Jupyter/iPython,\"Data Science\"\n Life,Community\n Universe,Sciences\n Deep,\"Data Science\"\n Learning,\"Data Science\"\n Internet,Web\n \"Internet of Things\",DevOps\n EPS,Community\n EuroPython,Community\n \"Open Stack\",DevOps\n finance,\"\"\n Trading,\"\"\n \"\"\".strip()\n\n buffer = StringIO(INPUT)\n\n reader = csv.reader(buffer)\n for line in reader:\n ConferenceTag.objects.create(\n name=line[0].strip(), category=line[1].strip()\n )\n print(\"Created tag\", line[0].strip())" ]
[ "0.5189606", "0.5115586", "0.5112532", "0.51076484", "0.51050496", "0.5092901", "0.508095", "0.5072821", "0.50573117", "0.5042166", "0.49817452", "0.4949033", "0.49404618", "0.49232757", "0.49211174", "0.49181822", "0.4915639", "0.48730016", "0.4843272", "0.47862923", "0.4766219", "0.4753728", "0.47483084", "0.47379243", "0.4732054", "0.4728616", "0.4727806", "0.4721076", "0.4695881", "0.46782252" ]
0.5681472
0
Returns tag type of the current tag based on its previous tag (super tag) for classes.
def tagClassTypeDecidingMethod(self, parentTagType):
    # DOC {{{
    # }}}

    # CODE {{{
    # is always class no matter what
    return PythonTag.TT_CLASS
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_class_for(self, elem):\r\n\t\treturn self.__tag_to_cls.get(elem.tag, self.__default_cls)", "def get_type(self, ):\n return self.attrs.get(self.AttributeNames.TYPE, None)", "def get_class_for_tag(self, tag):\r\n return self._mapping[tag]", "def nic_tag_type(self):\n # return type of the nictag or empty string if self.nic_tag is not found in Node.all_nictags\n return Node.all_nictags().get(self.nic_tag, '')", "def get_class_name(self):\n\n if \"class\" in self._root.attrib:\n return self._root.attrib['class']\n else:\n return self._root.tag", "def type(self):\n if self._type is None:\n self._type = None if len(self) == 0 else self.top.__class__\n return self._type", "def stor_type(self):\n type = self.type\n if isinstance(type, Enum): return type.base\n return type", "def get_tag_class(stext, tag_=\"pre\"):\n if \"<\" + tag_ + \" class=\" in stext and \">\" in stext:\n sclass = stext.split('=')[1]\n sclass = sclass[:sclass.index('>')]\n if \"'\" in sclass:\n sclass = sclass.replace(\"'\", '')\n elif '\"' in sclass:\n sclass = sclass.replace('\"', '')\n else:\n sclass = \"\"\n return sclass", "def getMostEnclosingTag(tagNode, keyClasses):\n mostEnclosingTag = tagNode\n currentTag = tagNode\n while currentTag.parent!=None:\n currentTag = currentTag.parent\n # parent tags can be different from what we have in the 'tags' variable.\n # if the tag has the classes we were looking for in article tags, then it is a tag we want\n if currentTag.get('class') and len(set(currentTag.get('class')) & set(artClasses))>0:\n # this tag has what \n mostEnclosingTag = currentTag\n return mostEnclosingTag", "def getParentType(soup, refs, currentType, tagType='entitytype'):\n propSchema = soup.find( 'schema', attrs={'namespace': getNamespace(currentType)})\n \n if propSchema is None:\n return False, None, None, None\n propEntity = propSchema.find( tagType, attrs={'name': getType(currentType)})\n \n if propEntity is None:\n return False, None, None, None\n\n currentType = propEntity.get('basetype')\n if currentType is None:\n return False, None, None, None\n \n currentType = currentType.replace('#','')\n SchemaNamespace, SchemaType = getNamespace(currentType), getType(currentType)\n propSchema = soup.find( 'schema', attrs={'namespace': SchemaNamespace})\n\n if propSchema is None:\n success, innerSoup, uri = getSchemaDetails(\n *refs.get(SchemaNamespace, (None,None)))\n if not success:\n return False, None, None, None\n innerRefs = getReferenceDetails(innerSoup)\n propSchema = innerSoup.find(\n 'schema', attrs={'namespace': SchemaNamespace})\n if propSchema is None:\n return False, None, None, None\n else:\n innerSoup = soup\n innerRefs = refs\n\n return True, innerSoup, innerRefs, currentType", "def get_tag(self, tag_type: str) -> str:\n if tag_type in self.tags:\n return self.tags[tag_type]\n return None", "def guess_type(object):\n # retrieve a list of classes\n classes = (\n re.match(\"<class '(.+)'>\", str(object.__class__)).groups()[0].split(\".\")\n )\n # Return the most specific one\n return classes[-1]", "def type(self) -> Type[ClassType]:\n return self._type", "def get_type(self):\n if not self.xmlnode.hasProp(\"type\"):\n self.upgrade()\n return from_utf8(self.xmlnode.prop(\"type\"))", "def _get_xml_tag(doc):\n tag = type(doc).type_key.split('.')[3]\n tag = convert.str_to_camel_case(tag)\n\n return tag", "def get_tag(self) -> int:\n return self.tag", "def _get_tag(self):\n return self.__tag", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def 
_get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type" ]
[ "0.582632", "0.57994694", "0.57834005", "0.5781188", "0.5757738", "0.5732667", "0.5722571", "0.56800544", "0.56718004", "0.5639041", "0.55776334", "0.5570752", "0.5524703", "0.5488256", "0.54447544", "0.542426", "0.5419941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941", "0.5353941" ]
0.67080295
0
Returns tag type of the current tag based on its previous tag (super tag) for functions/methods.
def tagFunctionTypeDecidingMethod(self, parentTagType):
    # DOC {{{
    # }}}

    # CODE {{{
    if (parentTagType == PythonTag.TT_CLASS):
        return PythonTag.TT_METHOD
    else:
        return PythonTag.TT_FUNCTION
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_tag(self):\n return self.__tag", "def get_tag(self, tag_type: str) -> str:\n if tag_type in self.tags:\n return self.tags[tag_type]\n return None", "def nic_tag_type(self):\n # return type of the nictag or empty string if self.nic_tag is not found in Node.all_nictags\n return Node.all_nictags().get(self.nic_tag, '')", "def _get_xml_tag(doc):\n tag = type(doc).type_key.split('.')[3]\n tag = convert.str_to_camel_case(tag)\n\n return tag", "def tag(self):\n return self._tag", "def tagClassTypeDecidingMethod(self, parentTagType):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # is always class no matter what\n return PythonTag.TT_CLASS\n # }}}", "def get_tag(self) -> int:\n return self.tag", "def get_tag(self):\n return self.tag", "def tag(self):\n return self.tag_", "def tag(self):\n return self._tag", "def stor_type(self):\n type = self.type\n if isinstance(type, Enum): return type.base\n return type", "def get_type(self, ):\n return self.attrs.get(self.AttributeNames.TYPE, None)", "def tag(self) -> 'genmod.Tag':\n return self._generation.tag", "def getTypeCode(self):\n return _libsbml.ReplacedBy_getTypeCode(self)", "def tag(self) -> int:\n return self.proto.tag", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type", "def _get_type(self):\n return self.__type" ]
[ "0.6046639", "0.5874162", "0.5833042", "0.5807177", "0.5753416", "0.57458633", "0.5724362", "0.56941104", "0.56460464", "0.56113297", "0.55841863", "0.55154127", "0.55130136", "0.54260707", "0.5419026", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151", "0.5407151" ]
0.5995309
1
Initializes instances of VimReadlineBuffer().
def __init__(self, vimBuffer):
    # DOC {{{
    # }}}

    # CODE {{{
    # remember the settings
    self.vimBuffer = vimBuffer

    # initialize instance attributes {{{
    self.currentLine = -1
    self.bufferLines = len(vimBuffer)
    # }}}
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, buf):\n self.lines = buf.splitlines()\n self.col_offs = [OffsetList() for i in range(len(self.lines))]\n self.col_lens = [len(line) for line in self.lines]", "def init_readline():\n if g.command_line:\n return\n\n if has_readline:\n g.READLINE_FILE = os.path.join(get_config_dir(), \"input_history\")\n\n if os.path.exists(g.READLINE_FILE):\n readline.read_history_file(g.READLINE_FILE)\n dbg(c.g + \"Read history file\" + c.w)", "def __init__(self, vim):\n\n # sys.stderr.writelines([repr(e)+'\\n' for e in traceback.extract_stack()])\n\n self.vim = vim\n self.status = Pinyin_Status.wait\n self.wrapper = None", "def _initialize_buffers(self) -> None:", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = ''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def __init__(self):\n self.buffer_ = [None] * 128 * 128\n self.cursors = ArrayList()", "def __init__(self):\n self.command_parser = CmdParser()\n self.line_edition = LineEdit()\n self.communication = Communication_C()\n self.print_answer = Print_answer()\n self.sighdl = Sig()\n sys.argv.pop(0)\n self.args = sys.argv\n self.history = []\n self.command = []\n self.answer = {}", "def init_buffer(self, molecule):\n file_name = \"log\"\n with open(file_name, \"r\") as f:\n log = f.read()\n\n self.nbasis = re.findall('NBasis=\\s+(\\d+)\\s+', log)\n self.nbasis = int(self.nbasis[0])\n self.nfc = re.findall('NFC=\\s+(\\d+)\\s+', log)\n self.nfc = int(self.nfc[0])\n self.nocc = re.findall('NOA=\\s+(\\d+)\\s+', log)\n self.nocc = int(self.nocc[0])\n self.nvirt = re.findall('NVA=\\s+(\\d+)\\s+', log)\n self.nvirt = int(self.nvirt[0])\n self.norb = self.nocc + self.nvirt\n\n self.pos_old = np.zeros((molecule.nat_qm, molecule.ndim))\n self.ao_overlap = np.zeros((self.nbasis, self.nbasis))\n self.mo_coef_old = np.zeros((self.norb, self.nbasis))\n self.mo_coef_new = np.zeros((self.norb, self.nbasis))\n self.ci_coef_old = np.zeros((molecule.nst, self.nocc, self.nvirt))\n self.ci_coef_new = np.zeros((molecule.nst, self.nocc, self.nvirt))", "def __init__(self):\n self.start = datetime.datetime.now()\n self.linecount = 0\n if sys.stdout.isatty():\n self.term = Terminal()\n sys.stdout.write(self.term.clear())", "def __init__(self, keep_last_n_lines=5) :\r\n self.contextLines_ = keep_last_n_lines\r\n self.data_ = CircularBuffer(1024)\r\n self.lineNumber_ = 1\r\n self.charNumber_ = 0", "def __init__(self, height, width, yloc, xloc):\n AnsiWindow.__init__(self, height, width, yloc, xloc)\n self._position = self._position_last = 0\n self._quit = False\n self.content = list()\n self.keyset = VI_KEYSET.copy()\n self.init_keystrokes()", "def setup_buffer(self):\n vim.command(\"setlocal buftype=nofile\")\n vim.command(\"setlocal bufhidden=wipe\")\n vim.command(\"setlocal encoding=utf-8\")\n vim.command(\"setlocal nobuflisted\")\n vim.command(\"setlocal noundofile\")\n vim.command(\"setlocal nobackup\")\n vim.command(\"setlocal noswapfile\")\n vim.command(\"setlocal nowrap\")\n vim.command(\"setlocal nonumber\")\n if vim.eval(\"&hidden\") == '0':\n vim.command(\"set 
hidden\")\n self.nohidden_set = True # mmh...", "def __init__(self):\n self.read_input()\n self.update_binaries()", "def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self._line_num = -1\n self._prev_comment_line_num = -1\n self._prev_non_empty = -1\n self._in_same_block = True\n self._block_alerted = False\n self._reserved_token = SENTINEL_TOKEN", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def __init__(self):\n Parser.__init__(self)\n self.__line_number = 0 # initialize the line number to 0", "def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._lines: TokenLines = defaultdict(list)", "def initialize(self):\n # TODO: This seems wrong, the user setting value is never used anywhere.\n if 'EnErrorStyle' not in self._vim.vars:\n self._vim.vars['EnErrorStyle'] = 'EnError'\n self._vim.command('highlight EnErrorStyle ctermbg=red gui=underline')\n\n # TODO: this SHOULD be a buffer-local setting only, and since it should\n # apply to all Scala files, ftplugin is the ideal place to set it. I'm\n # not even sure how this is currently working when only set once.\n self._vim.command('set omnifunc=EnCompleteFunc')\n\n # TODO: custom filetype ftplugin\n self._vim.command(\n 'autocmd FileType package_info nnoremap <buffer> <Space> :call EnPackageDecl()<CR>')\n self._vim.command('autocmd FileType package_info setlocal splitright')", "def setup(self):\n readline.parse_and_bind(\"set enable-keypad on\")\n readline.set_completer(self.complete)\n readline.set_completer_delims(\" \\t\\n;\")\n readline.parse_and_bind(\"tab: complete\")", "def __init__(self, buffer_size=DEFAULT_STREAM_BUFFER_SIZE):\n self._reader = cv2.VideoCapture()\n self._queue = deque(maxlen=buffer_size)\n self._stop_event = threading.Event()\n self._video_info = {}", "def __init__(self, raw, progress_bar):\r\n io.BufferedReader.__init__(self, raw)\r\n self.progress_bar = progress_bar", "def __init__(self):\n self.buffer = bytearray()", "def __init__ (self, istream) :\r\n ReaderA.__init__(self) # call parent\r\n self.is_ = istream\r\n self.cached_ = CircularBuffer(132, True)", "def __init__(self):\n # Initialise a dictionary to store arbitrary\n # attributes taken from the program logfile\n self.__dict = {}\n # List of tables\n self.__tables = []\n # List of keytexts\n self.__keytexts = []\n # For fragment retrieval\n self.set_source_file(\"\")\n self.set_startline(-1)\n self.set_endline(-1)\n # Flags\n self.__nonzero = False", "def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._lines: TokenLines = defaultdict(list)\n self._docstrings = get_docstring_tokens(self.file_tokens)", "def _init():\n line.set_data([], [])\n return line,", "def __init__(self):\n # Initialise the base class\n fragment.__init__(self)\n # Initialise program-specific flags and\n # attributes\n self.__isccp4 = False\n self.__termination = False\n # List of keyword lines\n self.__keywords = []\n # Dictionary of logical name/filename pairs\n self.__logicalnames = {}", "def terminal_init(self):\n pass", "def __init__(self):\n self.block_stack = []\n\n # TODO do I still need this?\n resource_path = None\n if not resource_path:\n resource_path = os.path.join(\n os.path.split(__file__)[0], \"../pymarkdown/resources\"\n )\n InlineHelper.initialize(resource_path)", "def init_buffer(self):\n \n self.shape.buf = 
[pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])" ]
[ "0.648197", "0.6288714", "0.61934733", "0.6109114", "0.60832447", "0.592465", "0.58992684", "0.58968043", "0.5819736", "0.5803652", "0.58010024", "0.5799181", "0.5773363", "0.5733199", "0.5715456", "0.56676733", "0.56251436", "0.56089485", "0.5603505", "0.55618817", "0.5560599", "0.5521216", "0.55047953", "0.54861253", "0.5469634", "0.5456726", "0.54472274", "0.5434568", "0.541576", "0.53954506" ]
0.79332894
0
Returns the index of line in 'tagLineNumbers' list that is nearest to the specified cursor row.
def getNearestLineIndex(row, tagLineNumbers):
    # DOC {{{
    # }}}

    # CODE {{{
    # initialize local auxiliary variables {{{
    nearestLineNumber = -1
    nearestLineIndex = -1
    # }}}

    # go through all tag line numbers and find the one nearest to the specified row {{{
    for lineIndex, lineNumber in enumerate(tagLineNumbers):
        # if the current line is nearer the current cursor position, take it {{{
        if (nearestLineNumber < lineNumber <= row):
            nearestLineNumber = lineNumber
            nearestLineIndex = lineIndex
        # }}}

        # if we've got past the current cursor position, let's end the search {{{
        if (lineNumber >= row):
            break
        # }}}
    # }}}

    # return index of the line with the nearest tag
    return nearestLineIndex
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLinescanPos(self):\n return self.handle.pos().toPoint()", "def index_tag_in_lines(lines, tag):\n for index, line in enumerate(lines):\n if tag in line:\n return index\n raise ValueError(f'{tag} not found.')", "def get_corresponding_lineno(self, lineno: int) -> int:\n for template_line, code_line in reversed(self.debug_info):\n if code_line <= lineno:\n return template_line\n return 1", "def get_current_line(self, document):\r\n return document.get_iter_at_mark(document.get_insert()).get_line() + 1", "def Find_Line_By_XY( self, x, y ):\r\n for i in self.handle_list:\r\n #examine the bounding box of each line\r\n bbox = self.canvas_one.bbox( i.line_handle )\r\n xb1 = bbox[ 0 ]\r\n yb = ( bbox[ 1 ] + bbox[ 3 ] ) / 2\r\n xb2 = bbox[ 2 ]\r\n if x >= xb1 and x <= xb2 and abs( y-yb ) <= cb.ytick / 2:\r\n #found, return handle\r\n return i\r\n #not found return -1\r\n return -1", "def _get_linespan(self, lnum):\n lcount = self.get_linecount()\n _, q, _ = slice(lnum).indices(lcount)\n if q < 0 or q >= lcount:\n raise IndexError(\"line number %d not in 0..%d\" % (q, lcount))\n\n start = self.__linepos[q] + 1\n if q < lcount - 1:\n end = self.__linepos[q + 1]\n else:\n end = len(self.input) - 1\n\n return start, end + 1", "def _get_line_after_cursor(self):\n return self.input_buffer()[self.cursor_offset():].split('\\n', 1)[0]", "def line_at_cursor(code: str, cursor_pos: int = 0):\n offset = 0\n lines = code.splitlines(True)\n for line in lines:\n next_offset = offset + len(line)\n if not line.endswith('\\n'):\n # If the last line doesn't have a trailing newline, treat it as if\n # it does so that the cursor at the end of the line still counts\n # as being on that line.\n next_offset += 1\n if next_offset > cursor_pos:\n break\n offset = next_offset\n else:\n line = \"\"\n return (line, offset)", "def Find_Line_By_Node( self, node ):\r\n for i in self.handle_list:\r\n if i.node == node:\r\n #found, return handle\r\n return i\r\n #not found return -1\r\n return -1", "def _get_linepos(self, pos):\n t = self.input\n if pos < 0 or pos > len(t):\n raise IndexError(\"position %d not in 0..%d\" % (pos, len(t)))\n\n lpc = self.__linepos\n\n # Locate the smallest known line index whose end is at or after p.\n def locate(p):\n self._update_linetab(p)\n lo = 0\n hi = len(lpc) - 1\n if lpc[hi] < p:\n return hi\n\n # Invariant: lpc[lo] < p; lpc[hi] >= p\n while lo + 1 < hi:\n mid = (lo + hi) // 2\n if lpc[mid] > p: hi = mid\n elif lpc[mid] < p: lo = mid\n else: return mid - 1\n return hi - 1\n\n lnum = locate(pos)\n start, end = self._get_linespan(lnum)\n cnum = pos - start\n return lnum, cnum", "def getline(self, lnum=None):\n return self._vim.current.buffer[lnum] if lnum else self._vim.current.line", "def get_line_nr(view, point):\n return view.rowcol(point)[0] + 1", "def get_linepos(self, pos):\n lnum, cnum = self._get_linepos(pos)\n return lnum + self.LINE_NUM_BASE, cnum", "def _get_line(self, lnum):\n start, end = self._get_linespan(lnum)\n return self.input[start:end]", "def lineNumber( self ):\n return self.commands[0].lineNumber if len(self.commands) >= 1 else None", "def cursor_coordinates(self):\n text = self.getText()\n lines = text.split(\"\\n\")\n pos = self.getCursorPos()\n if pos == 0:\n return (0, 0)\n i = 0\n cursor_row = -1\n cursor_col = -1\n for row, line in enumerate(lines):\n i += len(line) + 1 # we need to include \"\\n\"\n if pos < i:\n cursor_row = row\n cursor_col = pos - i + len(line) + 1\n break\n return (cursor_col, cursor_row)", "def LineNumber(self):\n ret = 
libxml2mod.xmlTextReaderLocatorLineNumber(self._o)\n return ret", "def line_number(self):\n return self._line_number", "def _search(listing, absolute_idx):\n if not listing:\n return 0\n if len(listing) == 1:\n return 0 if absolute_idx <= listing[0] else 1\n\n for idx, line_break_idx in enumerate(listing):\n if line_break_idx >= absolute_idx:\n return idx", "def get_headline_position(self, headline: Headline) -> Tuple[int, int]:\n return self.get_regex_position(headline.name)", "def editor_line(self) -> int:\n return self.raw_line # raw_line is already one-indexed.", "def lineNumber(self):\n if self.__lineNumber is None:\n self.__lineNumber = self.__source.count(\"\\n\", 0, self.__offset) + 1\n\n return self.__lineNumber", "def _insertion_index(points, point):\n distance = sys.float_info.max\n index = None\n begin = points[-1]\n for i, p in enumerate(points):\n temp = _distance_to_line(begin, p, point)\n if temp < distance:\n distance = temp\n index = i\n begin = p\n return index", "def get_curpos(self):\n for i in range(len(self.tree)):\n if self.path == self.tree[i][2]:\n return i\n else:\n return -1", "def get_line(self, lnum):\n return self._get_line(lnum - self.LINE_NUM_BASE)", "def get_nearest_row(self):\n return (self.rect.top - (self.screen.get_height() // 12)) // self.maze.block_size", "def find_next_number(line, pos=0):\n m = number_re.search(line[pos:])\n if m:\n span = m.span()\n return (span[0]+pos,span[1]+pos)", "def get_linenumber():\n\n # inspect.stack()[0][2] returns line number in this function\n lineno = str(inspect.stack()[1][2])\n\n return lineno", "def _findPosition(self, key):\n for i in range(len(self._entryList)):\n if self._entryList[i].key == key:\n return i\n return None", "def lineOffset(self):\n if self.__lineOffset is None:\n self.__lineOffset = self.__offset - self.__source.rfind(\"\\n\", 0, self.__offset) - 1\n\n return self.__lineOffset" ]
[ "0.649168", "0.6456299", "0.6421644", "0.6305069", "0.6262338", "0.62518704", "0.6186208", "0.61382073", "0.6082272", "0.6054712", "0.60263366", "0.5928631", "0.59212524", "0.5885107", "0.5832795", "0.5810198", "0.57756764", "0.57733077", "0.57636684", "0.5742054", "0.5691415", "0.5684812", "0.56757724", "0.5669003", "0.565311", "0.5613092", "0.5608887", "0.5584407", "0.55683607", "0.5550334" ]
0.8415959
0
Reads the tags for the specified buffer number. Returns a tuple (taglinenumber[buffer], tags[buffer],).
def getTags(bufferNumber, changedTick):
    # DOC {{{
    # }}}

    # CODE {{{
    # define global variables
    global TAGLINENUMBERS, TAGS, BUFFERTICKS

    # return immediately if there's no need to update the tags {{{
    if (BUFFERTICKS.get(bufferNumber, None) == changedTick):
        return (TAGLINENUMBERS[bufferNumber], TAGS[bufferNumber],)
    # }}}

    # get the tags {{{
    simpleTagsParser = SimplePythonTagsParser(VimReadlineBuffer(vim.current.buffer))
    tagLineNumbers, tags = simpleTagsParser.getTags()
    # }}}

    # update the global variables {{{
    TAGS[bufferNumber] = tags
    TAGLINENUMBERS[bufferNumber] = tagLineNumbers
    BUFFERTICKS[bufferNumber] = changedTick
    # }}}

    # return the tuple (tagLineNumbers, tags,)
    return (tagLineNumbers, tags,)
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def findTag(bufferNumber, changedTick):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # try to find the best tag {{{\n try:\n # get the tags data for the current buffer\n tagLineNumbers, tags = getTags(bufferNumber, changedTick)\n\n # link to vim's internal data {{{\n currentBuffer = vim.current.buffer\n currentWindow = vim.current.window\n row, col = currentWindow.cursor\n # }}}\n\n # get the index of the nearest line\n nearestLineIndex = getNearestLineIndex(row, tagLineNumbers)\n\n # if any line was found, try to find if the tag is appropriate {{{\n # (ie. the cursor can be below the last tag but on a code that has nothing\n # to do with the tag, because it's indented differently, in such case no\n # appropriate tag has been found.)\n while (nearestLineIndex > -1):\n # get the line number of the nearest tag\n nearestLineNumber = tagLineNumbers[nearestLineIndex]\n\n # walk through all the lines in range (nearestTagLine, cursorRow) {{{\n for lineNumber in range(nearestLineNumber + 1, row):\n # get the current line\n line = currentBuffer[lineNumber]\n\n # count the indentation of the line, if it's lower than the tag's, the tag is invalid {{{\n if (len(line)):\n # initialize local auxiliary variables {{{\n lineStart = 0\n i = 0\n # }}}\n\n # compute the indentation of the line {{{\n while ((i < len(line)) and (line[i].isspace())):\n # move the start of the line code {{{\n if (line[i] == '\\t'):\n lineStart += SimplePythonTagsParser.TABSIZE\n else:\n lineStart += 1\n # }}}\n\n # go to the next character on the line\n i += 1\n # }}}\n\n # if the line contains only spaces, skip it {{{\n if (i == len(line)):\n continue\n # }}}\n # if the next character is a '#' (python comment), skip the line {{{\n if (line[i] == '#'):\n continue\n # }}}\n # if the next character is a ')', skip the line {{{\n # this is so that the following style works correctly:\n #\n # def foo(\n # args,\n # ):\n # pass\n if (line[i] == ')'):\n continue\n # }}}\n\n # if the line's indentation starts before or at the nearest tag's one, the tag is invalid {{{\n if (lineStart <= tags[nearestLineNumber].indentLevel):\n nearestLineIndex -= 1\n break\n # }}}\n # }}}\n # }}}\n # the tag is appropriate, so use it {{{\n else:\n break\n # }}}\n # }}}\n # no appropriate tag has been found {{{\n else:\n nearestLineNumber = -1\n # }}}\n\n # describe the cursor position (what tag the cursor is on) {{{\n # reset the description\n tagDescription = \"\"\n\n # if an appropriate tag has been found, set the description accordingly {{{\n if (nearestLineNumber > -1):\n tagInfo = tags[nearestLineNumber]\n tagDescription = \"[%s]\" % (tagInfo.fullName, ) # not using PythonTag.TAG_TYPE_NAME[tagInfo.type] because ENOSPC\n # }}}\n # }}}\n\n # update the variable for the status line so it get updated with the new description\n vim.command(\"let w:PHStatusLine=\\\"%s\\\"\" % (tagDescription,))\n # }}}\n\n # handle possible exceptions {{{\n except Exception:\n # bury into the traceback {{{\n ec, ei, tb = sys.exc_info()\n while (tb != None):\n if (tb.tb_next == None):\n break\n tb = tb.tb_next\n # }}}\n\n # spit out the error {{{\n print(\"ERROR: %s %s %s:%u\" % (ec.__name__, ei, tb.tb_frame.f_code.co_filename, tb.tb_lineno,))\n time.sleep(0.5)\n # }}}\n # }}}\n # }}}", "def getTags(number=None):", "def deleteTags(bufferNumber):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # define global variables\n global TAGS, TAGLINENUMBERS, BUFFERTICKS\n\n # try to delete the tags for the buffer {{{\n try:\n del TAGS[bufferNumber]\n del TAGLINENUMBERS[bufferNumber]\n del 
BUFFERTICKS[bufferNumber]\n except:\n pass\n # }}}\n # }}}", "def get_all_tags(buffer, name=''):\n\n if name:\n name = name + ' '\n result = ''\n loc = buffer.beginning_of_buffer()\n while loc < buffer.end_of_buffer():\n over = loc.get_overlays()\n if over != []:\n loc2 = loc.forward_overlay(over[0]) - 1\n result = result + name + over[0].name() \\\n + ' %s:%s %s:%s\\n' % (loc.line(), loc.column(),\n loc2.line(), loc2.column())\n loc = loc2 + 1\n else:\n loc = loc.forward_overlay()\n return result", "def _readtag(self):\n tag = Tag()\n tag.tag = self.reader.readint(1)\n tag.len = self.reader.readint(2)\n\n if tag.len > 0:\n tag.data = self.reader.read(tag.len)\n return tag", "def get_buffer(number):\n\n buffers = [buffer for buffer in vim.buffers if buffer.number == number]\n assert len(buffers) == 1\n return buffers[0]", "def read_tags(reader_ip, event_type):\n # Open socket using reader IP address\n cmd = rapid.Command(reader_ip)\n cmd.open()\n print \"Connection to %s opened\" % (reader_ip)\n\n # Reader Login\n cmd.execute(\"reader.login\", (\"admin\", \"readeradmin\"))\n rc = cmd.execute(\"reader.who_am_i\", ())\n print \"Logged in as: %s \" % rc\n\n # Open an event channel and get id\n id = cmd.getEventChannel(event_callback)\n print \"Event Channel ID %s created\" % id\n\n # Register for event_type\n cmd.execute(\"reader.events.register\", (id, event_type))\n print \"Registered for %s on Ch. %s\" % (event_type, id)\n\n # start tag read in active mode\n cmd.set(\"setup.operating_mode\", \"active\")\n print \"Mode: Active\"\n\n # stdout redirection for creating tag list\n stdout = sys.stdout #backup original stdout to console\n sys.stdout = open(\"tag_list.log\", \"w\")\n\n # wait for some tag reads\n time.sleep(1)\n\n # stop tag read in standby mode\n cmd.set(\"setup.operating_mode\", \"standby\")\n sys.stdout.close() # close log file\n sys.stdout = stdout # revert to console output\n print \"Mode: Standby\"\n print \"./tag_list.log generated\"\n\n # Unregister for event_type\n cmd.execute(\"reader.events.unregister\", (id, event_type))\n print \"Unregistered for %s on Ch. 
%s\" % (event_type, id)\n\n # Close the command connection and event channel\n cmd.close()\n print \"Connection Closed\"", "def get_pos_tags(blob):\n return blob.pos_tags", "def getTags(self):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # initialize the resulting list of the tag line numbers and the tag information {{{\n tagLineNumbers = []\n tags = {}\n # }}}\n\n # initalize local auxiliary variables {{{\n tagsStack = []\n lineNumber = 0\n # }}}\n\n # go through all the lines in the source and localize all python tags in it {{{\n while 1:\n # get next line\n line = self.source.readline()\n\n # finish if this is the end of the source {{{\n if (line == ''):\n break\n # }}}\n\n # increase the line number\n lineNumber += 1\n\n # extract the line indentation characters and its content {{{\n lineMatch = self.COMMENTS_INDENT_RE.match(line)\n lineContent = lineMatch.group(2)\n # }}}\n\n # handle the class tag {{{\n # match for the class tag\n tagMatch = self.CLASS_RE.match(lineContent)\n\n # if the class tag has been found, store some information on it {{{\n if (tagMatch):\n currentTag = self.getPythonTag(tagsStack, lineNumber, lineMatch.group(1),\n tagMatch.group(1), self.tagClassTypeDecidingMethod)\n tagLineNumbers.append(lineNumber)\n tags[lineNumber] = currentTag\n # }}}\n # }}}\n # handle the function/method/none tag {{{\n else:\n # match for the method/function tag\n tagMatch = self.METHOD_RE.match(lineContent)\n\n # if the method/function tag has been found, store some information on it {{{\n if (tagMatch):\n currentTag = self.getPythonTag(tagsStack, lineNumber, lineMatch.group(1),\n tagMatch.group(1), self.tagFunctionTypeDecidingMethod)\n tagLineNumbers.append(lineNumber)\n tags[lineNumber] = currentTag\n # }}}\n # }}}\n # }}}\n\n # return the tags data for the source\n return (tagLineNumbers, tags,)\n # }}}", "def all_lines_with_tag(mm, tag, nline_max=1024*1024):\n all_idx = []\n for iline in range(nline_max):\n idx = mm.find(tag.encode())\n if idx == -1:\n break\n mm.seek(idx)\n all_idx.append(idx)\n mm.readline()\n\n # guard\n if iline >= nline_max-1:\n raise RuntimeError('may need to increase nline_max')\n return all_idx", "def get_tags(self, tags, filename):\n return self.get_tags_batch(tags, [filename])[0]", "def gettags(comment):\r\n\r\n tags = []\r\n\r\n tag = None\r\n datatype = None\r\n name = None\r\n tag_lineno = lineno = 0\r\n tag_text = []\r\n\r\n for line in comment.split('\\n'):\r\n line = line.strip()\r\n if line.startswith(\"@\"):\r\n tags.append((tag_lineno, tag, datatype, name, '\\n'.join(tag_text)))\r\n parts = line.split(None, 3)\r\n if len(parts) == 1:\r\n datatype = ''\r\n name = ''\r\n tag_text = []\r\n elif len(parts) == 2:\r\n datatype = parts[1]\r\n name = ''\r\n tag_text = []\r\n elif len(parts) == 3:\r\n datatype = parts[1]\r\n name = parts[2]\r\n tag_text = []\r\n elif len(parts) == 4:\r\n datatype = parts[1]\r\n name = parts[2]\r\n tag_text = [parts[3].lstrip()]\r\n tag = parts[0][1:]\r\n tag_lineno = lineno\r\n else:\r\n if line:\r\n tag_text.append(line)\r\n lineno += 1\r\n\r\n tags.append((tag_lineno, tag, datatype, name, '\\n'.join(tag_text)))\r\n\r\n return tags", "def gettag(self):\n cmd = [\"git\", \"tag\"]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n return data.decode(\"utf-8\").split(\"\\n\")", "def read_next_tag_or_seek(flv_tags):\n # is this the good place to put?\n if not flv_tags.version:\n flv_tags.parse_header()\n tag = None\n position = flv_tags.f.tell()\n try:\n tag = flv_tags.get_next_tag()\n except 
tags.EndOfFile:\n flv_tags.f.seek(position)\n raise tags.EndOfFile\n return tag", "def tag_counts (count_file):\r\n tagcounts = defaultdict(int)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split()\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0])\r\n tag = fields[2]\r\n tagcounts[tag] += count \r\n f.close() \r\n return tagcounts", "def get_tag(file, tag):\r\n import re\r\n\r\n # make sure the necessary globals are initialised\r\n global filenames # set of processed files\r\n if 'filenames' not in globals():\r\n filenames = set()\r\n global tags # dictionary of cached tag values\r\n if 'tags' not in globals() : # the collection has not yet been initialized\r\n tags = {}\r\n\r\n\r\n if file not in filenames:\r\n # file has not been processed yet\r\n\ttry:\r\n\t f = open(file, \"rt\")\r\n\texcept IOError:\r\n\t logger.warning(\"File '%s' not found.\", file)\r\n\t return \"*** ERROR *** File %s Not found***\\n\" % file\r\n\r\n\t# matches up to 5 chars at start of line followed the \"{{{\" \r\n\t# followed by tag name followed by up to five chars \r\n\t# with optional trailing white space.\r\n\tstarttag = '^(\\s*).{0,5}\\{{3}(\\S+).{0,5}\\s*$'\r\n\tstartre = re.compile(starttag)\r\n\t# matches up to 5 chars followed by \"}}}\" followed by up to 5 chars and\r\n\t# optional trailing white space.\r\n\tendtag = \"^\\s*.{0,5}\\}{3}.{0,5}\\s*$\"\r\n\tendre = re.compile(endtag)\r\n\tcapturing = False # are we capturing?\r\n\tcurtagname = \"\"\r\n\ttagvalue = \"\"\r\n\ttrim = 0\r\n\r\n\twhile True:\r\n\t l = f.readline()\r\n\t if not l: break\r\n\t if capturing:\r\n\t if endre.match(l):\r\n\t\t capturing = False\r\n\t\t tags[(file, curtagname)] = tagvalue\r\n\t\t tagvalue = ''\r\n\t\telse:\r\n\t\t tagvalue += l[trim:]\r\n\t\t tagvalue += '\\n'\r\n\r\n\r\n\t else:\r\n\t m = startre.match(l)\r\n\t\tif m: # we have a start tag\r\n trim = len(m.group(1))\r\n\t\t curtagname = m.group(2)\r\n\t\t capturing = True\r\n\r\n\tf.close()\r\n filenames.add(file)\r\n\r\n\r\n try:\r\n return tags[(file,tag)]\r\n except KeyError:\r\n\tlogger.warning(\"Tag '%(tag)s' not found in %(file)s\", \r\n\t\t\t{'file':file, 'tag':tag})\r\n\r\n\treturn \"*** ERROR *** Tag %(tag)s not found in file %(file)s ***\\n\" % \\\r\n\t\t\t\t{'file':file, 'tag':tag}", "def get_tags_from_file(tag_file):\n tags = []\n with open(tag_file) as f:\n lines = f.readlines()\n for line in lines:\n if not line.startswith(\" \") and not line.startswith(\"\\n\"):\n tag = line.strip()\n tags.append(tag)\n return tags", "def get_tag_index(self) -> List[str]:\n path = os.path.join(self.directory_path, \"__tags.json\")\n if not os.path.exists(path):\n return list()\n try:\n with open(path) as f:\n return json.load(f)\n except json.decoder.JSONDecodeError:\n print(f\"Could not get tag index. 
Check file: {path}\")", "def metadataGeoTags(tif: TiffFile):\n geoTag: TiffTag = tif.pages[0].tags.get('GeoKeyDirectoryTag')\n if geoTag is not None:\n g: TiffTag = tif.pages[0].tags.get(34737)\n g2: TiffTag = tif.pages[0].tags.get(34736)\n g3: TiffTag = tif.pages[0].tags.get(33922)\n g4: TiffTag = tif.pages[0].tags.get(33550)\n\n tags = [(geoTag.code, 'H', geoTag.count, geoTag.value),\n (g.code, 's', g.count, g.value),\n (g2.code, 'd', g2.count, g2.value),\n (g3.code, 'd', g3.count, g3.value),\n (g4.code, 'd', g4.count, g4.value)]\n return tags\n else:\n print('no geo tags in file')", "def GetTags(tag, btype, indent):\n assert tag in COLOR_SCHEME\n assert btype in ['match', 'diff']\n fbegin = BEGIN_TAG % COLOR_SCHEME[tag][btype]\n bbegin = BEGIN_TAG % COLOR_SCHEME[tag]['bckgrnd']\n lend = END_TAG\n nl_plus_indent = '\\n'\n if indent > 0:\n nl_plus_indent += bbegin + cgi.escape(\" \"*indent) + lend\n return fbegin, lend, nl_plus_indent", "def readNamedTag(bstream):\r\n #print(\"Reading Named Tag\\n\")\r\n tbyte = bstream.read(1)[0] # read 1 byte and get its numerical value #read 1 byte, switch type generated depending (stream-reader type 'abstract?' factory\r\n #print(\"Byte read: %d\" % tbyte)\r\n tname = TAG_String(bstream).value\r\n #print(\"Name read: %s\" % tname)\r\n #print(\"RNamedT - name is %s\" %tname)\r\n tpayload = TAGLIST[tbyte](bstream)\r\n tpayload.name = tname\r\n return (tname, tpayload)\r\n #object type = bleh based on the number 0-255 you just read. Which should be a 10... for TAG_Compound.\r", "def read(filepath_or_buffer: FilePathOrBuffer) -> Grid:\n with _handle_buf(filepath_or_buffer) as buf:\n return ZincParser(ZincTokenizer(buf)).parse()", "def getTagList(tags):\n tags = tags[1:len(tags)-1]\n return tags.split('><')", "def getTagsNum(self):\r\n self.gettags()", "def read_tagged_word_list(filename):\n # TODO: write and test this method\n print 'reading tagged file'", "def get_records_with_tag(inp,tag):\n if type(inp) == type(''): # Assume inp is a filename\n inp = read_cml(inp)\n return inp.findall(\".//%s\" % tag)", "def read_file(self, file_path): \n logging.info('Lendo arquivo de {0}'.format(file_path))\n file_with_tags = open(file_path, \"r\", encoding='utf-8')\n return file_with_tags.readlines()", "def get_tag(self, tag, filename):\n return self.get_tag_batch(tag, [filename])[0]", "def word_tag_counts (count_file):\r\n wordtagcounts = defaultdict(list)\r\n f = open(count_file, 'r')\r\n for line in f:\r\n fields = line.split(\" \")\r\n if fields[1] != 'WORDTAG':\r\n continue\r\n count = int(fields[0].strip())\r\n tag = fields[2].strip()\r\n word = fields[3].strip()\r\n wordtagcounts[word].append((tag, count)) \r\n f.close() \r\n return wordtagcounts", "def tags(self):\n return tuple([x.strip() for x in self._dict.get('tags').split(',')])" ]
[ "0.67446077", "0.6337529", "0.6291825", "0.61587244", "0.5640741", "0.5421108", "0.5401528", "0.53968465", "0.53458995", "0.53208137", "0.5273067", "0.52147645", "0.51437765", "0.5076457", "0.5067522", "0.5062207", "0.50597274", "0.5055507", "0.4999549", "0.49994224", "0.4990335", "0.4959095", "0.49401346", "0.48882172", "0.4850272", "0.48077288", "0.47923303", "0.47712857", "0.47513536", "0.4742131" ]
0.7614175
0
Tries to find the best tag for the current cursor position.
def findTag(bufferNumber, changedTick):
    # DOC {{{
    # }}}

    # CODE {{{
    # try to find the best tag {{{
    try:
        # get the tags data for the current buffer
        tagLineNumbers, tags = getTags(bufferNumber, changedTick)

        # link to vim's internal data {{{
        currentBuffer = vim.current.buffer
        currentWindow = vim.current.window
        row, col = currentWindow.cursor
        # }}}

        # get the index of the nearest line
        nearestLineIndex = getNearestLineIndex(row, tagLineNumbers)

        # if any line was found, try to find if the tag is appropriate {{{
        # (ie. the cursor can be below the last tag but on a code that has nothing
        # to do with the tag, because it's indented differently, in such case no
        # appropriate tag has been found.)
        while (nearestLineIndex > -1):
            # get the line number of the nearest tag
            nearestLineNumber = tagLineNumbers[nearestLineIndex]

            # walk through all the lines in range (nearestTagLine, cursorRow) {{{
            for lineNumber in range(nearestLineNumber + 1, row):
                # get the current line
                line = currentBuffer[lineNumber]

                # count the indentation of the line, if it's lower than the tag's, the tag is invalid {{{
                if (len(line)):
                    # initialize local auxiliary variables {{{
                    lineStart = 0
                    i = 0
                    # }}}

                    # compute the indentation of the line {{{
                    while ((i < len(line)) and (line[i].isspace())):
                        # move the start of the line code {{{
                        if (line[i] == '\t'):
                            lineStart += SimplePythonTagsParser.TABSIZE
                        else:
                            lineStart += 1
                        # }}}

                        # go to the next character on the line
                        i += 1
                    # }}}

                    # if the line contains only spaces, skip it {{{
                    if (i == len(line)):
                        continue
                    # }}}
                    # if the next character is a '#' (python comment), skip the line {{{
                    if (line[i] == '#'):
                        continue
                    # }}}
                    # if the next character is a ')', skip the line {{{
                    # this is so that the following style works correctly:
                    #
                    # def foo(
                    #     args,
                    # ):
                    #     pass
                    if (line[i] == ')'):
                        continue
                    # }}}

                    # if the line's indentation starts before or at the nearest tag's one, the tag is invalid {{{
                    if (lineStart <= tags[nearestLineNumber].indentLevel):
                        nearestLineIndex -= 1
                        break
                    # }}}
                # }}}
            # }}}
            # the tag is appropriate, so use it {{{
            else:
                break
            # }}}
        # }}}
        # no appropriate tag has been found {{{
        else:
            nearestLineNumber = -1
        # }}}

        # describe the cursor position (what tag the cursor is on) {{{
        # reset the description
        tagDescription = ""

        # if an appropriate tag has been found, set the description accordingly {{{
        if (nearestLineNumber > -1):
            tagInfo = tags[nearestLineNumber]
            tagDescription = "[%s]" % (tagInfo.fullName, )  # not using PythonTag.TAG_TYPE_NAME[tagInfo.type] because ENOSPC
        # }}}
        # }}}

        # update the variable for the status line so it get updated with the new description
        vim.command("let w:PHStatusLine=\"%s\"" % (tagDescription,))
    # }}}

    # handle possible exceptions {{{
    except Exception:
        # bury into the traceback {{{
        ec, ei, tb = sys.exc_info()
        while (tb != None):
            if (tb.tb_next == None):
                break
            tb = tb.tb_next
        # }}}

        # spit out the error {{{
        print("ERROR: %s %s %s:%u" % (ec.__name__, ei, tb.tb_frame.f_code.co_filename, tb.tb_lineno,))
        time.sleep(0.5)
        # }}}
    # }}}
    # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_max_tag(self, word):\n count = []\n for tag in self.pos_tags:\n count.append(self.tag_word_data.count((tag, word)))\n max_index = np.argmax(np.asarray(count))\n return self.pos_tags[max_index]", "def find_first_tag(self, tag):\n for lm, _ in self.search(tag=tag):\n return lm", "def tag_word(self, w): \n if self.unknown(w):\n return self.default_tag\n else:\n return max(self.word_tags[w], key=self.word_tags[w].get)", "def get_tag_for_word(self, word: str):\n doc = self.model(word)\n for token in doc:\n return token.pos_", "def get_cursor(self):\n k, font, txt = self._cursor, self.font, self.txt\n index2pixel = self._index2pixel\n\n return Rect(index2pixel(k), font.size(txt[k]))\n print k", "def getMostEnclosingTag(tagNode, keyClasses):\n mostEnclosingTag = tagNode\n currentTag = tagNode\n while currentTag.parent!=None:\n currentTag = currentTag.parent\n # parent tags can be different from what we have in the 'tags' variable.\n # if the tag has the classes we were looking for in article tags, then it is a tag we want\n if currentTag.get('class') and len(set(currentTag.get('class')) & set(artClasses))>0:\n # this tag has what \n mostEnclosingTag = currentTag\n return mostEnclosingTag", "def get_best_position(self):\n # Todo: implement\n best_value_global = -inf\n position = None\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n position = particle.best_position\n best_value_global = particle.best_value\n return position", "def get_parser_best(self):\n if len(self.parses):\n return min(self, key=lambda parse: parse.parser_rank)\n else:\n return None", "def find_tag(tag_hash):\n for i in tags_fin:\n if tag_hash == i[2]:\n return i[1]", "def find_highest_tag(tag_list: list) -> str:\n highest_tag = None\n return_tag = None\n\n for tag in tag_list:\n clean_tag = convert_tag(tag)\n\n if clean_tag is None:\n continue\n\n if not highest_tag:\n highest_tag = clean_tag\n return_tag = tag\n else:\n if clean_tag > highest_tag:\n highest_tag = clean_tag\n return_tag = tag\n\n return return_tag", "def word_under_cursor_pos(self):\n self._vim.command('normal e')\n end = self.cursor()\n self._vim.command('normal b')\n beg = self.cursor()\n return beg, end", "def search_sentence(target, sentences, tags, distance_evaluator=DistanceEvaluators.JACCARD):\n tag_id = 'VOID'\n best_sentence = ''\n best_distance = float('Infinity')\n x = list(zip(sentences, tags))\n for sentence, tag in zip(sentences, tags):\n #print(sentence)\n #print(\"\\n\\n\\n\" + tag)\n distance = distance_evaluator(sentence, target)\n if distance < best_distance:\n tag_id = tag\n best_sentence = sentence\n best_distance = distance\n return tag_id, best_sentence, best_distance", "def locate(self, pos):\n for obj in self.wrappers:\n if obj.start <= pos < obj.end:\n for sub in getattr(obj, 'attributes', ()):\n if sub.start <= pos < sub.end:\n return sub\n return obj\n else:\n if pos == len(self.input):\n return self.wrappers[-1]\n raise IndexError(\"position %d out of range\" % pos)", "def getMostLikelyPos(self):\n mostLikelyPos = None\n mostLikelyProb = None\n beliefDist = self.getBeliefDistribution()\n for part in self.particles:\n currProb = beliefDist[part]\n if mostLikelyPos is None or currProb > mostLikelyProb:\n mostLikelyPos = part \n mostLikelyProb = currProb\n return mostLikelyPos", "def read_next_tag_or_seek(flv_tags):\n # is this the good place to put?\n if not flv_tags.version:\n flv_tags.parse_header()\n tag = None\n position = flv_tags.f.tell()\n try:\n tag = flv_tags.get_next_tag()\n 
except tags.EndOfFile:\n flv_tags.f.seek(position)\n raise tags.EndOfFile\n return tag", "def get_latest_tag(self, repo: git.Repo) -> Tuple[Optional[\n git.refs.tag.TagReference], Optional[semantic_version.Version]]:\n raw_tag = self._search_strategy(\n repo=repo, branch=self._branch)\n if raw_tag is None:\n return None, None\n sem_tag = semantic_version.Version(\n tag_search_strategy.clean_tag_name(str(raw_tag)))\n return raw_tag, sem_tag", "def _get_tag(self, current_path, commit_sha):\n command = [\"git\", \"describe\", \"--tags\", commit_sha]\n p = subprocess.Popen(\n command,\n stdout=PIPE,\n stderr=PIPE,\n cwd=os.path.join(self.root_dir, current_path),\n )\n output, error = p.communicate()\n if p.returncode == 0:\n return output.decode(\"utf-8\").strip()\n elif \"fatal: no tags can describe '{}'.\".format(commit_sha) in error.decode(\n \"utf-8\"\n ).lower():\n return None\n elif \"fatal: no names found\" in error.decode(\"utf-8\").lower():\n return None\n else:\n raise Exception(\n \"Error [{}] occurred while executing [{}] command to get nearest tag associated with branch.\".format(\n error.decode(\"utf-8\"), \" \".join(command)\n )\n )", "def tag_one(self, tokens, index, history):\n tag = None\n for tagger in self._taggers:\n tag = tagger.choose_tag(tokens, index, history)\n if tag is not None:\n break\n return tag", "def youngest(self):\n # Your implementation here", "def find_most_compatible_match(self, candidate):\n best_matchIdx = -1\n best_matchVal = 0\n len_of_match = len(self.match)\n if not candidate.any():\n return None\n for i in candidate:\n if self.W[len_of_match][i] > best_matchVal:\n best_matchVal = self.W[len_of_match][i]\n best_matchIdx = i\n return best_matchIdx", "def get_next_available_tag(self):\n try:\n return self.available_tags.pop()\n except IndexError:\n return False", "def best_sequence(self, T, pos, psi, phi, fix_tags=[]):\n for idx, m in fix_tags:\n phi[idx - 1, m] = 100\n # if fix_idx:\n # phi[fix_idx - 1, fix_m] = 100\n msgs, pointers = max_product(T, pos, psi, phi, True)\n tags_dict = get_best_tags(T, msgs, pointers)\n tags = []\n for i in range(1, len(T) + 1):\n tags.append(self.get_tag(tags_dict[str(i)]))\n return tags", "def get_default_tag(self, tags):\n tags_counter = Counter()\n for tag in tags:\n tags_counter[tag] += 1\n\n if len(tags_counter) == 2 and list(tags_counter.values())[0] == list(tags_counter.values())[1]:\n return ut.find_positive_tag(tags_counter.keys())\n\n return tags_counter.most_common(1)[0][0]", "def getPosTagAt(self, pos):\n return self.sentence[pos].getPosTag()", "def get_best_thread(self, question, tag_name):\n thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)\n\n # HINT: you have already implemented a similar routine in the 3rd assignment.\n question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim).reshape(1, -1)\n best_thread = pairwise_distances_argmin(question_vec,thread_embeddings, metric=\"cosine\")\n return thread_ids[best_thread][0]", "def get_closest(occurences, content, k=25):\n result = []\n o = occurences[0] #get first\n for idx in o:\n res = 0\n for i in range(1, len(occurences)): #other than first\n oo = occurences[i]\n where = bisect_left(oo, idx)\n #try both, after and before the binary searched index (if exists)\n try:\n res += min(abs(oo[min(len(oo)-1, where+1)]-idx), abs(oo[min(len(oo)-1, where)]-idx))\n except:\n print(\"Something went wrong here\")\n result.append((res, idx))\n \n #if res < best:\n # best = res\n # best_where = (idx)\n\n result = 
sorted(result)\n final = []\n \n for score, i in tqdm(result):\n f = False\n for ii in final:\n if abs(i-ii) <= 200:\n f = True\n break\n if not f:\n final.append(i)\n\n paragraphs = []\n\n for idx in final:\n paragraphs.append(get_para(content, idx))\n\n print(\"DONE HERE\")\n return paragraphs[:k]", "def index_in_tag(self):\n if hasattr(self, '_m_index_in_tag'):\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None\n\n self._m_index_in_tag = (self.tag - 35)\n return self._m_index_in_tag if hasattr(self, '_m_index_in_tag') else None", "def _find_closest_in_range(ranges: Iterable[CT], what_to_find: CT) -> Optional[CT]:\n\n ranges = sorted(ranges)\n\n while ranges:\n\n middle_item_index = len(ranges) // 2\n middle_item = ranges[middle_item_index]\n\n if what_to_find == middle_item:\n return what_to_find\n\n elif what_to_find > middle_item:\n\n if len(ranges) == 1:\n return middle_item\n\n ranges = ranges[middle_item_index:]\n\n elif what_to_find < middle_item:\n\n if ranges[middle_item_index - 1] < what_to_find:\n return ranges[middle_item_index - 1]\n\n ranges = ranges[:middle_item_index]", "def get_pos(term):\n # pylint: disable=invalid-name\n # Invalid variable name \"Position\"\n Position = collections.namedtuple('Position', ('row', 'column'))\n\n pos = Position(*term.get_location(timeout=5.0))\n\n if -1 in pos:\n print('stdin: not a human', file=sys.stderr)\n exit(2)\n\n return pos", "def get_best_span_index(doc_spans, position):\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n return best_span_index" ]
[ "0.5941577", "0.5846168", "0.5729062", "0.552021", "0.54697645", "0.54619795", "0.54302764", "0.53901774", "0.53870636", "0.5386353", "0.5366242", "0.53650975", "0.5352395", "0.5349397", "0.534793", "0.53451484", "0.5316854", "0.5307668", "0.530126", "0.5299683", "0.52880454", "0.52867573", "0.52829826", "0.52731586", "0.52644026", "0.5263105", "0.5235249", "0.521907", "0.52000594", "0.5199659" ]
0.6160137
0
Removes tags data for the specified buffer number.
def deleteTags(bufferNumber): # DOC {{{ # }}} # CODE {{{ # define global variables global TAGS, TAGLINENUMBERS, BUFFERTICKS # try to delete the tags for the buffer {{{ try: del TAGS[bufferNumber] del TAGLINENUMBERS[bufferNumber] del BUFFERTICKS[bufferNumber] except: pass # }}} # }}}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def port_buffer_drop():", "def _remove_buffer(self):\n if self._buffer is not None:\n self._engine.remove_window(self._buffer)\n self._buffer = None\n self._region = None", "def remove_tag(self, index):\n\n model_index = self.GetItemData(index)\n self.DeleteItem(model_index)\n del self._clientData[model_index]", "def removeDataAt(self, address: ghidra.program.model.address.Address) -> None:\n ...", "def remove_tag(args):", "def delete_packets(self, num):\n for i in range(num):\n del self._packets[0]", "def remove_header(filename, packet_num):\n bin_data = np.fromfile(filename, dtype=np.int16)\n index = []\n for i in range(packet_num):\n j = i * 735\n index.append([j, j + 1, j + 2, j + 3, j + 4, j + 5, j + 6])\n output = np.delete(bin_data, index)\n return output", "def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []", "def _remove_buffers(state):\n buffer_paths, buffers = [], []\n state = _separate_buffers(state, [], buffer_paths, buffers)\n return state, buffer_paths, buffers", "def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))", "def delete_tag(tag):\n tag.destroy()", "def remove_entry(self, number: int) -> None:\n raise NotImplementedError", "def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError", "def remove_gifti_data_array(self, ith):\n self.darrays.pop(ith)", "def remove_buffered_packets(self):\n seq = self.next_seq\n while True:\n p = self.buffer.pop(seq, None)\n if p is None:\n break\n else:\n seq += len(p.data)\n yield p", "def remove_to_destroy(total_buffer,to_destroy):\n totbuf=np.copy(total_buffer)\n for val,begInd,endInd in to_destroy:\n for j in range(endInd-begInd):\n index_beg = begInd+j\n totbuf[ total_buffer[:,:,index_beg]==val,index_beg]=0\n return totbuf", "def remove(self, index):\n self.data.pop(index)", "def untagAll(self, authenticationToken, guid):\r\n self.send_untagAll(authenticationToken, guid)\r\n self.recv_untagAll()", "def untagAll(self, authenticationToken, guid):\r\n pass", "def clear_buffer(self):\n for i, value in enumerate(self.buffer):\n self.buffer[i] = 0", "def delTags(self):\r\n for tag in self.tags:\r\n self.canvasCirkt.delete(tag)\r\n self.canvasCirkt.update()", "def remove_cells(self, tag):\n tagged_cells = self.get_cells(tag)\n if tagged_cells:\n print(f\"- removing cells tagged {tag} from {self.filename}\")\n self.content.cells = filter(lambda cell: cell not in tagged_cells, self.content.cells)", "def pop_tag(data):\n if data and is_tag(data[0]):\n return data.pop(0)", "def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200", "def UnlockSeqBuf(self,number):\r\n r = CALL('UnlockSeqBuf',self,INT(number),self.image)\r\n return self.CheckForSuccessError(r)", "def pop_item(self, index):\n ix, obj = self.items\n if index < len(ix):\n self.d_buffer.pop(ix[index])\n else:\n raise 
IndexError('Buffer does not have {0} elements'.format(index))", "def findTag(bufferNumber, changedTick):\n # DOC {{{\n # }}}\n\n # CODE {{{\n # try to find the best tag {{{\n try:\n # get the tags data for the current buffer\n tagLineNumbers, tags = getTags(bufferNumber, changedTick)\n\n # link to vim's internal data {{{\n currentBuffer = vim.current.buffer\n currentWindow = vim.current.window\n row, col = currentWindow.cursor\n # }}}\n\n # get the index of the nearest line\n nearestLineIndex = getNearestLineIndex(row, tagLineNumbers)\n\n # if any line was found, try to find if the tag is appropriate {{{\n # (ie. the cursor can be below the last tag but on a code that has nothing\n # to do with the tag, because it's indented differently, in such case no\n # appropriate tag has been found.)\n while (nearestLineIndex > -1):\n # get the line number of the nearest tag\n nearestLineNumber = tagLineNumbers[nearestLineIndex]\n\n # walk through all the lines in range (nearestTagLine, cursorRow) {{{\n for lineNumber in range(nearestLineNumber + 1, row):\n # get the current line\n line = currentBuffer[lineNumber]\n\n # count the indentation of the line, if it's lower than the tag's, the tag is invalid {{{\n if (len(line)):\n # initialize local auxiliary variables {{{\n lineStart = 0\n i = 0\n # }}}\n\n # compute the indentation of the line {{{\n while ((i < len(line)) and (line[i].isspace())):\n # move the start of the line code {{{\n if (line[i] == '\\t'):\n lineStart += SimplePythonTagsParser.TABSIZE\n else:\n lineStart += 1\n # }}}\n\n # go to the next character on the line\n i += 1\n # }}}\n\n # if the line contains only spaces, skip it {{{\n if (i == len(line)):\n continue\n # }}}\n # if the next character is a '#' (python comment), skip the line {{{\n if (line[i] == '#'):\n continue\n # }}}\n # if the next character is a ')', skip the line {{{\n # this is so that the following style works correctly:\n #\n # def foo(\n # args,\n # ):\n # pass\n if (line[i] == ')'):\n continue\n # }}}\n\n # if the line's indentation starts before or at the nearest tag's one, the tag is invalid {{{\n if (lineStart <= tags[nearestLineNumber].indentLevel):\n nearestLineIndex -= 1\n break\n # }}}\n # }}}\n # }}}\n # the tag is appropriate, so use it {{{\n else:\n break\n # }}}\n # }}}\n # no appropriate tag has been found {{{\n else:\n nearestLineNumber = -1\n # }}}\n\n # describe the cursor position (what tag the cursor is on) {{{\n # reset the description\n tagDescription = \"\"\n\n # if an appropriate tag has been found, set the description accordingly {{{\n if (nearestLineNumber > -1):\n tagInfo = tags[nearestLineNumber]\n tagDescription = \"[%s]\" % (tagInfo.fullName, ) # not using PythonTag.TAG_TYPE_NAME[tagInfo.type] because ENOSPC\n # }}}\n # }}}\n\n # update the variable for the status line so it get updated with the new description\n vim.command(\"let w:PHStatusLine=\\\"%s\\\"\" % (tagDescription,))\n # }}}\n\n # handle possible exceptions {{{\n except Exception:\n # bury into the traceback {{{\n ec, ei, tb = sys.exc_info()\n while (tb != None):\n if (tb.tb_next == None):\n break\n tb = tb.tb_next\n # }}}\n\n # spit out the error {{{\n print(\"ERROR: %s %s %s:%u\" % (ec.__name__, ei, tb.tb_frame.f_code.co_filename, tb.tb_lineno,))\n time.sleep(0.5)\n # }}}\n # }}}\n # }}}", "def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]", "def 
discard_key_from_tag(self,tag,key):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n\r\n self.tag_dict[tag].discard(key)\r\n\r\n\r\n #with database\r\n if self.using_database:\r\n value_tuple = (notebookname,tag,key,)\r\n db_cursor.execute(\"DELETE FROM tags_to_keys\"\r\n +\" WHERE notebook=? AND tag=?\"\r\n +\" AND keyword=?;\",\r\n value_tuple)", "def Remove(self, version_number):\n self.dict.pop(str(version_number))" ]
[ "0.57889014", "0.5715625", "0.57064915", "0.569658", "0.56588185", "0.5638952", "0.5633699", "0.5570281", "0.55377984", "0.5502692", "0.54355866", "0.5407464", "0.5381097", "0.53683925", "0.5340239", "0.5336506", "0.53318745", "0.53140545", "0.53066534", "0.52959603", "0.5267054", "0.52593344", "0.52443165", "0.52158463", "0.5199341", "0.51913255", "0.517962", "0.51694673", "0.5150832", "0.5128002" ]
0.8191595
0
When a resource record is deleted, delete all related attachments. When a bucket or collection is deleted, it removes the attachments of every underlying record.
def on_delete_record(event): keep_old_files = asbool(utils.setting_value(event.request, 'keep_old_files', default=False)) # Retrieve attachments for these records using links. resource_name = event.payload['resource_name'] filter_field = '%s_uri' % resource_name uri = event.payload['uri'] utils.delete_attachment(event.request, link_field=filter_field, uri=uri, keep_old_files=keep_old_files)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unlink(self):\n if not self:\n return True\n \n # for recomputing fields\n self.modified(self._fields)\n \n self._check_concurrency()\n \n self.check_access_rights('unlink')\n \n # Check if the records are used as default properties.\n refs = ['%s,%s' % (self._name, i) for i in self.ids]\n if self.env['ir.property'].search([('res_id', '=', False), ('value_reference', 'in', refs)]):\n raise UserError(_('Unable to delete this document because it is used as a default property'))\n \n # Delete the records' properties.\n with self.env.norecompute():\n self.env['ir.property'].search([('res_id', 'in', refs)]).unlink()\n self.delete_workflow()\n self.check_access_rule('unlink')\n \n cr = self._cr\n Data = self.env['ir.model.data'].sudo().with_context({})\n Defaults = self.env['ir.default'].sudo()\n Attachment = self.env['ir.attachment']\n \n for sub_ids in cr.split_for_in_conditions(self.ids):\n query = \"DELETE FROM %s WHERE id IN %%s\" % self._table\n cr.execute(query, (sub_ids,))\n \n # Removing the ir_model_data reference if the record being deleted\n # is a record created by xml/csv file, as these are not connected\n # with real database foreign keys, and would be dangling references.\n #\n # Note: the following steps are performed as superuser to avoid\n # access rights restrictions, and with no context to avoid possible\n # side-effects during admin calls.\n data = Data.search([('model', '=', self._name), ('res_id', 'in', sub_ids)])\n if data:\n data.unlink()\n \n # For the same reason, remove the defaults having some of the\n # records as value\n Defaults.discard_records(self.browse(sub_ids))\n \n # For the same reason, remove the relevant records in ir_attachment\n # (the search is performed with sql as the search method of\n # ir_attachment is overridden to hide attachments of deleted\n # records)\n query = 'SELECT id FROM ir_attachment WHERE res_model=%s AND res_id IN %s'\n cr.execute(query, (self._name, sub_ids))\n attachments = Attachment.browse([row[0] for row in cr.fetchall()])\n if attachments:\n attachments.unlink()\n \n # invalidate the *whole* cache, since the orm does not handle all\n # changes made in the database, like cascading delete!\n self.invalidate_cache()\n \n # recompute new-style fields\n if self.env.recompute and self._context.get('recompute', True):\n self.recompute()\n # auditing: deletions are infrequent and leave no trace in the database\n _unlink.info('User #%s deleted %s records with IDs: %r', self._uid, self._name, self.ids)\n return True", "def post_provider_attachment_delete(self, resource_id, resource_dict):\n pass", "def pre_customer_attachment_delete(self, resource_id):\n pass", "def post_customer_attachment_delete(self, resource_id, resource_dict):\n pass", "def pre_provider_attachment_delete(self, resource_id):\n pass", "def delete_record_files(self, record, logStat):\n from corrdb.common.models import FileModel\n final_result = True\n for _file_id in record.resources:\n _file = FileModel.objects.with_id(_file_id)\n result = self.delete_record_file(_file, logStat)\n if not result:\n final_result = result\n return final_result", "def delete_records(self, records_to_delete):\n for record in records_to_delete:\n self.records.remove(record)\n self._store_writer.remove_img_file(record)\n\n self._process_change()", "def attachments_delete(self,\r\n document_id,\r\n attachment_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id,\r\n attachment_id=attachment_id)\r\n\r\n # Prepare query URL\r\n _query_builder 
= Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments/{attachmentId}'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id,\r\n 'attachmentId': attachment_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.delete(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body)", "def delete_record(records):\n delete_record()", "def disassociate_s3_resources(memberAccountId=None, associatedS3Resources=None):\n pass", "def delete(self, identifier):\n self.get(identifier)\n conn = self.get_connector()\n cursor = conn.cursor()\n\n query = \"delete from {0} where {2}={1}\".format(\n self.ressource_config[\"table\"],\n identifier,\n self.model.pk_field.name)\n try:\n cursor.execute(query)\n except sqlite3.IntegrityError, e:\n message = \"\"\n if \"foreign\" in e.message:\n message = \"\"\"another ressource depends on this\n object. Cloud not delete before all ressources\n depending on it are also deleted\"\"\"\n\n raise BadRequest(message)\n\n conn.commit()\n conn.close()", "def group_delete(user_id, resource_type, resource_id):\n logging.info('Deleting %s %d...', resource_type, resource_id)\n soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))\n db.record_deletion(user_id, resource_type, resource_id)\n db.commit()", "def delete_record(self, record_id):\r\n self.record.deleteObject(id=record_id)", "def finalizer():\n for resource_type in pods, pvcs, storageclasses, secrets:\n for resource in resource_type:\n resource.delete()\n resource.ocp.wait_for_delete(resource.name)\n if pools:\n # Delete only the RBD pool\n pools[0].delete()\n if projects:\n for project in projects:\n project.delete(resource_name=project.namespace)\n project.wait_for_delete(project.namespace)", "def attachment_deleted(self, attachment):\n if 'attachment' not in self.sources:\n return\n gnp = GrowlNotificationPacket(notification='ticket',\n title='Attachment deleted',\n description=attachment.title)\n gs = GrowlSender(self.env)\n gs.notify(self._get_hosts('attachment'), gnp)", "def test_object_delete(self):\n self.add_attachments() # attach the attachments\n\n # we have 2 attachments\n self.assertEqual(3, self.eightythreeb.attachment_set.all().count())\n # delete a single object\n self.eightythreeb.attachment_set.all()[0].delete()\n # we should now have 2 active attachments\n self.assertEqual(2, self.eightythreeb.attachment_set.all().count())\n # and 1 deleted\n self.assertEqual(1, self.eightythreeb.attachment_set.deleted().count())", "def delete(self, using=None, keep_parents=False, **kwargs):\n self.cache_expire()\n\n cls = self.__class__.__name__\n if cls == \"Piece\":\n for a in self.attachments.all():\n a.delete(**kwargs)\n for m in self.movements.all():\n m.delete(**kwargs)\n\n if cls == \"Movement\":\n for a in self.attachments.all():\n a.delete(**kwargs)\n\n super().delete(using, keep_parents)\n\n if kwargs.get(\"ignore_solr\"):\n pass\n elif kwargs.get(\"commit_solr\", True):\n self.solr_delete(commit=True)\n else:\n self.solr_delete(commit=False)", "def purgeRecord(self, record):\n\n if record.uid in self._cache[IndexType.uid]:\n del 
self._cache[IndexType.uid][record.uid]\n\n try:\n if record.guid in self._cache[IndexType.guid]:\n del self._cache[IndexType.guid][record.guid]\n except AttributeError:\n pass\n\n try:\n typeName = record.recordType.name\n for name in record.shortNames:\n key = (typeName, name)\n if key in self._cache[IndexType.shortName]:\n del self._cache[IndexType.shortName][key]\n except AttributeError:\n pass\n\n try:\n for emailAddress in record.emailAddresses:\n if emailAddress in self._cache[IndexType.emailAddress]:\n del self._cache[IndexType.emailAddress][emailAddress]\n except AttributeError:\n pass", "def delete(self, *args, **kwargs):\n self.file.delete(save=False)\n self.thumbnail.delete(save=False)\n\n super(File, self).delete(*args, **kwargs)", "def clean_resource() -> list:\n helpers.starting_clean_print(RESOURCE_NAME)\n resource_client = boto3.client(BOTO3_NAME)\n resources = get_resources(resource_client)\n terminated_items = delete_resources(resource_client, resources)\n helpers.finished_clean_print(RESOURCE_NAME, terminated_items)\n return terminated_items", "def replay_delete(sender, instance, **kwargs):\n pass\n # Temporarily disabled\n\n #print(\"deleting file from S3\")\n # False so FileField doesn't save the model\n #instance.file.delete(False)", "def purge(self):\n from models.accounts import Account\n\n # Make sure we have access to the associated account frame\n if not isinstance(self.account, Account):\n self.account = Account.one(Q._id == self.account)\n\n # Get the backend required to delete the asset\n backend = self.account.get_backend_instance()\n\n # Delete the original file\n backend.delete(self.store_key)\n\n # Delete all variation files\n for variation in self.variations:\n backend.delete(variation.store_key)\n\n self.delete()", "def delete(openstack_resource):\n openstack_resource.delete()", "def _cleanup_uploads(self):\n logger.debug(\"Performing blob upload cleanup\")\n\n while True:\n # Find all blob uploads older than the threshold (typically a week) and delete them.\n with UseThenDisconnect(app.config):\n stale_upload = model.get_stale_blob_upload(DELETION_DATE_THRESHOLD)\n if stale_upload is None:\n logger.debug(\"No additional stale blob uploads found\")\n return\n\n # Remove the stale upload from storage.\n logger.debug(\"Removing stale blob upload %s\", stale_upload.uuid)\n assert stale_upload.created <= (datetime.utcnow() - DELETION_DATE_THRESHOLD)\n\n try:\n storage.cancel_chunked_upload(\n [stale_upload.location_name], stale_upload.uuid, stale_upload.storage_metadata\n )\n except Exception as ex:\n logger.debug(\n \"Got error when trying to cancel chunked upload %s: %s\",\n stale_upload.uuid,\n ex.message,\n )\n\n # Delete the stale upload's row.\n with UseThenDisconnect(app.config):\n model.delete_blob_upload(stale_upload)\n\n logger.debug(\"Removed stale blob upload %s\", stale_upload.uuid)", "def test_aws_service_api_volume_attachment_delete(self):\n pass", "def s3_delete_data(self):\n\n self.k.delete()", "def delete(self,\n signal_kwargs=None,\n **write_concern):\n self._config.write_to_log(f\"Deleting {self.patientId} and associated documents...\")\n for references in [self.outcomeEvents,\n self.measurements,\n self.criticalCare]:\n for doc in references:\n doc.delete(signal_kwargs=signal_kwargs, **write_concern)\n super().delete(self=self,\n signal_kwargs=signal_kwargs,\n **write_concern)\n self._config.write_to_log(f\"Deleted patient and asssociated documents.\")", "def delete(self):\n for obj in self:\n _unset_related_objects_relations(obj)\n\n 
self.update(deleted=now())", "def auto_delete_related_models_on_task_delete(sender, instance, **kwargs):\n SkyLabFile.objects.filter(task=instance).delete()\n TaskLog.objects.filter(task=instance).delete()", "def auto_delete_file_on_delete(sender, instance, **kwargs):\n to_delete = [\n instance.photo,\n instance.photo2,\n instance.photo3\n ]\n for photo in to_delete:\n if photo:\n if os.path.isfile(photo.path):\n os.remove(photo.path)" ]
[ "0.6843386", "0.67490435", "0.6740739", "0.66893613", "0.66396093", "0.65184605", "0.64819264", "0.6070061", "0.60316944", "0.59595275", "0.5957353", "0.59099954", "0.5882116", "0.5862595", "0.5855745", "0.5803972", "0.57956177", "0.577427", "0.5731602", "0.5699655", "0.56986195", "0.5688985", "0.5688883", "0.5688204", "0.56776446", "0.56736845", "0.56460005", "0.56441903", "0.55909216", "0.55827576" ]
0.7568625
0
Refresh the index by recomputing the embeddings for all points.
def refresh_index(self): synchronize() # TODO: add logger call here self._compute_embeddings()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reindex(self):\n self._index = {w: i for i, w in enumerate(self._words)}\n self.n, self.d = self._vecs.shape\n assert self.n == len(self._words) == len(self._index)\n self._neighbors = None", "def _index(self, corpus):\n\n # Transform documents to embeddings vectors\n ids, dimensions, stream = self.embedder.model.index(corpus)\n\n # Load streamed embeddings back to memory\n embeddings = np.empty((len(ids), dimensions), dtype=np.float32)\n with open(stream, \"rb\") as queue:\n for x in range(embeddings.shape[0]):\n embeddings[x] = pickle.load(queue)\n\n # Remove temporary file\n os.remove(stream)\n\n all_text = []\n for para_id, text, _ in corpus:\n all_text.append([text, para_id])\n\n df = pd.DataFrame(all_text, columns=[\"text\", \"paragraph_id\"])\n\n embedding_path = os.path.join(\n self.index_path, self.embed_paths[\"embeddings\"])\n dataframe_path = os.path.join(\n self.index_path, self.embed_paths[\"dataframe\"])\n ids_path = os.path.join(self.index_path, self.embed_paths[\"ids\"])\n\n # Load new data\n if os.path.isfile(embedding_path) and (self.encoder_args[\"overwrite\"] is False):\n logger.info(f\"Loading new data from {embedding_path}\")\n\n # Load existing embeddings\n old_embeddings = np.load(embedding_path) # LOAD EMBEDDINGS\n # Remove embeddings with document id overlaps\n embeddings = np.vstack((old_embeddings, embeddings))\n\n # load IDs\n old_ids = [doc_id[:-1] for doc_id in open_txt(ids_path)]\n logger.debug(f\"New ID Length = {len(ids)}\")\n logger.debug(f\"Old ID Length = {len(old_ids)}\")\n # Remove document ids overlaps\n logger.debug(f\"New ID Length = {len(ids)}\")\n ids = old_ids + ids\n logger.debug(f\"Merged ID Length = {len(ids)}\")\n\n # Append new dataframe\n old_df = pd.read_csv(dataframe_path)\n df = pd.concat([old_df, df])\n\n # Store embeddings and document index\n # for future reference\n np.save(embedding_path, embeddings)\n with open(ids_path, \"w\") as fp:\n fp.writelines([i + \"\\n\" for i in ids])\n\n # Save data csv\n df.to_csv(dataframe_path, index=False)\n\n # Normalize embeddings\n self.embedder.normalize(embeddings)\n\n # Save embeddings metadata\n self.embedder.config[\"ids\"] = ids\n self.embedder.config[\"dimensions\"] = dimensions\n\n # Create embeddings index\n logger.info(f\"Creating embeddings and index\")\n self.embedder.embeddings = ANN.create(self.embedder.config)\n logger.info(f\"Created embeddings\")\n\n # Build the index\n self.embedder.embeddings.index(embeddings)\n logger.info(f\"Built the embeddings index\")", "def _warm_cache(self):\n for word, index in self.word_to_index.items():\n self.embedding_layer.weight.data[index].copy_(torch.from_numpy(self.embedder.get_word_vector(word)))", "def clear_indexes(self):\n for keypoints in self:\n keypoints.clear_index()", "def _reinit_indexes(self):\n print('Reinitializing indexes...')\n for identity in self.groundtruth_metadata.keys():\n self.groundtruth_metadata[identity]['index'] = 0\n print('Indexes reinitialized!')", "def reset_weights(self):\n np.random.seed(self.seed)\n self.node_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n self.context_embedding = xavier_normal(size=(self.vocab_size, self.layer1_size), as_type=np.float32)\n\n\n self.centroid = np.zeros((self.k, self.layer1_size), dtype=np.float32)\n self.covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.inv_covariance_mat = np.zeros((self.k, self.layer1_size, self.layer1_size), dtype=np.float32)\n self.pi = np.zeros((self.vocab_size, 
self.k), dtype=np.float32)", "def reindex(self):", "def reindex(self):", "def fit(self):\n\n self.bow_archives_by_paperid = {userid: [self.dictionary.doc2bow(doc) for doc in archive] \\\n for userid, archive in self.kp_archives_by_paperid.items()}\n\n self.bow_archives_by_userid = {userid: [self.dictionary.doc2bow(doc) for doc in archive] \\\n for userid, archive in self.kp_archives_by_userid.items()}\n\n flattened_archives = [\n bow for archive in self.bow_archives_by_paperid.values() for bow in archive]\n\n self.index = SparseMatrixSimilarity(\n [self.tfidf[bow] for bow in flattened_archives],\n num_features=len(self.dictionary)\n )", "def reset(self):\n self.curr_idx = 0\n #shuffle data in each bucket\n random.shuffle(self.idx)\n for i, buck in enumerate(self.sentences):\n self.indices[i], self.sentences[i], self.characters[i], self.label[i] = shuffle(self.indices[i],\n self.sentences[i],\n self.characters[i],\n self.label[i])\n\n self.ndindex = []\n self.ndsent = []\n self.ndchar = []\n self.ndlabel = []\n\n #for each bucket of data\n for i, buck in enumerate(self.sentences):\n #append the lists with an array\n self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))\n self.ndsent.append(ndarray.array(self.sentences[i], dtype=self.dtype))\n self.ndchar.append(ndarray.array(self.characters[i], dtype=self.dtype))\n self.ndlabel.append(ndarray.array(self.label[i], dtype=self.dtype))", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def postprocess(self):\n if self.last_num_features == 0:\n return np.empty((0, self.feature_dim))\n\n embedding_out = self.backend.synchronize()[0][:self.last_num_features * self.feature_dim]\n self.embeddings.append(embedding_out)\n embeddings = np.concatenate(self.embeddings).reshape(-1, self.feature_dim)\n embeddings /= np.linalg.norm(embeddings, axis=1, keepdims=True)\n return embeddings", "def build_index(self):\n self.rebuild_index()", "def refresh(self):\n self._list_of_points = []\n self._add_points()", "def _idx_changed(self, idx):\n self.refresh_memory()", "def reindex(self):\n super().reindex()\n self._depths, self._heights = None, None\n for p in self.positions():\n self._compute_depth(p)\n self._compute_height(p)", "def update_embeddings(self, retriever):\n\n docs = self.get_all_documents()\n passages = [d.text for d in docs]\n logger.info(f\"Updating embeddings for {len(passages)} docs ...\")\n embeddings = retriever.embed_passages(passages)\n\n assert len(docs) == len(embeddings)\n\n doc_updates = []\n for doc, emb in zip(docs, embeddings):\n update = {\"_op_type\": \"update\",\n \"_index\": self.index,\n \"_id\": doc.id,\n \"doc\": {self.embedding_field: emb.tolist()},\n }\n doc_updates.append(update)\n\n bulk(self.client, doc_updates, request_timeout=300)", "def update_ixs(self, ixs, embeddings, labels, query_result=None, global_update=True):\n self.memory_embeddings[ixs] = embeddings\n self.memory_labels[ixs] = labels\n self.memory_age[ixs] = 0\n self.memory_relevance[ixs] = 0\n if global_update:\n self.memory_age[self.memory_age != -1] += 1\n if query_result is not None:\n # relevance mask indicates which samples are deemed relevant based on the query results\n # here this is simply which queries were closest to the query embedding, as well as the query 
itself\n relevance_mask = torch.zeros_like(self.memory_relevance)\n ix_counts = Counter(query_result[\"ix\"].flatten().tolist())\n for ix, count in ix_counts.items():\n relevance_mask[ix] = count\n relevance_mask[ixs] = 1 # assign a relevance of 1 to newly added entries as well\n # keep exponential moving average\n self.memory_relevance = self.relevance_discount * self.memory_relevance + relevance_mask", "def update_ixs(self, ixs, embeddings, labels, query_result=None, global_update=True):\n self.memory_embeddings[ixs] = embeddings\n self.memory_labels[ixs] = labels\n self.memory_age[ixs] = 0\n self.memory_relevance[ixs] = 0\n if global_update:\n self.memory_age[self.memory_age != -1] += 1\n if query_result is not None:\n # relevance mask indicates which samples are deemed relevant based on the query results\n # here this is simply which queries were closest to the query embedding, as well as the query itself\n relevance_mask = torch.zeros_like(self.memory_relevance)\n ix_counts = Counter(query_result[\"ix\"].flatten().tolist())\n for ix, count in ix_counts.items():\n relevance_mask[ix] = count\n relevance_mask[ixs] = 1 # assign a relevance of 1 to newly added entries as well\n # keep exponential moving average\n self.memory_relevance = self.relevance_discount * self.memory_relevance + relevance_mask", "def reindex(self):\n raise NotImplementedError()", "def index(self):\n with self.saver.thread():\n batches = self.collection.enumerate_batches(rank=self.rank)\n for chunk_idx, offset, passages in tqdm.tqdm(batches, disable=self.rank > 0):\n if self.config.resume and self.saver.check_chunk_exists(chunk_idx):\n Run().print_main(\n f\"#> Found chunk {chunk_idx} in the index already, skipping encoding...\"\n )\n continue\n # Encode passages into embeddings with the checkpoint model\n embs, doclens = self.encoder.encode_passages(passages)\n if self.use_gpu:\n assert embs.dtype == torch.float16\n else:\n assert embs.dtype == torch.float32\n embs = embs.half()\n\n Run().print_main(\n f\"#> Saving chunk {chunk_idx}: \\t {len(passages):,} passages \"\n f\"and {embs.size(0):,} embeddings. 
From #{offset:,} onward.\"\n )\n\n self.saver.save_chunk(\n chunk_idx, offset, embs, doclens\n ) # offset = first passage index in chunk\n del embs, doclens", "def reset(self):\n self.curr_idx = 0\n # shuffle data in each bucket\n random.shuffle(self.idx)\n for i, buck in enumerate(self.utterances):\n self.indices[i], self.utterances[i], self.intents[i] = shuffle(self.indices[i],\n self.utterances[i],\n self.intents[i])\n self.ndindex = []\n self.ndsent = []\n self.ndlabel = []\n\n # append the lists with an array\n for i, buck in enumerate(self.utterances):\n self.ndindex.append(ndarray.array(self.indices[i], dtype=self.dtype))\n self.ndsent.append(ndarray.array(self.utterances[i], dtype=self.dtype))\n self.ndlabel.append(ndarray.array(self.intents[i], dtype=self.dtype))", "def restart(self):\n self.dominated_set = []\n self.np = 0\n self.rank = None\n self.crowding_distance = 0", "def rebuild(self):\n self.from_samples(self.samples)", "def __generate_all_features_indices__(self):\n features = self.features_dict\n histories = self.histories_dict\n for k in range(self.data.getSentencesSize()):\n sentence = self.data.getSentenceByIndex(k)\n tags = self.data.getTagsByIndex(k)\n for i in range(len(sentence)):\n history = HistoryTuple(k, sentence, tags, i)\n history_key = (tags[i], history.getTupleKey())\n features_indices = self.getFeaturesIndices(tags[i], history, True)\n features_key = tuple(features_indices)\n features[features_key] += 1\n if len(features_indices) == 0:\n self.null_histories_set.add(history_key)\n histories[history_key] = features_indices", "def new_iteration(self):\n if (\n self.inner_solutions is not None\n and self.inner_solutions.size(0) > self.raw_samples\n ):\n indices = torch.randperm(n=self.inner_solutions.size(0))[: self.raw_samples]\n self.inner_solutions = self.inner_solutions[indices]\n self.inner_values = self.inner_values[indices]", "def create_embedding(self):\n self.embedding = []\n\n for index in range(1,self.args.window_size+1):\n print(\"\\nOptimization round: \" +str(index)+\"/\"+str(self.args.window_size)+\".\")\n print(\"Creating documents.\")\n clean_documents = self.walk_extracts(index)\n print(\"Fitting model.\")\n model = Word2Vec(clean_documents,\n size = self.args.dimensions,\n window = 1,\n min_count = self.args.min_count,\n sg = 1,\n workers = self.args.workers)\n\n new_embedding = self.get_embedding(model)\n self.embedding = self.embedding +[new_embedding]\n self.embedding = np.concatenate(self.embedding, axis = 1)", "def build_index(self):\n self.create_index()\n logger.debug(f\"Building index with {self.n_trees} trees.\")\n\n for i in range(len(self.corpus_embeddings)):\n self.index.add_item(i, self.corpus_embeddings[i])\n self.index.build(self.n_trees)" ]
[ "0.6858606", "0.68463606", "0.657868", "0.6389719", "0.6132686", "0.60244477", "0.593333", "0.593333", "0.5909812", "0.5907422", "0.5894825", "0.5894825", "0.5894825", "0.5868806", "0.58638215", "0.5859926", "0.5838459", "0.5817189", "0.5815836", "0.581031", "0.581031", "0.5752151", "0.5730724", "0.5728266", "0.5726174", "0.5719494", "0.57023853", "0.5682874", "0.5675103", "0.56722295" ]
0.8449123
0
Draw a plot of the MSE against lambda. Draw a plot of the MSE of the learning curve for lambda = 0 and lambda = 1.
def plot_mse(mse, lambda0, lambda1, scale, loc='lower right'): import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(211) ax.plot(*zip(*mse)) plt.xlabel('$\lambda$') plt.ylabel('MSE') plt.yticks(scale) ax = fig.add_subplot(212) ax.plot(*zip(*lambda0), label='$\lambda=0$') plt.xlabel('Episode') plt.ylabel('MSE') ax.plot(*zip(*lambda1), label='$\lambda=1$') plt.legend(loc=loc) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")", "def _plot_rmse(self, val=False):\n _, ax = plt.subplots()\n ax.plot(self.global_rmse, linewidth=3, color='blue', label='Train RMSE')\n ax.set_title('RMSE vs. Number of Iterations')\n if val is not None:\n ax.plot(self.validation_rmse, linewidth=3, color='green', label='Validation RMSE')\n ax.legend()\n plt.show()", "def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()", "def plot_losses(train, test, mode):\n\tplt.figure()\n\tplt.plot(range(len(train)), train, 'r', label='Training')\n\tplt.plot(range(len(test)), test, 'b', label='Testing')\n\tplt.title('MSE Loss (batch type: ' + mode + ')')\n\tplt.legend()\n\tplt.show()", "def plot(self) -> None:\n cw_l2_data_list = list(); cw_linf_data_list = list()\n\n for model in self.model_list:\n cw_l2_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_l2_1.pkl\"))\n\n cw_l2_attack = list(zip(self.model_list, cw_l2_data_list))\n\n for model in self.model_list:\n cw_linf_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_inf_1.pkl\"))\n\n cw_linf_attack = list(zip(self.model_list, cw_linf_data_list))\n\n # RMSE v.s. 
MAE over change budget\n # There will be one graph for each manipulation\n # CW_L2 ATTACK\n for datum in cw_l2_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Generate 10 ticks for the y_axis\n y_axis_ticks = np.linspace(0.0, 0.6, num=10, endpoint=True)\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n\n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + \"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_l2-attack-on-model-{}.png\".format(self.plot_name, model_tag), \n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n for datum in cw_linf_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n\n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n \n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + 
\"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_linf-attack-on-model-{}.png\".format(self.plot_name, model_tag),\n bbox_inches=\"tight\")\n plt.close()\n \"RMSE and MAE as Perturbation Budget increases for CW_Linf attack on model {}\".format(model_tag)\n \n # Scattter Index over the change budget\n # All the manipulations will be put on the same graph.\n # CW_L2 ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_l2_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_l2_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else:\n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_l2-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_linf_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_linf_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_linf_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else: \n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_linf-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()", "def 
visualization(epochs, mse_tr, mse_te):\n plt.semilogx(epochs, mse_tr, marker=\".\", color='b', label='train error')\n plt.semilogx(epochs, mse_te, marker=\".\", color='r', label='test error')\n plt.xlabel(\"k\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation\")", "def MSE(actual, noisy):\n mean_squared_error(actual, noisy)", "def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):\n\n lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())\n polynomials = np.array(k)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n\n j = 0\n for k in polynomials:\n print(k)\n\n model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)\n if method == 'Ridge':\n model.SVD()\n i = 0\n for lam in lambdas:\n\n if method == 'Ridge':\n beta = model.Ridge(lam = lam)\n elif method == 'Lasso':\n beta = model.Lasso(lam = lam, max_iter = max_iter)\n\n z_tilde = model.z_tilde(beta = beta, X = model.X_test)\n MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)\n i += 1\n j += 1\n\n print('Method = ', method)\n lambdas_min = []\n for i in range(len(polynomials)):\n minimum_index = MSE[i].argmin()\n print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())\n lambdas_min.append(int(minimum_index))\n\n #plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)\n #plt.colorbar()\n #plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.contourf(lambdas, polynomials, MSE)\n plt.colorbar()\n plt.ylabel('Polynomial order', fontsize = 14)\n plt.xlabel('Lambda', fontsize = 14)\n try:\n plt.savefig(results_dir + save_fig + 'contour' + '.png')\n except:\n pass\n plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.plot(lambdas, MSE[plot_indexes[0], :], label = 'k = ' + str(polynomials[plot_indexes[0]]))\n plt.plot(lambdas, MSE[plot_indexes[1], :], label = 'k = ' + str(polynomials[plot_indexes[1]]))\n plt.plot(lambdas, MSE[plot_indexes[2], :], label = 'k = ' + str(polynomials[plot_indexes[2]]))\n if l_min:\n plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))\n else:\n pass\n plt.legend()\n plt.xlabel('Lambda', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.tight_layout()\n try:\n plt.savefig(results_dir + save_fig + '.png')\n except:\n pass\n plt.show()\n return lambdas_min", "def plot_train_test_errors(train_errors, test_errors, lambda_str , K , path, rng):\n plt.plot(range(rng), train_errors, marker='o', label='Training Data');\n plt.plot(range(rng), test_errors, marker='v', label='Test Data');\n plt.title('ALS-WR Learning Curve, lambda = %s, K = %d'%(lambda_str, K))\n plt.xlabel('Number of Epochs');\n plt.ylabel('RMSE');\n plt.legend()\n plt.grid()\n plt.savefig(\"../results/test_train_rmse_\"+path)\n plt.show()", "def _show_learning_rate():\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(6.4 * 2, 4.8))\n\n # Visualize c_prime\n c_prime_list = np.linspace(1, 100, num=11)\n x_label = f\"c'\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[0]\n x_list = c_prime_list\n\n # MNIST\n y_list = [161, 16, 14, 15, 20, 21, 24, 27, 30, 30, 35]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = 
[63, 12, 12, 15, 18, 19, 22, 25, 26, 28, 30]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [1297, 724, 221, 80, 52, 51, 54, 54, 52, 60, 60]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n # Visualize t0\n t0_list = np.linspace(1, 100, num=11)\n x_label = f\"t0\"\n y_label = \"Minimum Clusters Size\"\n title = \"\"\n\n ax = axes[1]\n x_list = t0_list\n\n # MNIST\n y_list = [16, 16, 16, 16, 16, 17, 16, 16, 16, 16, 16]\n ax.plot(x_list, y_list, label=\"MNIST\")\n\n # Fashion MNIST\n y_list = [12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12]\n ax.plot(x_list, y_list, label=\"Fashion MNIST\")\n\n # 20 news groups\n y_list = [765, 765, 767, 772, 772, 773, 789, 789, 793, 796, 799]\n ax.plot(x_list, y_list, label=\"Newsgroups\")\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n ax.legend()\n ax.set_yscale('log')\n\n plt.show()", "def plot_loss_curve(num_epochs, losses):\n plt.xlabel('Epochs')\n plt.ylabel('Loss') \n plt.title('Loss Curve') \n plt.plot(range(num_epochs), losses)\n plt.show()", "def plot_learning_curve(self, X_test, y_test, plot_color):\r\n if self.X_scaler is not None:\r\n X_test = self.X_scaler.transform(X_test)\r\n\r\n maes = []\r\n for model_ in self.sub_models:\r\n scale_ = self.y_scaler.scale_ if self.y_scaler is not None else 1\r\n mean_ = self.y_scaler.mean_ if self.y_scaler is not None else 0\r\n y_pred = (model_.predict(X_test) * scale_ + mean_)\r\n maes.append(metrics.mean_absolute_error(y_true=y_test,\r\n y_pred=y_pred))\r\n maes.append(metrics.mean_absolute_error(\r\n y_true=y_test, \r\n y_pred=(self.model_.predict(X_test) * scale_ + mean_)))\r\n pretty_plot(\r\n x=[0.1 * i for i in range(1, 11)], y=maes,\r\n xlabel='Training Data Fraction',\r\n ylabel=self.target_label+' MAE (kcal/mol)',\r\n marker='s', markerfacecolor=plot_color,\r\n markeredgecolor='black', c=plot_color, markersize=30,\r\n markeredgewidth=2, xticksize=24, yticksize=24)\r\n # alternate axes\r\n ax = plt.gca()\r\n secax = ax.secondary_xaxis('top', functions=(\r\n lambda x: x * self.X_train.shape[0],\r\n lambda x: x / self.X_train.shape[0]))\r\n secax.set_xlabel('Number of Training Molecules', fontsize=20)\r\n secax.set_xticklabels([0.1 * i * self.X_train.shape[0] \r\n for i in range(1, 11)],\r\n fontsize=20)\r\n plt.show()", "def plot_cv_errors(errors, lambdas , K , path): \n colors = cycle([\"aqua\", \"black\", \"blue\", \"fuchsia\", \"gray\", \"green\", \"lime\", \"maroon\", \"navy\", \"olive\", \"purple\", \"red\", \"silver\", \"teal\", \"yellow\"])\n \n markers = cycle([ \".\", \",\", \"o\", \"v\" , \"^\" , \">\", \"1\", \"2\", \"3\", \"4\", \"8\", \"s\", \"p\", \"*\", \"h\"])\n \n \n for i, data in enumerate(errors):\n \n lambda_str = ('%f' % lambdas[i]).rstrip('0').rstrip('.')\n plt.plot(range(len(data)), data, marker=next(markers), label='$\\lambda$ = %s'%lambda_str);\n \n plt.ylim(0.975 , 0.99)\n #plt.xlim(0 , 50)\n plt.title('ALS-WR Learning Curve, K = %d'% K)\n plt.xlabel('Number of Epochs');\n plt.ylabel('RMSE');\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.grid()\n plt.savefig(\"../results/\"+path)\n plt.show()", "def figure_10_12_b():\n xs = np.arange(-6,6,0.1)\n plt.plot(xs,sigmoid(xs))\n x=2.5\n plt.scatter(x,sigmoid(x))\n plt.plot(xs,logistic_lower_bound(xs,x))\n plt.show()", "def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in 
np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def plot_learning_curve(model, X_train, X_test, y_train, y_test):\n\n m, train_scores, valid_scores = learning_curve(estimator = model, \n X = X_train, y = y_train.ravel(), train_sizes = np.linspace(0.1,1.0, 80))\n\n train_cv_err = np.mean(train_scores, axis=1)\n test_cv_err = np.mean(valid_scores, axis=1)\n tr, = plt.plot(m, train_cv_err)\n ts, = plt.plot(m, test_cv_err)\n plt.legend((tr, ts), ('training error', 'test error'), loc = 'best')\n plt.title('Learning Curve')\n plt.xlabel('Data Points')\n plt.ylabel('Accuracy')", "def rmse(x, y):\n return mse(x, y) ** .5", "def plot_scatter(self):\n if Trainer.y_pred is None or Trainer.y_true is None:\n messagebox.showerror(\"Information\", \"Please train the model first before plotting\")\n return\n\n fig = plt.figure(figsize=(8, 4))\n plt.xlabel(\"Prediction\")\n plt.ylabel(\"Target\")\n plt.figtext(0, 0, f\"RMSE: {self.test_rmse}\", fontsize=13)\n plt.grid()\n plt.scatter(x=Trainer.y_true, y=Trainer.y_pred, c='b', s=1)\n\n win = tk.Toplevel()\n win.wm_title(\"Window\")\n win.geometry(\"1000x500\")\n\n # specify the window as master\n canvas = FigureCanvasTkAgg(fig, master=win)\n canvas.draw()\n canvas.get_tk_widget().grid(row=0, column=0, sticky=tk.W)\n\n # navigation toolbar\n toolbarFrame = tk.Frame(master=win)\n toolbarFrame.grid(row=1, column=0)\n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)", "def train_test_error(e_train, e_test, model_params):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.plot(model_params, e_train, label='Training Set')\n plt.plot(model_params, e_train, label='Test Set')\n plt.xlabel('Model Parameter')\n plt.ylabel('MSE of model')\n plt.legend()\n\n return fig", "def evaluate(y_train,train_preds,y_test,test_preds,model,features):\n \n plt.figure(figsize=(3.5,3))\n train_mae = np.abs(y_train-train_preds).mean()\n test_mae = np.abs(y_test-test_preds).mean()\n print(model, split)\n print('Test MAE: %4.2f'%(test_mae))\n print('Train MAE: %4.2f'%(train_mae))\n plt.plot(y_train,train_preds,'.',c='grey',ms=3,label='Train MAE: %4.2f'%train_mae,alpha=1)\n plt.plot(y_test,test_preds,'.',c=colors[model],ms=3,label='Test MAE: %4.2f'%test_mae,alpha=1)\n #plt.title('%s: Train MAE = %.2f eV; Test MAE = %.2f eV'%(model,np.abs(y_train-train_preds).mean(),np.abs(y_test-test_preds).mean()),fontsize=10)\n\n xlim = plt.gca().get_xlim()\n ylim = plt.gca().get_ylim()\n \n alim = (min(xlim[0],ylim[0]),max(xlim[1],ylim[1]))\n\n plt.plot(alim,alim,'-k',lw=1)\n plt.gca().set_ylim(alim)\n plt.gca().set_xlim(alim)\n\n plt.xlabel('$\\Delta E$ (eV)')\n plt.ylabel('$\\Delta E$ Predicted (eV)')\n\n plt.legend(loc='best')\n\n plt.tight_layout()\n plt.savefig('./parity/'+model+'_'+features+'_'+split+'_parity.pdf')\n \n #Functionality to plot histogram of error if desired\n\n #plt.figure()\n #plt.hist(preds-y,bins=100)\n #plt.xlabel('$\\hat{y} - y$ (eV)')\n #plt.title('%s %s: MAE = %.2f eV'%(model,tt,np.abs(y-preds).mean()))\n\n #xlim = np.array(plt.gca().get_xlim())\n #xmax = np.abs(xlim).max()\n 
#plt.gca().set_xlim([-xmax,xmax])\n\n #plt.savefig('./output/'+model+'_'+features+'_'+split+'_'+tt+'_hist.pdf')\n\n return", "def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel(\"Fitness\")\n plt.xlabel(\"Iteration\")\n plt.show()", "def plot_cv_train_test(test_avg, train_avg, lambdas, path):\n\n plt.plot(lambdas, test_avg, marker = \"o\", color=\"green\", label=\"validating cv error\")\n plt.plot(lambdas, train_avg, marker = \"v\", color=\"blue\", label=\"training cv error\" )\n \n print(train_avg[0])\n print(test_avg[0])\n \n plt.title(\"Cross Validation Error for Different Regularization Parameters\")\n plt.ylabel(\"10f cv RMSE\")\n plt.ylim(0.86 , 0.99)\n plt.xlabel(\"$\\lambda$\")\n plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n plt.grid()\n plt.savefig(\"../results/\"+path)\n plt.show()", "def plotErrors(losses, model_title ='Shallow Network, SGD, Batch Size = 10'):\n fig, axes = plt.subplots()\n\n x = np.arange(len(losses))\n\n axes.plot(x, losses)\n axes.set_ylabel(\"Loss (cross entropy)\")\n axes.set_xlabel(\"Number of iterations\")\n axes.set_title(model_title) \n\n plt.show() \n\n return None", "def rmse(y_true, y_pred): # -> Any:\n ...", "def plot_costs(self, threshold=0):\n epochs_range = np.arange(threshold, len(self.costs), 1)\n plt.plot(epochs_range, self.costs[threshold:], color='green', marker='o')\n plt.title('Cost function plot. Eta={:.2f} Lambda={:2.2f}'.format(self.eta, self.lambda_r))\n plt.xlabel('Epochs')\n plt.ylabel('Cost')\n plt.grid(True)\n plt.show()", "def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig", "def plot_loss(self):\n train_elbo_range = range(len(self.train_elbo_hist))\n val_elbo_range = range(len(self.val_elbo_hist))\n train_loss_range = range(len(self.train_loss_hist))\n val_loss_range = range(len(self.val_loss_hist))\n\n fig, ax = plt.subplots(2, 2)\n ax[0][0].plot(train_elbo_range, self.train_elbo_hist)\n ax[0][0].title.set_text(\"Train ELBO\")\n ax[0][1].plot(val_elbo_range, self.val_elbo_hist)\n ax[0][1].title.set_text(\"Val ELBO\")\n ax[1][0].plot(train_loss_range, self.train_loss_hist)\n ax[1][0].title.set_text(\"Train MSE\")\n ax[1][1].plot(val_loss_range, self.val_loss_hist)\n ax[1][1].title.set_text(\"Val MSE\")\n plt.tight_layout()\n plt.show()", "def plot_average_MAE(train_datagen, val_datagen, model, gender = None):\n ages = np.arange(15,41)\n y_true, y_pred, true_pred_df = get_ytrue_ypred(model, val_datagen)\n mae_average = get_average_MAE(true_pred_df)\n count_train = get_count_train(train_datagen)\n print(count_train)\n \n fig, ax = plt.subplots(figsize = (12,5))\n ax.plot(ages, mae_average, label = 'Average MAE', linewidth = 2)\n ax.scatter(ages, mae_average)\n ax2 = ax.twinx()\n ax2.plot(ages, count_train, color = 'steelblue',label = 'Count of Images')\n ax2.fill_between(ages,count_train,alpha = 0.1, color='steelblue')\n ax2.set_ylabel('Count of Images per age')\n ax.set_xticks(ages)\n ax.set_xlabel('Age')\n ax.set_ylabel('MAE')\n ax.set_xlim(left=14.5, right=40.5)\n ax.set_ylim(bottom = 0)\n ax2.set_ylim(bottom = 0)\n ax2.grid(None)\n ax2.legend(loc = 'upper center')\n ax.legend()\n if gender == 'M':\n ax.set_title('Average MAE per age - Male')\n elif gender == 'F':\n ax.set_title('Average MAE per age - Female')\n else:\n ax.set_title('Average MAE per 
age')", "def plot_sigmoid():\n X = np.linspace(-10, 10, 100)\n sX = sigmoid(X)\n plt.figure(figsize=(15,5))\n plt.xlabel(r'$\\theta^Tx^{(i)}$')\n plt.ylabel(r'$h(x^{(i)}, \\theta)$')\n plt.plot(X, sX)\n plt.show()" ]
[ "0.6702873", "0.65018433", "0.64510727", "0.6430095", "0.63851696", "0.63240314", "0.62314105", "0.6179983", "0.6178999", "0.6125472", "0.61104375", "0.6096812", "0.6083498", "0.60187566", "0.5927236", "0.59271985", "0.5897805", "0.5864213", "0.58363026", "0.5832725", "0.5800747", "0.5799684", "0.5779136", "0.5772726", "0.57684994", "0.5766894", "0.57611203", "0.5752273", "0.57439846", "0.57423836" ]
0.7470588
0
! Brief Gets the binary and source files from the Github Release server [in] `tag_name` Git tag of the current release [in] `config` config metadata set in main.py `List[ReleaseFile]` List of release files `Dict[str, SourceFile]` Dictionary of source files Sends an `HTTP GET` request to github using their REST API to retrieve metadata. The files are not actually downloaded here, just their metadata is gathered and organized in their respective container for later use.
def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]: @retry_multi(5) # retry at most 5 times def execute_request(path): """! @brief Performs a GET request with the given path. To be used with Github's REST API. @returns If successful, returns a .JSON object """ headers = { "Accept": "application/vnd.github.v3+json" } url = "https://api.github.com" + path # GET https://api.github.com/<path> Accept: "application/vnd.github.v3+json" response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT) response.raise_for_status() # Raise a RequestException if we failed, and trigger retry return response.json() build_group_regex = re.compile("fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*") # regex for matching binary .zip's and .7z's source_file_regex = re.compile("fs2_open_.*-source-([^.]*)?.*") # regex for matching source .zip's and .7z's # Get the github release metadata of the given tag name response = execute_request( "/repos/{}/releases/tags/{}".format(config["github"]["repo"], tag_name)) # Extract the binary and source files from the response["asset"] metadata binary_files = [] source_files = {} for asset in response["assets"]: url = asset["browser_download_url"] name = asset["name"] group_match = build_group_regex.match(name) if group_match is not None: platform = group_match.group(1) # x64 is the Visual Studio name but for consistency we need Win64 if platform == "x64": platform = "Win64" binary_files.append(ReleaseFile(name, url, platform, group_match.group(3))) else: group_match = source_file_regex.match(name) if group_match is None: continue group = group_match.group(1) source_files[group] = SourceFile(name, url, group) binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name) return binary_files, source_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(docker_hub_client, args):\n resp = docker_hub_client.get_tags(args.orgname, args.reponame, args.page)\n if resp['code'] == 200:\n if resp['content']['count'] > 0:\n rows = []\n for repo in resp['content']['results']:\n formatted_date = ''\n if repo['last_updated']:\n formatted_date = dateutil.parser \\\n .parse(repo['last_updated'])\n formatted_date = formatted_date.strftime(\"%Y-%m-%d %H:%M\")\n # Convert full_size in bytes to KB\n size_in_kb = repo['full_size'] / 1024\n formatted_size = readableMemoryFormat(size_in_kb)\n rows.append([repo['name'], formatted_size, formatted_date])\n header = ['Name', 'Size', 'Last updated']\n print_result(args.format, rows, header, resp['content']['count'],\n args.page)\n else:\n print('Error fetching tags for: {0}/{1}'.\n format(args.orgname, args.reponame))", "def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def download_binaries(binary_name_stem, cwd):\n try:\n all_tags = fetch_git_tags()\n # *release_process.yml* will create a tag and release at first\n second_last_tag = all_tags[-2]\n latest_tag = all_tags[-1]\n\n latest_url = \"https://api.github.com/repos/bytecodealliance/wasm-micro-runtime/releases/latest\"\n print(f\"::notice::query the latest release with {latest_url}...\")\n with urllib.request.urlopen(latest_url) as response:\n body = response.read()\n\n release_name = json.loads(body)[\"name\"]\n\n # WAMR-X.Y.Z -> X.Y.Z\n second_last_sem_ver = second_last_tag[5:]\n latest_sem_ver = latest_tag[5:]\n assert latest_sem_ver in binary_name_stem\n name_stem_in_release = binary_name_stem.replace(\n latest_sem_ver, second_last_sem_ver\n )\n\n # download and rename\n for file_ext in (\".zip\", \".tar.gz\"):\n assets_url = f\"https://github.com/bytecodealliance/wasm-micro-runtime/releases/download/{release_name}/{name_stem_in_release}{file_ext}\"\n local_path = f\"{binary_name_stem}{file_ext}\"\n print(f\"::notice::download from {assets_url} and save as {local_path}...\")\n urllib.request.urlretrieve(assets_url, local_path)\n return True\n except HTTPError as error:\n print(error.status, error.reason)\n except URLError as error:\n print(error.reason)\n except TimeoutError:\n print(\"Request timeout\")\n\n return False", "def _fetch_latest_config_tag():\n github_release_url = config()['github_release_url']\n if config()['github_token']:\n headers = {'Authorization': f\"token {config()['github_token']}\"}\n else:\n headers = {}\n try:\n resp = requests.get(url=github_release_url, headers=headers)\n except Exception as err:\n logging.error(f\"Unable to fetch indexer config from github: {err}\")\n # Ignore any error and continue; try the fetch again later\n return None\n if not resp.ok:\n logging.error(f\"Unable to fetch indexer config from github: {resp.text}\")\n return None\n data = resp.json()\n return 
data['tag_name']", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def _fetch_srcs(opts, cache_dir, revision, desc=None, refspecs=None):\n\n git_dir = '--git-dir=' + cache_dir\n\n if not desc:\n desc = 'repository: {}'.format(opts.name)\n\n log('fetching most recent sources')\n prepared_fetch_cmd = [\n git_dir,\n 'fetch',\n '--progress',\n '--prune',\n 'origin',\n ]\n\n # limit fetch depth\n target_depth = 1\n if opts._git_depth is not None:\n target_depth = opts._git_depth\n limited_fetch = (target_depth and 'releng.git.no_depth' not in opts._quirks)\n\n depth_cmds = [\n '--depth',\n str(target_depth),\n ]\n\n # if a revision is provided, first attempt to do a revision-specific fetch\n quick_fetch = 'releng.git.no_quick_fetch' not in opts._quirks\n if revision and quick_fetch:\n ls_cmd = [\n 'ls-remote',\n '--exit-code',\n 'origin',\n ]\n debug('checking if tag exists on remote')\n if GIT.execute(ls_cmd + ['--tags', 'refs/tags/{}'.format(revision)],\n cwd=cache_dir, quiet=True):\n debug('attempting a tag reference fetch operation')\n fetch_cmd = list(prepared_fetch_cmd)\n fetch_cmd.append('+refs/tags/{0}:refs/tags/{0}'.format(revision))\n if limited_fetch:\n fetch_cmd.extend(depth_cmds)\n\n if GIT.execute(fetch_cmd, cwd=cache_dir):\n debug('found the reference')\n return True\n\n debug('checking if reference exists on remote')\n if GIT.execute(ls_cmd + ['--heads', 'refs/heads/{}'.format(revision)],\n cwd=cache_dir, quiet=True):\n debug('attempting a head reference fetch operation')\n fetch_cmd = list(prepared_fetch_cmd)\n fetch_cmd.append(\n '+refs/heads/{0}:refs/remotes/origin/{0}'.format(revision))\n if limited_fetch:\n fetch_cmd.extend(depth_cmds)\n\n if GIT.execute(fetch_cmd, cwd=cache_dir):\n debug('found the reference')\n return True\n\n # fetch standard (and configured) refspecs\n std_refspecs = [\n '+refs/heads/*:refs/remotes/origin/*',\n '+refs/tags/*:refs/tags/*',\n ]\n prepared_fetch_cmd.extend(std_refspecs)\n\n # allow fetching addition references if configured 
(e.g. pull requests)\n if refspecs:\n for ref in refspecs:\n prepared_fetch_cmd.append(\n '+refs/{0}:refs/remotes/origin/{0}'.format(ref))\n\n fetch_cmd = list(prepared_fetch_cmd)\n if limited_fetch:\n fetch_cmd.extend(depth_cmds)\n\n if not GIT.execute(fetch_cmd, cwd=cache_dir):\n err('unable to fetch branches/tags from remote repository')\n return False\n\n if revision:\n verbose('verifying target revision exists')\n exists_state = revision_exists(git_dir, revision)\n if exists_state in REVISION_EXISTS:\n pass\n elif (exists_state == GitExistsType.MISSING_HASH and\n limited_fetch and opts._git_depth is None):\n warn('failed to find hash on depth-limited fetch; fetching all...')\n\n fetch_cmd = list(prepared_fetch_cmd)\n fetch_cmd.append('--unshallow')\n\n if not GIT.execute(fetch_cmd, cwd=cache_dir):\n err('unable to unshallow fetch state')\n return False\n\n if revision_exists(git_dir, revision) not in REVISION_EXISTS:\n err('unable to find matching revision in {}\\n'\n ' (revision: {})', desc, revision)\n return False\n else:\n err('unable to find matching revision in {}\\n'\n 'revision: {})', desc, revision)\n return False\n\n return True", "def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] :\n\n tag_regex = re.compile(\"nightly_(.*)\")\n build_group_regex = re.compile(\"nightly_.*-builds-([^.]+).*\")\n\n files = []\n try:\n with FTP(config[\"ftp\"][\"host\"], config[\"ftp\"][\"user\"], config[\"ftp\"][\"pass\"]) as ftp:\n # extract version\n version_str = tag_regex.match(tag_name).group(1)\n\n # extract filepath w/ version\n # then list all ftp hits with that path\n path_template = config[\"ftp\"][\"path\"]\n path = path_template.format(type=build_type, version=version_str)\n file_entries = list(ftp.mlsd(path, [\"type\"]))\n\n # get all ftp hits of type file\n for entry in file_entries:\n if entry[1][\"type\"] == \"file\":\n files.append(entry[0])\n except error_perm:\n print(\"Received permanent FTP error!\")\n return []\n\n out_data = []\n for file in files:\n # from the file list, extract only nightly files\n file_match = build_group_regex.match(file)\n if file_match is None:\n print(\"Ignoring non nightly file '{}'\".format(file))\n continue\n\n group_match = file_match.group(1)\n primary_url = None\n mirrors = []\n\n # x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post\n if \"x64\" in group_match:\n group_match = group_match.replace(\"x64\", \"Win64\")\n\n # construct the download URL list for all mirrors. 
The first listed ftp location is taken as the Primary\n for mirror in config[\"ftp\"][\"mirrors\"]:\n download_url = mirror.format(type=build_type, version=version_str, file=file)\n if primary_url is None:\n primary_url = download_url\n else:\n mirrors.append(download_url)\n\n # Form the List[ReleaseFile] list with the download URL links\n out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors))\n\n return out_data", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def pull(release):\n image = f\"breqwatr/rsyslog:{release}\"\n ecr.pull(image)", "def get_release(request):\r\n\r\n release = raven.fetch_git_sha(os.path.dirname(os.path.dirname(__file__)))\r\n return HttpResponse(json.dumps({\"release\": release[:7]}))", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "async def get(name, version, results, progress):\n\n buf = BytesIO()\n\n client = httpx.AsyncClient()\n\n async with client.stream(\n \"GET\", f\"https://pydocs.github.io/pkg/{name}-{version}.zip\"\n ) as response:\n total = int(response.headers[\"Content-Length\"])\n\n download_task = progress.add_task(f\"Download {name} {version}\", total=total)\n async for chunk in response.aiter_bytes():\n buf.write(chunk)\n progress.update(download_task, completed=response.num_bytes_downloaded)\n\n if response.status_code != 200:\n results[(name, version)] = None\n else:\n buf.seek(0)\n results[(name, version)] = buf.read()", "def download_dependency(name, info, temp_path, build_path, config):\n if info[\"location\"] == \"s3\":\n logger.info(f\"Collecting {name} {info['version']} from S3\")\n download_dependency_s3(name, info[\"version\"], temp_path, build_path, config)\n\n if info[\"location\"] == \"github\":\n logger.info(f\"Collecting {name} at tag {info['tag']} from repository {info['repository']}\")\n download_dependency_github(name, info['repository'], info[\"tag\"], temp_path, build_path, config)\n\n if info[\"location\"] == \"url\":\n logger.info(f\"Collecting {name} at from {info['url']}\")\n download_dependency_url(name, info['url'], temp_path, build_path, config, zip=info.get(\"zip\", False))", "def get_tags_and_dates(repository_name):\n tags_query = \"SELECT t.name, c.commit_author_date \" \\\n \"FROM github_commit c, release_tag t \" \\\n \"where t.commit_url = c.url and t.repository=?\"\n return dbutils.execute_query(tags_query, (repository_name,), DATABASE_FILE)", "def do_get_repository_manifests(cs, args):\n resp, data = cs.repositories.get_manifests(args.repository, args.tag)\n utils.print_dict(data)", "def create_release(release_files, changelog=\"\", output=\"\") -> str:\n release_notes = \"\"\n if 'TRAVIS_TAG' not in os.environ or not 
os.environ['TRAVIS_TAG']:\n print('No git tag: not deploying anything')\n return release_notes\n elif os.environ['TRAVIS_SECURE_ENV_VARS'] != 'true':\n print('No secure environment variables: not deploying anything')\n return release_notes\n elif len(release_files) == 0:\n print('No file to release')\n return release_notes\n else:\n print('Creating release from tag {}'.format(os.environ['TRAVIS_TAG']))\n\n headers = {\n 'User-Agent': 'Deploy-Script',\n 'Authorization': 'token {}'.format(os.environ['GH_TOKEN'])\n }\n\n changelog_content = ''\n if changelog:\n with open(changelog, 'r') as changelog_file:\n changelog_content = changelog_file.read()\n\n create_raw_data = {\n \"tag_name\": os.environ['TRAVIS_TAG'],\n \"body\": \"\\n\\n{}\".format(changelog_content)\n }\n\n # if a release exist with this tag_name delete it first\n # this allows to create the release from github website\n url = '/repos/{repo_slug}/releases/tags/{tag}'.format(\n repo_slug=os.environ['TRAVIS_REPO_SLUG'],\n tag=os.environ['TRAVIS_TAG'])\n conn = http.client.HTTPSConnection('api.github.com')\n conn.request('GET', url, headers=headers)\n response = conn.getresponse()\n release = json.loads(response.read().decode())\n\n if 'upload_url' not in release:\n print('Failed to create release!')\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(release))\n exit(-1)\n\n conn = http.client.HTTPSConnection('uploads.github.com')\n for release_file in release_files:\n _, filename = os.path.split(release_file)\n headers['Content-Type'] = 'application/zip'\n url = '{release_url}?name={filename}'.format(release_url=release['upload_url'][:-13], filename=filename)\n print('Upload to {}'.format(url))\n\n with open(release_file, 'rb') as f:\n data = f.read()\n conn.request('POST', url, data, headers)\n\n response = conn.getresponse()\n result = response.read()\n if response.status != 201:\n print('Failed to upload filename {filename}'.format(filename=filename))\n print('Github API replied:')\n print('{} {}'.format(response.status, response.reason))\n print(repr(json.loads(result.decode())))\n print('File:')\n print(' Size: {}'.format(os.path.getsize(release_file)))\n\n if output:\n with open(output, 'w') as f:\n print(\"Writing release notes\")\n print(release_notes)\n f.write(release_notes)", "def do_show(cs, args):\n repo = args.repository\n tag_index = repo.find(':')\n if tag_index != -1:\n tag = repo[tag_index + 1:]\n repo = repo[:tag_index]\n else:\n tag = \"latest\"\n if repo.find('/') == -1:\n repo = \"library/\" + repo\n _, data = cs.repositories.get_manifests(repo, tag)\n utils.print_dict(data)", "def compile_source_metadata(sourcefile, config, year):\n metadata = dict(source_metadata)\n if isinstance(sourcefile, list):\n filename = sourcefile[0]\n else:\n filename = sourcefile\n data_retrieval_time = time.ctime(os.path.getmtime(filename))\n if data_retrieval_time is not None:\n metadata['SourceAcquisitionTime'] = data_retrieval_time\n metadata['SourceFileName'] = sourcefile\n metadata['SourceURL'] = config['url']\n if year in config:\n metadata['SourceVersion'] = config[year]['file_version']\n else:\n import re\n pattern = 'V[0-9]'\n version = re.search(pattern, filename, flags=re.IGNORECASE)\n if version is not None:\n metadata['SourceVersion'] = version.group(0)\n return metadata", "def main(\n req_files,\n verbose=False,\n outdated=False,\n latest=False,\n verbatim=False,\n repo=None,\n path=\"requirements.txt\",\n token=None,\n branch=\"master\",\n url=None,\n 
delay=None,\n):\n requirements = []\n\n if repo:\n github_url = build_github_url(repo, branch, path, token)\n req_file = get_requirements_file_from_url(github_url)\n requirements.extend(parse_req_file(req_file))\n elif url:\n req_file = get_requirements_file_from_url(url)\n requirements.extend(parse_req_file(req_file))\n else:\n for req_file in req_files:\n requirements.extend(parse_req_file(req_file, verbatim=verbatim))\n req_file.close()\n\n total_time_delta = 0\n max_outdated_time = 0\n results = []\n\n for req, version, ignore in requirements:\n if verbatim and not req:\n results.append(version)\n elif req:\n results.append(\n {\n \"req\": req,\n \"version\": version,\n \"ignore\": ignore,\n \"latest\": request(get_pypi_url(req)),\n \"specified\": request(get_pypi_url(req, version)),\n }\n )\n\n for result in results:\n if isinstance(result, str):\n print(result.replace(\"\\n\", \"\"))\n continue\n\n if result[\"ignore\"]:\n if verbatim:\n print(\"{}=={} # norot\".format(result[\"req\"], result[\"version\"]))\n else:\n print(\"Ignoring updates for {}. \".format(result[\"req\"]))\n continue\n\n req = result[\"req\"]\n version = result[\"version\"]\n\n latest_version, latest_release_date = get_version_and_release_date(\n req, verbose=verbose, response=result[\"latest\"]\n )\n specified_version, specified_release_date = get_version_and_release_date(\n req, version, response=result[\"specified\"]\n )\n\n if latest_release_date and specified_release_date:\n time_delta = (latest_release_date - specified_release_date).days\n total_time_delta = total_time_delta + time_delta\n max_outdated_time = max(time_delta, max_outdated_time)\n\n if verbose:\n if time_delta > 0:\n print(\n \"{} ({}) is {} days out of date. \"\n \"Latest is {}\".format(req, version, time_delta, latest_version)\n )\n elif version != latest_version:\n print(\n \"{} ({}) is out of date. 
\"\n \"Latest is {}\".format(req, version, latest_version)\n )\n elif not outdated:\n print(\"{} ({}) is up to date\".format(req, version))\n\n if latest and latest_version != specified_version:\n print(\n \"{}=={} # Updated from {}\".format(\n req, latest_version, specified_version\n )\n )\n elif verbatim and latest_version != specified_version:\n print(\n \"{}=={} # Latest {}\".format(req, specified_version, latest_version)\n )\n elif verbatim:\n print(\"{}=={}\".format(req, specified_version))\n\n elif verbatim:\n print(\"{}=={} # Error checking latest version\".format(req, version))\n\n verbatim_str = \"\"\n if verbatim:\n verbatim_str = \"# Generated with piprot {}\\n# \".format(VERSION)\n\n if total_time_delta > 0 and delay is None:\n print(\n \"{}Your requirements are {} \"\n \"days out of date\".format(verbatim_str, total_time_delta)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time > int(delay):\n print(\n \"{}At least one of your dependencies is {} \"\n \"days out of date which is more than the allowed\"\n \"{} days.\".format(verbatim_str, max_outdated_time, delay)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time <= int(delay):\n print(\n \"{}All of your dependencies are at most {} \"\n \"days out of date.\".format(verbatim_str, delay)\n )\n else:\n print(\n \"{}Looks like you've been keeping up to date, \"\n \"time for a delicious beverage!\".format(verbatim_str)\n )", "def do_pull_file(dbsync, bibkey):\n pass", "def get_python_package_info(name):\n command = [\"python\", \"setup.py\", \"--name\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n assert proc.stdout.readline().strip().decode(\"utf-8\") == name\n\n command = [\"python\", \"setup.py\", \"--version\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n release_version = proc.stdout.readline().strip().decode(\"utf-8\")\n\n command = [\"python\", \"setup.py\", \"--url\"]\n with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:\n github_url = proc.stdout.readline().strip().decode(\"utf-8\")\n\n github_repo = urlparse(github_url)\n assert github_repo.netloc == \"github.com\", \"specified repo is not on GitHub\"\n return (release_version, github_repo)", "def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, 
author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)", "async def get_releases(\n self, prerelease: bool = False, returnlimit: int = 5\n ) -> [\"AIOGitHubAPIRepositoryRelease\"] or list:\n _endpoint = f\"/repos/{self.full_name}/releases\"\n\n response = await self.client.get(endpoint=_endpoint)\n contents = []\n\n for content in response or []:\n if len(contents) == returnlimit:\n break\n if not prerelease:\n if content.get(\"prerelease\", False):\n continue\n contents.append(AIOGitHubAPIRepositoryRelease(content))\n\n return contents", "def main():\n parser = argparse.ArgumentParser(description='Fetch master build artifacts.')\n parser.add_argument('--token', type=str, help='API token to use')\n parser.add_argument(\n '--job', type=str, help='From what job to fetch artifacts from')\n parser.add_argument(\n '--artifact-download-dir',\n type=str,\n default='.',\n help='Where to download the artifacts')\n parser.add_argument(\n '--build-output-dir',\n type=str,\n default='.',\n help='Generated build files directory to use to compare for bloat')\n parser.add_argument(\n '--report-file',\n type=str,\n default='report.txt',\n help='From what job to fetch artifacts from')\n parser.add_argument(\n '--github-api-token',\n type=str,\n help='Github API token to upload the report as a comment')\n parser.add_argument(\n '--github-repository', type=str, help='Repository to use for PR comments')\n parser.add_argument(\n '--github-comment-pr-number',\n type=str,\n default=None,\n help='To what PR to comment in github')\n parser.add_argument(\n '--log-level',\n default=logging.INFO,\n type=lambda x: getattr(logging, x),\n help='Configure the logging level.')\n args = parser.parse_args()\n\n # Ensures somewhat pretty logging of what is going on\n logging.basicConfig(\n level=args.log_level,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n coloredlogs.install()\n\n if not args.token or not args.job:\n logging.error(\n 'Required arguments missing. 
Please specify at least job and token.')\n return\n\n try:\n ci_fetch_artifacts.fetchArtifactsForJob(args.token, args.job,\n args.artifact_download_dir)\n except Exception as e:\n logging.warning('Failed to fetch artifacts: %r', e)\n\n compareResults = generateBloatReport(\n args.report_file,\n args.artifact_download_dir,\n args.build_output_dir,\n title=\"Bloat report for job '%s'\" % args.job)\n\n if args.github_api_token and args.github_repository and args.github_comment_pr_number:\n sendFileAsPrComment(args.job, args.report_file, args.github_api_token,\n args.github_repository,\n int(args.github_comment_pr_number), compareResults)", "def download_dependency_github(name, repo, tag, temp_path, build_path, config):\n wp = os.getcwd()\n os.chdir(temp_path)\n # Clone into the repo, pull the specified tag\n clone_cmd = f\"git clone https://github.com/{repo}.git\"\n tag_cmd = f\"git checkout master && git fetch && git fetch --tags && git checkout {tag}\"\n os.system(clone_cmd)\n os.chdir(name)\n os.system(tag_cmd)\n os.chdir(wp)\n # Move the contents of GameData into the build directory\n shutil.copytree(os.path.join(temp_path, name, \"GameData\", name), os.path.join(build_path, \"GameData\", name))", "def cli(ctx, image_file):\n if not image_file:\n return\n for pull_image in image_file.readline():\n pull_image = pull_image.rstrip('\\n')\n if len(pull_image) == 0:\n continue\n docker.pull(pull_image)\n push_image = '%s/%s/%s' % (DEFAULT_REGISTRY,\n DEFAULR_NAMESPACE,\n pull_image.split('/')[-1])\n docker.tag(pull_image, push_image)\n docker.push(push_image)", "def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n if (result != None) and (self._config.get_boolean('releasable', False)):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % result)\r\n data['name'] = result.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = result.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + result.objectname + \" because the 'baseline.release' property is missing.\")\r\n return data", "def get_repo_info(loader, sha, prov_g):\n user_repo = loader.getFullName()\n repo_title = loader.getRepoTitle()\n repo_desc = loader.getRepoDescription()\n contact_name = loader.getContactName()\n contact_url = loader.getContactUrl()\n commit_list = loader.getCommitList()\n licence_url = loader.getLicenceURL() # This will be None if there is no license\n\n # Add the API URI as a used entity by the activity\n if prov_g:\n prov_g.add_used_entity(loader.getRepoURI())\n\n prev_commit = None\n next_commit = None\n version = sha if sha else commit_list[0]\n if commit_list.index(version) < len(commit_list) - 1:\n prev_commit = commit_list[commit_list.index(version) + 1]\n if commit_list.index(version) > 0:\n next_commit = commit_list[commit_list.index(version) - 1]\n\n info = {\n 'version': version,\n 'title': repo_title,\n 'description': repo_desc,\n 'contact': {\n 'name': contact_name,\n 'url': contact_url\n } \n }\n if licence_url:\n info['license'] = {\n 'name': 'License',\n 'url': licence_url\n }\n\n if type(loader) is GithubLoader:\n basePath = '/api-git/' + user_repo + '/'\n basePath += 
('subdir/' + loader.subdir + '/') if loader.subdir else ''\n basePath += ('commit/' + sha + '/') if sha else ''\n if type(loader) is GitlabLoader:\n basePath = '/api-gitlab/' + user_repo + '/query/' \n basePath += ('branch/' + loader.branch + '/') if loader.branch else ''\n basePath += ('subdir/' + loader.subdir.strip('/') + '/') if loader.subdir else ''\n basePath += ('commit/' + sha + '/') if sha else ''\n elif type(loader) is LocalLoader:\n basePath = '/api-local/'\n elif type(loader) is URLLoader:\n basePath = '/api-url/'\n else:\n # TODO: raise error\n glogger.error('Cannot set basePath, loader type unkown')\n\n return prev_commit, next_commit, info, basePath", "def version_check():\n try:\n with open('git.json', 'r') as fp:\n git_md = json.loads(fp.read())\n except IOError:\n # In the event that there is no git metadata, just print null values\n # twice.\n print \"null\"\n print \"null\"\n return\n\n if git_md['GitHub']:\n if git_md['GitHubUser'] is not None and git_md[\n 'GitHubRepo'] is not None:\n latest_release = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/releases/latest\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n latest_tag = latest_release['tag_name']\n\n # Go through all of the tags to see if this commit matches a tag.\n tags = json.loads(\n urllib2.urlopen(\n \"https://api.github.com/repos/%s/%s/git/refs/tags\" % (\n git_md['GitHubUser'], git_md['GitHubRepo'])).read())\n\n current_tag = \"Unreleased\"\n for tag in tags:\n if tag['object']['sha'] == git_md['GitSHA']:\n current_tag = tag['ref'].split('/')[-1]\n\n print current_tag\n print latest_tag\n else:\n print \"MissingGitHubDetails\"\n print \"MissingGitHubDetails\"\n else:\n # In the event that there is a git file, but it doesn't indicate GitHub\n # then just print some stuff indicating that.\n print \"NonGitHub\"\n print \"NonGitHub\"", "def get_git_data():\n global event, commit_data, issue_data\n lock.acquire()\n commit_data = get_commits()\n issue_data = get_issues()\n lock.release()\n event.enter(3600, 1, get_git_data, ())" ]
[ "0.59825385", "0.58761805", "0.57182723", "0.5670623", "0.5653557", "0.56310666", "0.5565626", "0.55542344", "0.5533877", "0.5509498", "0.5492166", "0.5470247", "0.5463593", "0.5457297", "0.54524344", "0.54264504", "0.54025966", "0.53729427", "0.53684837", "0.5364324", "0.5350243", "0.5344721", "0.5338533", "0.5337745", "0.53202593", "0.5313816", "0.53048766", "0.52828187", "0.5281831", "0.5272918" ]
0.80856
0
! Performs a GET request with the given path. To be used with Github's REST API. If successful, returns a .JSON object
def execute_request(path): headers = { "Accept": "application/vnd.github.v3+json" } url = "https://api.github.com" + path # GET https://api.github.com/<path> Accept: "application/vnd.github.v3+json" response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT) response.raise_for_status() # Raise a RequestException if we failed, and trigger retry return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get(self, path):\n r = requests.get(self._url(path))\n assert r.status_code == 200\n return r.json", "def get(self, path):\n url = urljoin(self.api_endpoint, path)\n response = requests.get(url, headers=self.headers)\n if response.status_code == requests.codes.ok:\n return response.json()\n elif response.status_code == requests.codes.not_found:\n return None\n else:\n response.raise_for_status()", "async def get(self, path, params=None, json_data=None):\n response = await self.request('GET', path, params, json_data)\n return response", "def _get(self, path=\"\", query={}, **kwargs):\n qs = urllib.urlencode(query)\n uri = force_json(self.uri + path) + \"?\" + qs\n return self.client.request(uri, method=\"GET\", **kwargs)", "def get(path, params=None):\n url = request_url.format(path)\n req = requests.get(url, params=params)\n return req.json()", "def get(self, path, params=None):\n \n # prep\n get_url = self.url(path)\n \n # request\n response = requests.get(get_url, params=params, auth=self.auth, headers=API.HEADERS)\n\n # test and return\n self.raise_for_status(response)\n return response.json()", "def get(self, url_or_path):\n return self.request.get(url_or_path).json()", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def api_get(self, path, query=None):\n return self._api_request(path, 'GET', query=query)", "def get(self, path):\n return self.request(path, method='GET')", "def get(self, path):\n response = self._request(\"GET\", path)\n return self._handle_response(response)", "def get(path: str, params={}):\n token = get_token()\n headers = {\n \"Authorization\": f\"Bearer {token}\"\n }\n return requests.get(get_base_url() + path, headers=headers, params=params)", "def request(path):\n headers = {'Accept': 'application/json'}\n try:\n requested_object = requests.get(path, headers=headers)\n requested_object.raise_for_status()\n except requests.exceptions.HTTPError as exception:\n LOGGER.error((inspect.stack()[0][3]) + ': HTTPError = ' +\n str(exception.response.status_code) + ' ' +\n str(exception.response.reason) + ' ' + str(path))\n raise\n except requests.exceptions.InvalidURL as exception:\n LOGGER.error('URLError = ' + str(exception.reason) + ' ' + str(path))\n raise\n except Exception:\n import traceback\n LOGGER.error('Generic exception: ' + traceback.format_exc())\n raise\n else:\n response = requested_object.json()\n return response", "def request_get(self, path, params=None):\n\tif params is None:\n\t\tparams = {}\n\t\trequest_url = self.host_url + path\n\t\ttry:\n\t\t\tresponse = self.session.get(request_url, auth=self.api_key, params=params)\n\t\texcept requests.RequestException as e:\n\t\t\traise self.DataUnavailable(\"Network exception\") from e\n\n\tif response.status_code != 200:\n\t\traise self.DataUnavailable(\n\t\t\t\"Unexpected response status (%s)\" % response.status_code\n\t\t)\n\n\treturn response.json()", "def GET(self, path, params={}):\n request_url = 'https://{0}:{1}/rest/{2}'.format(\n self.settings.api_host,\n self.settings.api_port,\n path\n )\n\n # Make the API request\n response = requests.get(request_url,\n auth = (self.settings.api_user, self.settings.api_password),\n verify = self.settings.verify_ssl,\n headers = self.settings.headers,\n params = params\n )\n\n # Request failed\n if not int(response.status_code) == 200:\n raise Exception('Failed to GET {0}: {1}'.format(request_url, response.json()))\n return response.json()", "def get(self, path=None, ref=None):\r\n params = base.get_params(('ref', ), 
locals())\r\n url = self.get_url()\r\n\r\n if path:\r\n url = '{0}/{1}'.format(url, path)\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def get(self, sha=None, path=None, page=None, per_page=None):\r\n params = base.get_params(\r\n ('sha', 'path', 'page', 'per_page'), locals())\r\n url = '{0}/{1}'.format(self.parent.get_url(), self.path)\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def request(host=API_HOST, path=SEARCH_PATH, api_key=API_KEY, url_params=params):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def get(self, api_path, *args, **kwargs):\n\n\t\treturn self._do_operation(u'get', api_path, *args, **kwargs)", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def get(self, endpoint, params=None):\n res = requests.get(\"https://api.github.com/\" + endpoint,\n auth=requests.auth.HTTPBasicAuth(self.credentials['username'], self.credentials['token']),\n params=params)\n return res.json()", "async def GET(node_id, path):\n r = session().get(f\"{HOST}:{BASE_PORT + node_id}{path}\")\n if r.status_code != 200:\n raise ValueError(f\"Got bad response {r.status_code} from \" +\n f\"node {node_id} on {path}.\")\n return {\"status_code\": r.status_code, \"data\": r.json()}", "def _get(self, path, params=None):\n return self._api.get_json(path, headers={\"Hawkular-Tenant\": self.tenant_id}, params=params)", "def request(host, path, api_key, url_params=None):\r\n url_params = url_params or {}\r\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\r\n headers = {\r\n 'Authorization': 'Bearer %s' % api_key,\r\n }\r\n\r\n print(u'Querying {0} ...'.format(url))\r\n \r\n response = requests.request('GET', url, headers=headers, params=url_params)\r\n\r\n return response.json()", "def get(self, path):\n base = 'http://%s:%d' % (self.host, self.port)\n url = '%s/%s' % (base, path)\n\n conn = urllib2.urlopen(url)\n payload = conn.read()\n try: \n payload = json.loads(payload)\n except:\n pass\n\n conn.close()\n\n return payload", "def github_request(self, path, callback, access_token=None,\n method='GET', body=None, **args):\n args[\"access_token\"] = access_token\n url = tornado.httputil.url_concat(self._API_URL + path, args)\n logging.debug('request to ' + url)\n http = tornado.httpclient.AsyncHTTPClient()\n if body is not None:\n body = tornado.escape.json_encode(body)\n logging.debug('body is' + body)\n headers = {}\n headers['User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36'\n \n http.fetch(url, callback=self.async_callback(\n self._parse_response, callback), method=method, body=body, headers=headers)", "def get(self, path: str, params: dict) -> dict:\n return self.request(\"GET\", path, params)", "def get(self, api_path, *args, **kwargs):\n\n return self._do_operation(u'get', api_path, *args, **kwargs)", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % 
api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def request(host, path, api_key, url_params=None):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n print(u'Querying {0} ...'.format(url))\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()" ]
[ "0.781501", "0.742558", "0.73796403", "0.7367606", "0.7359657", "0.7286444", "0.7283974", "0.7187162", "0.7146045", "0.7120918", "0.70935255", "0.7091099", "0.7059631", "0.7059341", "0.7053496", "0.70428777", "0.6986437", "0.69172233", "0.6824876", "0.6779515", "0.6770286", "0.6753325", "0.6702093", "0.6663738", "0.66609704", "0.66585326", "0.66337943", "0.66310793", "0.662193", "0.662193" ]
0.78895104
0
! Gets file metadata for nightlies hosted on FTP, as determined by config["ftp"] attributes [in] `build_type` Unknown str [in] `tag_name` Github tag name of the release [in] `config` config metadata set in main.py
def get_ftp_files(build_type, tag_name, config) -> List[ReleaseFile] : tag_regex = re.compile("nightly_(.*)") build_group_regex = re.compile("nightly_.*-builds-([^.]+).*") files = [] try: with FTP(config["ftp"]["host"], config["ftp"]["user"], config["ftp"]["pass"]) as ftp: # extract version version_str = tag_regex.match(tag_name).group(1) # extract filepath w/ version # then list all ftp hits with that path path_template = config["ftp"]["path"] path = path_template.format(type=build_type, version=version_str) file_entries = list(ftp.mlsd(path, ["type"])) # get all ftp hits of type file for entry in file_entries: if entry[1]["type"] == "file": files.append(entry[0]) except error_perm: print("Received permanent FTP error!") return [] out_data = [] for file in files: # from the file list, extract only nightly files file_match = build_group_regex.match(file) if file_match is None: print("Ignoring non nightly file '{}'".format(file)) continue group_match = file_match.group(1) primary_url = None mirrors = [] # x64 is the name Visual Studio uses but Win64 works better for us since that gets displayed in the nightly post if "x64" in group_match: group_match = group_match.replace("x64", "Win64") # construct the download URL list for all mirrors. The first listed ftp location is taken as the Primary for mirror in config["ftp"]["mirrors"]: download_url = mirror.format(type=build_type, version=version_str, file=file) if primary_url is None: primary_url = download_url else: mirrors.append(download_url) # Form the List[ReleaseFile] list with the download URL links out_data.append(ReleaseFile(file, primary_url, group_match, None, mirrors)) return out_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clowder_file_metadata(session, url, fileid):\n try:\n ret = session.get(posixpath.join(url, \"api/files\", fileid, \"metadata.jsonld\"))\n except session.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return ret", "def get_file_info(filename):\n info = {'buildroot_id': 0}\n info['filename'] = os.path.basename(filename)\n fbytes = os.path.getsize(filename)\n info['filesize'] = int(fbytes)\n # Kojihub only supports checksum_type: md5 for now.\n info['checksum_type'] = 'md5'\n checksum = util.get_md5sum(filename)\n info['checksum'] = checksum\n info['arch'] = 'x86_64'\n if filename.endswith('.tar.gz') or filename.endswith('.tar.xz'):\n info['type'] = 'tarball'\n elif filename.endswith('.deb'):\n info['type'] = 'deb'\n elif filename.endswith('.dsc'):\n info['type'] = 'dsc'\n elif filename.endswith('.log'):\n info['type'] = 'log'\n else:\n raise RuntimeError('unknown extension for %s' % filename)\n info['extra'] = {\n 'typeinfo': {\n 'debian': {},\n },\n }\n return info", "def build_ns_file_metadata(file_type):\r\n file_description = f\"Your Nightscout {file_type} data, last updated at {datetime.utcnow()} UTC.\"\r\n file_tags = [\"open-aps\", \"Nightscout\", file_type, \"json\"]\r\n file_updated = str(datetime.now())\r\n return {\"tags\": file_tags, \"description\": file_description, \"updated_at\": file_updated}", "def get_downloads_metadata():\n global _METADATA\n if _METADATA is None:\n _METADATA = yaml.safe_load(resource_string(__name__, \"downloads.yml\"))\n return _METADATA", "def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:\n\n @retry_multi(5)\t# retry at most 5 times\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()\n\n build_group_regex = re.compile(\"fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*\") # regex for matching binary .zip's and .7z's\n source_file_regex = re.compile(\"fs2_open_.*-source-([^.]*)?.*\") # regex for matching source .zip's and .7z's\n\n # Get the github release metadata of the given tag name\n response = execute_request(\n \"/repos/{}/releases/tags/{}\".format(config[\"github\"][\"repo\"], tag_name))\n\n # Extract the binary and source files from the response[\"asset\"] metadata\n binary_files = []\n source_files = {}\n for asset in response[\"assets\"]:\n url = asset[\"browser_download_url\"]\n name = asset[\"name\"]\n\n group_match = build_group_regex.match(name)\n\n if group_match is not None:\n platform = group_match.group(1)\n # x64 is the Visual Studio name but for consistency we need Win64\n if platform == \"x64\":\n platform = \"Win64\"\n\n binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n\n if group_match is None:\n continue\n\n group = group_match.group(1)\n\n source_files[group] = SourceFile(name, url, group)\n\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n\n return binary_files, source_files", "def parse_remote_metadata(self, timeout=30):\n for metadataUrl in self.metadataUrls:\n if (\n 
metadataUrl[\"url\"] is not None and metadataUrl[\"format\"].lower() == \"text/xml\"\n ):\n try:\n content = openURL(metadataUrl[\"url\"], timeout=timeout, headers=self.headers, auth=self.auth)\n doc = etree.fromstring(content.read())\n\n if metadataUrl[\"type\"] == \"FGDC\":\n mdelem = doc.find(\".//metadata\")\n if mdelem is not None:\n metadataUrl[\"metadata\"] = Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n elif metadataUrl[\"type\"] in [\"TC211\", \"19115\", \"19139\"]:\n mdelem = doc.find(\n \".//\" + nspath_eval(\"gmd:MD_Metadata\", namespaces)\n ) or doc.find(\n \".//\" + nspath_eval(\"gmi:MI_Metadata\", namespaces)\n )\n if mdelem is not None:\n metadataUrl[\"metadata\"] = MD_Metadata(mdelem)\n else:\n metadataUrl[\"metadata\"] = None\n except Exception:\n metadataUrl[\"metadata\"] = None", "def get_meta_info(meta_info_dir):\n files = os.listdir(meta_info_dir)\n retrieval_info = [os.path.join(meta_info_dir, item) for item in files if item.startswith(\"ret\")]\n test_info = [os.path.join(meta_info_dir, item) for item in files if item.startswith(\"test\")]\n return retrieval_info, test_info", "def feature_static_metadata(self):\n # Get binary size\n self.features[\"size\"] = \\\n self.report.get(\"target\", {}).get(\"file\", {}).get(\"size\")\n\n # Get binary timestamp in the UNIX timestamp format\n str_dt = self.report.get(\"static\", {}).get(\"pe_timestamp\")\n ts = None\n if str_dt is not None:\n dt = datetime.datetime.strptime(str_dt, \"%Y-%m-%d %H:%M:%S\")\n ts = int(time.mktime(dt.timetuple()))\n self.features[\"timestamp\"] = ts\n\n # ExifTool output\n et_tokens = [\"FileDescription\", \"OriginalFilename\"]\n for token in et_tokens:\n self.features[token] = None\n for attr in self.report.get(\"static\", {}).get(\"pe_versioninfo\", []):\n attr_name = attr.get(\"name\")\n if attr_name in et_tokens:\n self.features[attr_name] = attr.get(\"value\")\n\n # Magic byte\n self.features[\"magic_byte\"] = \\\n self.report.get(\"target\", {}).get(\"file\", {}).get(\"type\")", "def __init__(self, updater_name, repository_mirrors):\n \n # Do the arguments have the correct format?\n # These checks ensure the arguments have the appropriate\n # number of objects and object types and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if there is a mistmatch.\n tuf.formats.NAME_SCHEMA.check_match(updater_name)\n tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)\n \n # Save the validated arguments.\n self.name = updater_name\n self.mirrors = repository_mirrors\n\n # Store the trusted metadata read from disk.\n self.metadata = {}\n \n # Store the currently trusted/verified metadata.\n self.metadata['current'] = {} \n \n # Store the previously trusted/verified metadata.\n self.metadata['previous'] = {}\n\n # Store the file information of all the metadata files. The dict keys are\n # paths, the dict values fileinfo data. This information can help determine\n # whether a metadata file has changed and so needs to be re-downloaded.\n self.fileinfo = {}\n \n # Store the location of the client's metadata directory.\n self.metadata_directory = {}\n \n # Ensure the repository metadata directory has been set.\n if tuf.conf.repository_directory is None:\n message = 'The TUF update client module must specify the directory' \\\n ' containing the local repository files.' \\\n ' \"tuf.conf.repository_directory\" MUST be set.'\n raise tuf.RepositoryError(message)\n\n # Set the path for the current set of metadata files. 
\n repository_directory = tuf.conf.repository_directory\n current_path = os.path.join(repository_directory, 'metadata', 'current')\n \n # Ensure the current path is valid/exists before saving it.\n if not os.path.exists(current_path):\n message = 'Missing '+repr(current_path)+'. This path must exist and, ' \\\n 'at a minimum, contain the root metadata file.' \n raise tuf.RepositoryError(message)\n self.metadata_directory['current'] = current_path\n \n # Set the path for the previous set of metadata files. \n previous_path = os.path.join(repository_directory, 'metadata', 'previous') \n \n # Ensure the previous path is valid/exists.\n if not os.path.exists(previous_path):\n message = 'Missing '+repr(previous_path)+'. This path must exist.'\n raise tuf.RepositoryError(message)\n self.metadata_directory['previous'] = previous_path\n \n # Load current and previous metadata.\n for metadata_set in ['current', 'previous']:\n for metadata_role in ['root', 'targets', 'release', 'timestamp']:\n self._load_metadata_from_file(metadata_set, metadata_role)\n \n # Raise an exception if the repository is missing the required 'root'\n # metadata.\n if 'root' not in self.metadata['current']:\n message = 'No root of trust! Could not find the \"root.txt\" file.'\n raise tuf.RepositoryError(message)", "def wf_info(workflow_path):\n\n supported_formats = [\"py\", \"wdl\", \"cwl\"]\n file_type = workflow_path.lower().split(\".\")[-1] # Grab the file extension\n workflow_path = workflow_path if \":\" in workflow_path else \"file://\" + workflow_path\n\n if file_type in supported_formats:\n if workflow_path.startswith(\"file://\"):\n version = get_version(file_type, workflow_path[7:])\n elif workflow_path.startswith(\"https://\") or workflow_path.startswith(\n \"http://\"\n ):\n # If file not local go fetch it.\n html = urlopen(workflow_path).read()\n local_loc = os.path.join(os.getcwd(), \"fetchedFromRemote.\" + file_type)\n with open(local_loc, \"w\") as f:\n f.write(html.decode())\n version = wf_info(\"file://\" + local_loc)[\n 0\n ] # Don't take the file_type here, found it above.\n os.remove(\n local_loc\n ) # TODO: Find a way to avoid recreating file before version determination.\n else:\n raise NotImplementedError(\n \"Unsupported workflow file location: {}. Must be local or HTTP(S).\".format(\n workflow_path\n )\n )\n else:\n raise TypeError(\n \"Unsupported workflow type: .{}. Must be {}.\".format(\n file_type, \".py, .cwl, or .wdl\"\n )\n )\n return version, file_type.upper()", "def _update_metadata(self, metadata_role, fileinfo, compression=None):\n\n # Construct the metadata filename as expected by the download/mirror modules.\n metadata_filename = metadata_role + '.txt'\n uncompressed_metadata_filename = metadata_filename\n \n # The 'release' or Targets metadata may be compressed. Add the appropriate\n # extension to 'metadata_filename'. \n if compression == 'gzip':\n metadata_filename = metadata_filename + '.gz'\n\n # Extract file length and file hashes. They will be passed as arguments\n # to 'download_file' function.\n compressed_file_length = fileinfo['length']\n uncompressed_file_hashes = fileinfo['hashes']\n\n # Attempt a file download from each mirror until the file is downloaded and\n # verified. If the signature of the downloaded file is valid, proceed,\n # otherwise log a warning and try the next mirror. 'metadata_file_object'\n # is the file-like object returned by 'download.py'. 'metadata_signable'\n # is the object extracted from 'metadata_file_object'. 
Metadata saved to\n # files are regarded as 'signable' objects, conformant to\n # 'tuf.formats.SIGNABLE_SCHEMA'.\n #\n # Some metadata (presently timestamp) will be downloaded \"unsafely\", in the\n # sense that we can only estimate its true length and know nothing about\n # its hashes. This is because not all metadata will have other metadata\n # for it; otherwise we will have an infinite regress of metadata signing\n # for each other. In this case, we will download the metadata up to the\n # best length we can get for it, not check its hashes, but perform the rest\n # of the checks (e.g signature verification).\n #\n # Note also that we presently support decompression of only \"safe\"\n # metadata, but this is easily extend to \"unsafe\" metadata as well as\n # \"safe\" targets.\n\n if metadata_role == 'timestamp':\n metadata_file_object = \\\n self.unsafely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length)\n else:\n metadata_file_object = \\\n self.safely_get_metadata_file(metadata_role, metadata_filename,\n compressed_file_length,\n uncompressed_file_hashes,\n compression=compression)\n\n # The metadata has been verified. Move the metadata file into place.\n # First, move the 'current' metadata file to the 'previous' directory\n # if it exists.\n current_filepath = os.path.join(self.metadata_directory['current'],\n metadata_filename)\n current_filepath = os.path.abspath(current_filepath)\n tuf.util.ensure_parent_dir(current_filepath)\n \n previous_filepath = os.path.join(self.metadata_directory['previous'],\n metadata_filename)\n previous_filepath = os.path.abspath(previous_filepath)\n if os.path.exists(current_filepath):\n # Previous metadata might not exist, say when delegations are added.\n tuf.util.ensure_parent_dir(previous_filepath)\n shutil.move(current_filepath, previous_filepath)\n\n # Next, move the verified updated metadata file to the 'current' directory.\n # Note that the 'move' method comes from tuf.util's TempFile class.\n # 'metadata_file_object' is an instance of tuf.util.TempFile.\n metadata_signable = tuf.util.load_json_string(metadata_file_object.read())\n if compression == 'gzip':\n current_uncompressed_filepath = \\\n os.path.join(self.metadata_directory['current'],\n uncompressed_metadata_filename)\n current_uncompressed_filepath = \\\n os.path.abspath(current_uncompressed_filepath)\n metadata_file_object.move(current_uncompressed_filepath)\n else:\n metadata_file_object.move(current_filepath)\n\n # Extract the metadata object so we can store it to the metadata store.\n # 'current_metadata_object' set to 'None' if there is not an object\n # stored for 'metadata_role'.\n updated_metadata_object = metadata_signable['signed']\n current_metadata_object = self.metadata['current'].get(metadata_role)\n\n # Finally, update the metadata and fileinfo stores.\n logger.debug('Updated '+repr(current_filepath)+'.')\n self.metadata['previous'][metadata_role] = current_metadata_object\n self.metadata['current'][metadata_role] = updated_metadata_object\n self._update_fileinfo(metadata_filename)", "def get_and_update_metadata():\n if not os.path.exists('.git') and os.path.exists(METADATA_FILENAME):\n with open(METADATA_FILENAME) as fh:\n metadata = json.load(fh)\n else:\n git = Git()\n revision = os.environ.get('TRAVIS_BUILD_NUMBER', git.revision)\n split_version = git.version.split('.')\n split_version[-1] = revision\n version = '.'.join(split_version)\n metadata = {\n 'version': version,\n 'git_hash': git.hash,\n 'git_origin': git.origin,\n 
'git_branch': git.branch,\n 'git_version': git.version\n }\n with open(METADATA_FILENAME, 'w') as fh:\n json.dump(metadata, fh)\n return metadata", "def metadata(self):\n return UnpackedSDist(self.find_egg_info_file())", "def getHostFsInfo(hostfs):\n pattern = re.compile('^([^\\.]+)\\.([^\\.]+)\\.([^\\.]+)-(([0-9]+\\.)+([0-9]+))\\.([^\\.]+)$')\n result = pattern.match(hostfs)\n if result is None:\n return None\n else:\n version = result.group(4)\n platform = result.group(1)\n cpu = result.group(2)\n endian = result.group(3)\n ext = result.group(7)\n return {\n 'name': hostfs,\n 'file': hostfs,\n 'filepath': hostfs,\n 'version': version,\n 'platform': platform,\n 'cpu': cpu,\n 'endian': endian,\n 'type': ext\n }", "def get_metadata():\n\n module = __name__.split('.', 1)\n\n pkg = pkg_resources.get_distribution(module[0])\n meta = {\n 'Name': None,\n 'Version': None,\n 'Summary': None,\n 'Home-page': None,\n 'Author': None,\n 'Author-email': None,\n 'License': None,\n }\n\n for line in pkg.get_metadata_lines(\"PKG-INFO\"):\n for par in meta:\n if line.startswith(par + \":\"):\n _, value = line.split(\": \", 1)\n meta[par] = value\n\n return meta", "def extract_metadata(self):\n metadata_file_path = self.create_metadata_file(\".metadata.txt\")\n mt = self.mimetype\n metadata_processing_method = self.metadata_mimetype_methods.get(mt)\n if metadata_processing_method:\n # TODO: should we return metadata and write it here instead of in processing method?\n metadata_processing_method(metadata_file_path)", "def get_cf6_files(config, stid, num_files=1):\n\n # Create directory if it does not exist\n site_directory = '%s/site_data' % config['THETAE_ROOT']\n if config['debug'] > 50:\n print('get_cf6_files: accessing site data in %s' % site_directory)\n\n # Construct the web url address. 
Check if a special 3-letter station ID is provided.\n nws_url = 'http://forecast.weather.gov/product.php?site=NWS&issuedby=%s&product=CF6&format=TXT'\n try:\n stid3 = config['Stations'][stid]['station_id3']\n except KeyError:\n stid3 = stid[1:].upper()\n nws_url = nws_url % stid3\n\n # Determine how many files (iterations of product) we want to fetch\n if num_files == 1:\n print('get_cf6_files: retrieving latest CF6 file for %s' % stid)\n else:\n print('get_cf6_files: retrieving %s archived CF6 files for %s' % (num_files, stid))\n\n # Fetch files\n for r in range(1, num_files + 1):\n # Format the web address: goes through 'versions' on NWS site which correspond to increasingly older files\n version = 'version=%d&glossary=0' % r\n nws_site = '&'.join((nws_url, version))\n if config['debug'] > 50:\n print('get_cf6_files: fetching from %s' % nws_site)\n response = requests.get(nws_site)\n cf6_data = response.text\n\n # Remove the header\n try:\n body_and_footer = cf6_data.split('CXUS')[1] # Mainland US\n except IndexError:\n try:\n body_and_footer = cf6_data.split('CXHW')[1] # Hawaii\n except IndexError:\n try:\n body_and_footer = cf6_data.split('CXAK')[1] # Alaska\n except IndexError:\n if config['debug'] > 50:\n print('get_cf6_files: bad file from request version %d' % r)\n continue\n body_and_footer_lines = body_and_footer.splitlines()\n if len(body_and_footer_lines) <= 2:\n body_and_footer = cf6_data.split('000')[2]\n\n # Remove the footer\n body = body_and_footer.split('[REMARKS]')[0]\n\n # Find the month and year of the file\n try:\n current_year = re.search('YEAR: *(\\d{4})', body).groups()[0]\n except BaseException:\n if config['debug'] > 9:\n print('get_cf6_files warning: file from request version %d is faulty' % r)\n continue\n try:\n current_month = re.search('MONTH: *(\\D{3,9})', body).groups()[0]\n current_month = current_month.strip() # Gets rid of newlines and whitespace\n datestr = '%s %s' % (current_month, current_year)\n file_date = datetime.strptime(datestr, '%B %Y')\n except: # Some files have a different formatting, although this may be fixed now.\n current_month = re.search('MONTH: *(\\d{2})', body).groups()[0]\n current_month = current_month.strip()\n datestr = '%s %s' % (current_month, current_year)\n file_date = datetime.strptime(datestr, '%m %Y')\n\n # Write to a temporary file, check if output file exists, and if so, make sure the new one has more data\n datestr = file_date.strftime('%Y%m')\n filename = '%s/%s_%s.cli' % (site_directory, stid.upper(), datestr)\n temp_file = '%s/temp.cli' % site_directory\n with open(temp_file, 'w') as out:\n out.write(body)\n\n def file_len(file_name):\n with open(file_name) as f:\n for i, l in enumerate(f):\n pass\n return i + 1\n\n if os.path.isfile(filename):\n old_file_len = file_len(filename)\n new_file_len = file_len(temp_file)\n if old_file_len < new_file_len:\n if config['debug'] > 9:\n print('get_cf6_files: overwriting %s' % filename)\n os.remove(filename)\n os.rename(temp_file, filename)\n else:\n if config['debug'] > 9:\n print('get_cf6_files: %s already exists' % filename)\n else:\n if config['debug'] > 9:\n print('get_cf6_files: writing %s' % filename)\n os.rename(temp_file, filename)", "def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n 
except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)", "def getFileMetadata( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.getFileMetadata: Attempting to obtain metadata for %s files.\" % len( urls ) )\n serviceClient = RPCClient( self.url )\n for url in urls:\n pfn = url\n if url.find( self.url ) == 0:\n pfn = url[ ( len( self.url ) ):]\n res = serviceClient.getMetadata( pfn )\n if res['OK']:\n if res['Value']['Exists']:\n if res['Value']['Type'] == 'File':\n gLogger.debug( \"DIPStorage.getFileMetadata: Successfully obtained metadata for %s.\" % url )\n successful[url] = res['Value']\n else:\n failed[url] = 'Supplied path is not a file'\n else:\n failed[url] = 'File does not exist'\n else:\n gLogger.error( \"DIPStorage.getFileMetadata: Failed to get metdata for %s.\" % url, res['Message'] )\n failed[url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def prepareFluidinfo(runTests):\n if runTests:\n local('make build-clean build', capture=False)\n local('make check-all', capture=False)\n\n local('git archive --prefix=fluidinfo/ -v --format tar HEAD | '\n 'bzip2 > fluidinfo.tar.bz2')\n return datetime.utcnow().strftime('%Y%m%d-%H%M')", "def compile_source_metadata(sourcefile, config, year):\n metadata = dict(source_metadata)\n if isinstance(sourcefile, list):\n filename = sourcefile[0]\n else:\n filename = sourcefile\n data_retrieval_time = time.ctime(os.path.getmtime(filename))\n if data_retrieval_time is not None:\n metadata['SourceAcquisitionTime'] = data_retrieval_time\n metadata['SourceFileName'] = sourcefile\n metadata['SourceURL'] = config['url']\n if year in config:\n metadata['SourceVersion'] = config[year]['file_version']\n else:\n import re\n pattern = 'V[0-9]'\n version = re.search(pattern, filename, flags=re.IGNORECASE)\n if version is not None:\n metadata['SourceVersion'] = version.group(0)\n return metadata", "def run(self, info):\n\n # Write the metadata to the file's xattrs\n self._downloader.to_screen('[metadata] Writing metadata to file\\'s xattrs')\n\n filename = info['filepath']\n\n try:\n xattr_mapping = {\n 'user.xdg.referrer.url': 'webpage_url',\n # 'user.xdg.comment': 'description',\n 'user.dublincore.title': 'title',\n 'user.dublincore.date': 'upload_date',\n 'user.dublincore.description': 'description',\n 'user.dublincore.contributor': 'uploader',\n 'user.dublincore.format': 'format',\n }\n\n num_written = 0\n for xattrname, infoname in xattr_mapping.items():\n\n value = info.get(infoname)\n\n if value:\n if infoname == 'upload_date':\n value = hyphenate_date(value)\n\n byte_value = value.encode('utf-8')\n write_xattr(filename, xattrname, byte_value)\n num_written += 1\n\n return [], info\n\n except XAttrUnavailableError as e:\n self._downloader.report_error(str(e))\n return [], info\n\n except XAttrMetadataError as e:\n if e.reason == 'NO_SPACE':\n self._downloader.report_warning(\n 'There\\'s no disk space left, disk quota exceeded or filesystem xattr limit exceeded. '\n + (('Some ' if num_written else '') + 'extended attributes are not written.').capitalize())\n elif e.reason == 'VALUE_TOO_LONG':\n self._downloader.report_warning(\n 'Unable to write extended attributes due to too long values.')\n else:\n msg = 'This filesystem doesn\\'t support extended attributes. 
'\n if compat_os_name == 'nt':\n msg += 'You need to use NTFS.'\n else:\n msg += '(You may have to enable them in your /etc/fstab)'\n self._downloader.report_error(msg)\n return [], info", "def _get_remote_files(config):\n if \"cache\" in config:\n return config[\"cache\"]\n out = {}\n for project, folder in _remote_folders(config):\n out.update(_project_files(project, folder))\n return out", "def meta_info_feats(self, output_path, file_types):\n return self.build_meta(output_path, file_types)", "def get_metadata(session, url, filelist):\n metadata = {}\n # Loop over the Clowder dataset image ID list\n for clowder_img in filelist.json():\n # Get metadata for the image from Clowder\n response = clowder_file_metadata(session, url, clowder_img['id'])\n # Metadata from multiple extractors may be present\n for extractor in response.json():\n # Find the extractor called \"deprecatedapi\" which refers to the API used to upload metadata\n if \"user_id\" in extractor['agent']:\n # Save a few metadata elements for convenience\n camera_type = extractor['content']['camera_type']\n perspective = extractor['content']['perspective']\n rotation_angle = extractor['content']['rotation_angle']\n # Store the image ID for later use\n extractor['img_id'] = clowder_img['id']\n if camera_type not in metadata:\n metadata[camera_type] = {}\n if perspective not in metadata[camera_type]:\n metadata[camera_type][perspective] = {}\n metadata[camera_type][perspective][rotation_angle] = extractor\n\n return metadata", "def pullnlink(self,config):\n \n pull = []; link = []\n \n # choose files to pull and link\n for key,value in self.FILES.iteritems():\n \n # link big files\n if key == 'MESH':\n # mesh (merged or partitioned)\n value = expand_part(value,config)\n link.extend(value)\n elif key == 'DIRECT':\n # direct solution\n value = expand_time(value,config)\n link.extend(value)\n elif 'ADJOINT_' in key:\n # adjoint solution\n value = expand_time(value,config)\n link.extend(value)\n #elif key == 'STABILITY':\n #pass\n # copy all other files\n else:\n pull.append(value)\n \n #: for each filename\n \n return pull,link", "def _fetch_current_remote_metadata(conn):\n content = _get(conn, REMOTE_METADATA_FILE)\n metadata = json.loads(content) if content else {}\n return metadata", "def metadata(self, truncate: bool = False) -> Tuple[str, str]:\n\t\tif not self._closed:\n\t\t\tfilename = self.filename\n\t\t\tmd_filename = \"%s.file_md.json.gzip\" % (self.file_path)\n\t\t\tmd_mod_filename = \"%s.file_md.lastmod.gzip\" % (self.file_path)\n\t\t\tlogging.debug(\"Expanding metada (stored as %s.file_md.json.gzip)\", filename)\n\n\t\t\tlast_mod = self.last_modified()\n\t\t\tif os.path.isfile(md_filename):\n\t\t\t\tlogging.debug(\" Found previously extracted JSON file\")\n\t\t\t\tif truncate:\n\t\t\t\t\tself.clear_metadata()\n\t\t\t\telse:\n\t\t\t\t\tmd_json = load_gzipped_json_string(md_filename)\n\t\t\t\t\tmd_mod = load_gzipped_json_string(md_mod_filename)\n\t\t\t\t\tmd_parsed = json.loads(md_json)\n\t\t\t\t\t# check if cached metadata is up to date and\n\t\t\t\t\t# points to correct project folder and filename\n\t\t\t\t\t# if so return cache, otherwise clear it\n\t\t\t\t\tlogging.debug(\" md_mod: %s\", md_mod)\n\t\t\t\t\tlogging.debug(\" last_mod: %s\", last_mod)\n\t\t\t\t\tif md_mod != last_mod or md_parsed.project != self.project or md_parsed.filename != filename:\n\t\t\t\t\t\tself.clear_metadata()\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogging.debug(\" Cache up to date\")\n\t\t\t\t\t\treturn (md_json, last_mod)\n\n\t\t\tds = 
self.ds\n\t\t\tattrs = ds.attrs.keys()\n\t\t\ttitle = filename if \"title\" not in attrs else ds.attrs.title\n\t\t\tdescr = \"\" if \"description\" not in attrs else ds.attrs.description\n\t\t\turl = \"\" if \"url\" not in attrs else ds.attrs.url\n\t\t\tdoi = \"\" if \"doi\" not in attrs else ds.attrs.doi\n\t\t\t# converts compact ISO timestamps to human-readable ones.\n\t\t\t# Example: \"20180130T155028.262458Z\" becomes \"2018/01/13 15:50\"\n\t\t\tlast_mod_humanreadable = \"{}/{}/{} {}:{}:{}\".format(last_mod[0:4], last_mod[4:6], last_mod[6:8], last_mod[9:11], last_mod[11:13], last_mod[13:15])\n\t\t\t# default to last_modified for older files that do\n\t\t\t# not have a creation_date field\n\t\t\tcreation_date = last_mod_humanreadable if \"creation_date\" not in attrs else ds.attrs.creation_date\n\t\t\t# get arbitrary col/row attribute, they are all lists\n\t\t\t# of equal size. The length equals total cells/genes\n\t\t\ttotal_cells = ds.shape[1]\n\t\t\ttotal_genes = ds.shape[0]\n\n\t\t\tmd_data = {\n\t\t\t\t\"project\": self.project,\n\t\t\t\t\"filename\": filename,\n\t\t\t\t\"dataset\": filename,\n\t\t\t\t\"title\": title,\n\t\t\t\t\"description\": descr,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"doi\": doi,\n\t\t\t\t\"creationDate\": creation_date,\n\t\t\t\t\"lastModified\": last_mod_humanreadable,\n\t\t\t\t\"totalCells\": total_cells,\n\t\t\t\t\"totalGenes\": total_genes,\n\t\t\t}\n\t\t\tlogging.debug(\" Saving extracted metadata as JSON file\")\n\t\t\tmd_json = json.dumps(md_data)\n\t\t\tsave_gzipped_json_string(md_filename, md_json)\n\t\t\tsave_gzipped_json_string(md_mod_filename, json.dumps(last_mod))\n\t\t\treturn (md_json, last_mod)\n\t\treturn None", "def get_scan_from_metadata(meta):\n with open(meta, 'r') as f:\n md = json.load(f)\n\n scan_name = None\n\n if 'lemnatec_measurement_metadata' in md:\n if 'gantry_system_variable_metadata' in md['lemnatec_measurement_metadata']:\n if 'Script copy path on FTP server' in md['lemnatec_measurement_metadata']['gantry_system_variable_metadata']:\n ftp = md['lemnatec_measurement_metadata']['gantry_system_variable_metadata']['Script copy path on FTP server']\n scan_name = os.path.basename(ftp).replace(\".cs\", \"\").lower()\n\n return scan_name", "def get_metadata(self, file_id):\n pass" ]
[ "0.58197266", "0.5817729", "0.57113373", "0.5679368", "0.5664209", "0.56055725", "0.5591246", "0.5514646", "0.54909694", "0.5488954", "0.5428965", "0.5408972", "0.5381461", "0.5341084", "0.5333622", "0.529747", "0.5256132", "0.5247465", "0.5238741", "0.52325314", "0.5205857", "0.52052903", "0.5204908", "0.51961046", "0.5188814", "0.5185052", "0.5169012", "0.51662856", "0.51613486", "0.515314" ]
0.7348779
0
serialize triples to a chosen format supported by rdflib, e.g. xml, turtle, n3, etc.
def serialize(triples, format='xml'): g = Graph() for k, v in NAMESPACES.iteritems(): g.bind(k, v) for triple in triples: g.add(triple) return g.serialize(format=format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize_nx_node_to_triples(g, key, node=None):\n\n node = node or g and g.node.get(key) # <curie/key> # ... precis\n\n yield (key, 'a', node.get('type')) # <> a <type>\n\n for attr,value in node.items():\n yield (key, attr, value)\n\n # MultiDiGraph\n for edge in g.edge.get(key):\n # multivalue edges\n # <> linkTo _:ReifiedEdge\n\n # = BNode(), UUID\n # = edge_url\n s = '#e/'.join((key,uuid,))\n yield (s, 'a', 'edgetype')\n yield (s, 'linksFrom', key)\n yield (s, 'linksTo', edge)\n\n for attr, value in edge.items():\n yield (s, attr, edge.get(attr))\n # _:ReifiedEdge attr[n] value[n]", "def serialize(self):\n out = {\"nodes\":[],\n \"idmap\":{i:x for i,x in enumerate(sorted(self.nodes))}} #map for decoding\n r_idmap = {x:i for i,x in out[\"idmap\"].items()} #map for encoding\n for name, node in self.nodes.items():\n tmp = {\"name\": name,\n \"text\": node.text,\n \"neighbors\": []}\n for rtype, dest in node.outgoing_relations:\n if dest in r_idmap:\n tmp[\"neighbors\"].append([\"relation\", rtype, str(r_idmap[dest])])\n else:\n tmp[\"neighbors\"].append([\"literal\", rtype, dest])\n for atype, attribute in node.attributes:\n tmp[\"neighbors\"].append([\"literal\", atype, attribute])\n out[\"nodes\"].append(tmp)\n return json.dumps(out,sort_keys=True)", "def triplify_object(binding):\n triples = []\n if binding.uri:\n triples.append((binding.subject, RDF.type, binding.uri))\n\n if binding.parent is not None:\n parent = binding.parent.subject\n if binding.parent.is_array:\n parent = binding.parent.parent.subject\n triples.append((parent, binding.predicate, binding.subject))\n if binding.reverse is not None:\n triples.append((binding.subject, binding.reverse, parent))\n\n for prop in binding.properties:\n _, prop_triples = triplify(prop)\n triples.extend(prop_triples)\n\n return binding.subject, triples", "def serialize(self, outputDataFile):\n\n print \"Serializing graph to {} ...\".format(outputDataFile)\n fileWrite = open(outputDataFile, \"w\")\n turtle = self.graph.serialize(None, format='n3')\n fileWrite.writelines(turtle)\n fileWrite.close()\n print \"Serialization done.\"", "def test06_serialize(self):\n uri = URIRef('http://ex.org/ldprs')\n g = Graph()\n g.add((uri, RDF.type, URIRef('http://ex.org/some_type')))\n g.add((URIRef('http://ex.org/a'), URIRef('http://ex.org/b'), Literal('LITERAL')))\n r = LDPRS(uri=uri, content=g)\n s = r.serialize()\n self.assertIn('@prefix ldp: <http://www.w3.org/ns/ldp#> .', s)\n self.assertIn('ldprs', s) # might prefix or not\n self.assertIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertIn('\"LITERAL\"', s)\n #\n s = r.serialize(omits=['content'])\n self.assertIn('ldprs', s) # might prefix or not\n self.assertNotIn('some_type', s) # might prefix or not\n self.assertIn('ldp:RDFSource', s)\n self.assertIn('ldp:Resource', s)\n self.assertNotIn('\"LITERAL\"', s)", "def rdfGetTriples(id):\n\ttargets = []\n\tfullId = id\n\n#\tlog.info(\"rdfgetTriples(%s)\" % fullId)\n\tif\t':' in id: #Includes full path or namespaces\n\t\tfullId = id\n\telse:\n\t\tfullId = VOCAB + \"/\" + id\n\tsource = URIRef(fullId)\n\t#log.info(\"rdfgetTriples(%s)\" % source)\n\t\n\tfirst = True\n\tunit = None\n\t\n\thomeSetTo = None\n\ttypeOfInLayers = []\n\n\ttry:\n\t\tRDFLIBLOCK.acquire()\n\t\tres = list(STORE.query(GETTRIPS, initBindings={'sub':source}))\n\tfinally:\n\t\tRDFLIBLOCK.release()\n\t\t\n\tfor row in res:\n#\t\tif source == \"http://meta.schema.org/\":\n#\t\tlog.info(\"Triple: %s %s %s %s\" 
% (source, row.p, row.o, row.g))\n\t\tlayer = str(getRevNss(str(row.g)))\n\t\tif first:\n\t\t\tfirst = False\n\t\t\tunit = api.Unit.GetUnitNoLoad(id,True)\n\t\ts = stripID(source)\n\t\tp = stripID(row.p)\n\t\tif p == \"rdf:type\": \n\t\t\ttypeOfInLayers.append(layer)\n\t\telif(p == \"isPartOf\"):\n\t\t\tif(unit.home != None and unit.home != layer):\n\t\t\t\tlog.info(\"WARNING Cannot set %s home to %s - already set to: %s\" % (s,layer,unit.home))\n\t\t\tunit.home = layer\n\t\t\thomeSetTo = layer\n\t\telif(p == \"category\"):\n\t\t\tunit.category = row.o\n\n\t\tprop = api.Unit.GetUnit(p,True)\n\n\t\tif isinstance(row.o,rdflib.Literal):\n\t\t\tapi.Triple.AddTripleText(unit, prop, row.o, layer)\n\t\telse: \n\t\t\tapi.Triple.AddTriple(unit, prop, api.Unit.GetUnit(stripID(row.o),True), layer)\n\t\t\t\n\t\"\"\" Default Unit.home to core if not specificly set with an 'isPartOf' triple \"\"\"\n\tif(unit and homeSetTo == None):\n\t\tif('core' in typeOfInLayers or len(typeOfInLayers) == 0):\n\t\t\tunit.home = 'core'\n\t\telse:\n\t\t\tlog.info(\"WARNING: %s defined in extensions %s but has no 'isPartOf' triple - cannot default home to core!\" % (id,typeOfInLayers))\n\treturn unit", "def to_jsonld(data, options=None):\n if options is None:\n options = {}\n\n return rdftools.to_jsonld(data, 'turtle')", "def triplelist_value_to_sparql(self, object, extra_triple_strs, varnamespace = None) :\n\t\tif varnamespace == None :\n\t\t\tvarnamespace = self.n.var\n\t\tif type(object) == URIRef :\n\t\t\tif object.find(varnamespace) != -1 :\n\t\t\t\treturn '?'+object[len(varnamespace):]\n\t\t\telif object.find(self.n.bnode) != -1 :\n\t\t\t\tvarname = object[len(self.n.bnode):]\n\t\t\t\tif varname in self.py_to_SPARQL_bnode :\n\t\t\t\t\treturn self.py_to_SPARQL_bnode[varname].n3()\n\t\t\t\telse :\n\t\t\t\t\tbnode = self.next_bnode()\n\t\t\t\t\tself.py_to_SPARQL_bnode[varname] = bnode\n\t\t\t\t\treturn bnode.n3()\n\t\telif type(object) == dict :\n\t\t\troot, triples = self.python_to_SPARQL_long_helper(object, self._uribnodeVar())\n\t\t\textra_triple_strs.append(triples)\n\t\t\treturn root\n\t\t\n\t\treturn self.python_to_n3(object)", "def test_convert(self):\n gd: GraphDocument = json_loader.load(str(ONT), target_class=GraphDocument)\n g = self.converter.convert(gd)\n g.serialize(OUT)\n oi = SparqlImplementation(OntologyResource(OUT))\n # for r in oi.relationships([\"GO:0005773\"]):\n # print(r)\n self.compliance_tester.test_synonyms(oi)\n self.compliance_tester.test_definitions(oi)\n self.compliance_tester.test_sssom_mappings(oi)\n self.compliance_tester.test_relationships(oi)", "def triples():", "def __n3_to_str(triple):\n s, p, o = triple\n s = s.n3()\n p = p.n3()\n o = o.n3()\n if s.startswith('<') and s.endswith('>'):\n s = s[1:len(s) - 1]\n if p.startswith('<') and p.endswith('>'):\n p = p[1:len(p) - 1]\n if o.startswith('<') and o.endswith('>'):\n o = o[1:len(o) - 1]\n return (s, p, o)", "def to_rdf(self,graph,prefix,uri):\n \n def add_variable_list_data(pos,\n variable,\n variable_label,\n variable_type,\n SPSS_measurement_level,\n SPSS_user_missing_values,\n value_labels):\n \"Adds the data from a variable_list variable to the RDFlib graph\"\n \n graph.add((dd_namespace[variable],RDF.type,RDF.Property))\n graph.add((dd_namespace[variable],ukds_namespace.pos,rdflib.Literal(int(pos))))\n graph.add((dd_namespace[variable],ukds_namespace.variable,rdflib.Literal(variable)))\n graph.add((dd_namespace[variable],ukds_namespace.variable_label,rdflib.Literal(variable_label)))\n 
graph.add((dd_namespace[variable],ukds_namespace.variable_type,rdflib.Literal(variable_type)))\n graph.add((dd_namespace[variable],ukds_namespace.SPSS_measurement_level,rdflib.Literal(SPSS_measurement_level)))\n \n if SPSS_user_missing_values:\n for x in SPSS_user_missing_values.split(','):\n graph.add((dd_namespace[variable],ukds_namespace.SPSS_user_missing_values,rdflib.Literal(x)))\n \n if value_labels:\n for k,v in value_labels.items():\n a=rdflib.BNode()\n graph.add((dd_namespace[variable],ukds_namespace.value_labels,a))\n graph.add((a,ukds_namespace.label,rdflib.Literal(v)))\n graph.add((a,ukds_namespace.value,rdflib.Literal(str(k))))\n \n \n dd_namespace=rdflib.Namespace(uri)\n graph.bind(prefix,dd_namespace)\n ukds_namespace=rdflib.Namespace(r'http://purl.org/berg/ontology/UKDS/')\n graph.bind('ukds',ukds_namespace)\n \n for x in self.variable_list:\n add_variable_list_data(**x)\n \n return graph", "def dumps(xs, model=None, properties=False, indent=True, **kwargs):\n xs = list(xs)\n \n if not xs:\n return ''\n\n if model is None:\n model = xs[0].__class__\n\n if not hasattr(model, 'to_triples'):\n raise TypeError(\n '{} class does not implement to_triples()'.format(model.__name__)\n )\n\n codec = XMRSCodec()\n graphs = [\n codec.triples_to_graph(\n model.to_triples(model.from_xmrs(x), properties=properties)\n )\n for x in xs\n ]\n\n if 'pretty_print' in kwargs:\n indent = kwargs['pretty_print']\n\n return penman.dumps(graphs, cls=XMRSCodec, indent=indent)", "def to_ttl(self,filename,prefix,uri):\n def write_variable_list_data(file,\n prefix,\n pos,\n variable,\n variable_label,\n variable_type,\n SPSS_measurement_level,\n SPSS_user_missing_values,\n value_labels):\n \"\"\"Writes the data from a variable_list variable to the file\n \n \"\"\"\n \n l=[]\n l.append('%s:%s a rdf:Property' % (prefix,variable))\n l.append('ukds:pos %s' % pos)\n l.append('ukds:variable \"%s\"' % variable)\n l.append('ukds:variable_label \"%s\"' % variable_label)\n l.append('ukds:variable_type \"%s\"' % variable_type)\n l.append('ukds:SPSS_measurement_level \"%s\"' % SPSS_measurement_level)\n \n if SPSS_user_missing_values:\n l1=[]\n for x in SPSS_user_missing_values.split(','):\n l1.append('\"%s\"' % x)\n l.append('ukds:SPSS_user_missing_values %s' % ' ,\\t\\t'.join(l1))\n \n if value_labels:\n l2=[]\n for k,v in value_labels.items():\n l2.append('[ ukds:label \"%s\" ; ukds:value \"%s\" ]' % (v,k))\n l.append('ukds:value_labels %s' % ' ,\\n\\t\\t'.join(l2))\n \n file.write(' ;\\n\\t'.join(l)+' .\\n\\n')\n \n with open(filename,'w',encoding=\"UTF-8\") as file:\n file.write('@prefix %s: <%s> .\\n' % (prefix,uri))\n file.write('@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n file.write('@prefix ukds: <http://purl.org/berg/ontology/UKDS/> .\\n')\n file.write('\\n') \n \n for x in self.variable_list:\n \n write_variable_list_data(file,prefix,**x)\n \n# file_index=0\n# i=iter(self.variable_list)\n# index=0\n# \n# while True:\n# \n# with open(filename+'_'+str(file_index)+'.ttl','w',encoding=\"UTF-8\") as file:\n# file.write('@prefix %s: <%s> .\\n' % (prefix,uri))\n# file.write('@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\\n')\n# file.write('@prefix ukds: <http://purl.org/berg/ontology/UKDS/> .\\n')\n# file.write('\\n')\n# \n# while True: \n# \n# try:\n# x = next(i)\n# except StopIteration:\n# return\n# \n# write_variable_list_data(file,prefix,**x)\n# \n# if index%250==0:\n# filesize_mb=os.path.getsize(filename+'_'+str(file_index)+'.ttl')/(1024*1024.0)\n# if 
filesize_mb>10000:\n# file_index+=1\n# break\n# \n# index+=1\n return", "def localize_triples(triples: List[Dict[str, str]], graphs: List[str]) -> Iterable[Dict[str, str]]:\n for (s, p, o) in triples:\n for graph in graphs:\n yield {\n 'subject': format_term(s),\n 'predicate': format_term(p),\n 'object': format_term(o),\n 'graph': graph\n }", "def to_jsonld(data, format):\n # pyld only supports parsing of nquads. Other formats are first\n # converted into nquads via rdflib.Graph.\n if format != 'nquads':\n graph = from_string(data, format)\n data = graph.serialize(format='nquads')\n # The above conversion introduces blank node identifiers for\n # triples that have no graph label (the fourth value).\n # Remove these to avoid modifying the data.\n data = _remove_blank_graph_labels(data)\n\n # Note: the N-Quads mime type is \"application/n-quads\"\n # (http://www.w3.org/TR/n-quads/), but pyld drops the dash.\n return jsonld.from_rdf(data, { 'format': 'application/nquads' })", "def try6():\n sample_file = '/Users/mayankkejriwal/datasets/eswc2017/triples_sample.ttl'\n with codecs.open(sample_file, 'r', 'utf-8') as f:\n for line in f:\n triple_dict = EmbeddingGenerator.EmbeddingGenerator.parse_line_into_triple(line)\n if not triple_dict:\n continue\n # print type(triple_dict['object'])\n # print triple_dict\n print triple_dict['subject'].n3()[1:-1]\n # print triple_dict['predicate']==URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#type')", "def make():\n\n starttime = time.time()\n\n ds = load_inputfiles()\n inputtime = time.time()\n\n out = process(ds)\n skosify_process(out)\n processtime = time.time()\n\n # Add skos:inScheme to all concepts\n # for q in roald.triples((None, RDF.type, SKOS.Concept)):\n # out.add((q[0], SKOS.inScheme, vocabulary))\n\n # xml:\n #\n # <rdf:Description rdf:about=\"http://folk.uio.no/knuthe/emne/data/xml/#REAL030177\">\n # <rdf:type rdf:resource=\"http://www.w3.org/2004/02/skos/core#Concept\"/>\n # <skos:prefLabel xml:lang=\"nb\">Matematiske tabeller</skos:prefLabel>\n # </rdf:Description>\n #\n\n # pretty-xml:\n #\n # <skos:Concept rdf:about=\"http://folk.uio.no/knuthe/emne/data/xml/#REAL030177\">\n # <skos:prefLabel xml:lang=\"nb\">Matematiske tabeller</skos:prefLabel>\n # </skos:Concept>\n #\n\n logger.info('Serializing...')\n\n s = OrderedXMLSerializer(out)\n s.serialize(open('realfagstermer.rdf.xml', 'w'), max_depth=1)\n logger.info('Wrote realfagstermer.rdf.xml')\n\n out.serialize(open('realfagstermer.nt', 'w'), format='nt')\n logger.info('Wrote realfagstermer.nt')\n\n SD = Namespace('http://www.w3.org/ns/sparql-service-description#')\n\n s = OrderedTurtleSerializer(out)\n\n # These will appear first in the file and be ordered by URI\n s.topClasses = [SKOS.ConceptScheme,\n FOAF.Organization,\n SD.Service,\n SD.Dataset,\n SD.Graph,\n SD.NamedGraph,\n SKOS.Concept]\n\n fobj = open('realfagstermer.ttl', 'w')\n fobj.write('@base <http://data.ub.uio.no/> .\\n')\n s.serialize(fobj, base='http://data.ub.uio.no/')\n logger.info('Wrote realfagstermer.ttl')\n\n now = int(time.time())\n # now = datetime.datetime.now()\n s = json.load(open('stats.json', 'r'))\n current = stats(out)\n current['ts'] = now\n s.append(current)\n\n json.dump(current, open('stats_current.json', 'w'), indent=2, sort_keys=True)\n json.dump(s, open('stats.json', 'w'), indent=2)\n\n endtime = time.time()\n\n logging.info(\"Reading input files took %d seconds\",\n (inputtime - starttime))\n logging.info(\"Processing took %d seconds\",\n (processtime - inputtime))\n logging.info(\"Writing output 
file took %d seconds\",\n (endtime - processtime))\n logging.info(\"Total time taken: %d seconds\", (endtime - starttime))", "def cli(yamlfile, **kwargs):\n print(RDFGenerator(yamlfile, **kwargs).serialize(**kwargs))", "def encode_nodes(nodes):\n n = []\n for node in nodes:\n n.extend([node[0], dottedQuadToNum(node[1].host), node[1].port])\n return struct.pack(\"!\" + \"20sIH\" * len(nodes), *n)", "def test_multiple_triples(self):\n self.graph.add((artis, RDF.type, zoo))\n self.graph.add((artis, RDF.type, org))\n self.graph.add((berlin_zoo, RDF.type, zoo))\n self.assertEquals(len(list(self.graph.triples((None, None, None)))), 3)\n\n self.assertEquals(len(list(self.graph.triples((artis, None, None)))), 2)\n self.assertEquals(len(list(self.graph.triples((None, RDF.type, None)))), 3)\n self.assertEquals(len(list(self.graph.triples((None, None, zoo)))), 2)\n self.assertEquals(len(list(self.graph.triples((None, None, org)))), 1)", "def triples( # type: ignore[override]\n self, spo: \"_TriplePatternType\", context: Optional[\"_ContextType\"] = None\n ) -> Iterator[Tuple[\"_TripleType\", None]]:\n\n s, p, o = spo\n\n vars = []\n if not s:\n s = Variable(\"s\")\n vars.append(s)\n\n if not p:\n p = Variable(\"p\")\n vars.append(p)\n if not o:\n o = Variable(\"o\")\n vars.append(o)\n\n if vars:\n v = \" \".join([term.n3() for term in vars])\n verb = \"SELECT %s \" % v\n else:\n verb = \"ASK\"\n\n nts = self.node_to_sparql\n query = \"%s { %s %s %s }\" % (verb, nts(s), nts(p), nts(o))\n\n # The ORDER BY is necessary\n if (\n hasattr(context, LIMIT)\n or hasattr(context, OFFSET)\n or hasattr(context, ORDERBY)\n ):\n var = None\n if isinstance(s, Variable):\n var = s\n elif isinstance(p, Variable):\n var = p\n elif isinstance(o, Variable):\n var = o\n elif hasattr(context, ORDERBY) and isinstance(\n getattr(context, ORDERBY), Variable\n ):\n var = getattr(context, ORDERBY)\n # type error: Item \"None\" of \"Optional[Variable]\" has no attribute \"n3\"\n query = query + \" %s %s\" % (ORDERBY, var.n3()) # type: ignore[union-attr]\n\n try:\n query = query + \" LIMIT %s\" % int(getattr(context, LIMIT))\n except (ValueError, TypeError, AttributeError):\n pass\n try:\n query = query + \" OFFSET %s\" % int(getattr(context, OFFSET))\n except (ValueError, TypeError, AttributeError):\n pass\n\n result = self._query(\n query,\n # type error: Item \"None\" of \"Optional[Graph]\" has no attribute \"identifier\"\n default_graph=context.identifier if self._is_contextual(context) else None, # type: ignore[union-attr]\n )\n\n if vars:\n if type(result) == tuple:\n if result[0] == 401:\n raise ValueError(\n \"It looks like you need to authenticate with this SPARQL Store. 
HTTP unauthorized\"\n )\n for row in result:\n if TYPE_CHECKING:\n # This will be a ResultRow because if vars is truthish then\n # the query will be a SELECT query.\n assert isinstance(row, ResultRow)\n yield (\n # type error: No overload variant of \"get\" of \"ResultRow\" matches argument types \"Node\", \"Node\"\n row.get(s, s), # type: ignore[call-overload]\n row.get(p, p), # type: ignore[call-overload]\n row.get(o, o), # type: ignore[call-overload]\n ), None # why is the context here not the passed in graph 'context'?\n else:\n if result.askAnswer:\n yield (s, p, o), None", "def export_to_file(self, path, graph_format):\n try:\n logging.info(\"Saving RDF data to \" + str(path))\n with open(path, \"wb\") as out_file:\n out_file.write(self.g.serialize(format=graph_format, encoding=\"UTF-8\"))\n except Exception as e:\n logging.error(\"Error while saving RDF results \"+str(e))", "def serialize(self, buff):\n try:\n buff.write(_get_struct_i().pack(self.numberOfTSPTurtles))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def rules(self, t, cycle_num):\n s, p, o = t\n # rdf1\n self.store_triple((p, rdf_type, Property))\n # rdfs4a\n if cycle_num == 1:\n self.store_triple((s, rdf_type, Resource))\n # rdfs4b\n if cycle_num == 1:\n self.store_triple((o, rdf_type, Resource))\n if p == rdfs_domain:\n # rdfs2\n for uuu, Y, yyy in self.graph.triples((None, s, None)):\n self.store_triple((uuu, rdf_type, o))\n if p == rdfs_range:\n # rdfs3\n for uuu, Y, vvv in self.graph.triples((None, s, None)):\n self.store_triple((vvv, rdf_type, o))\n if p == subPropertyOf:\n # rdfs5\n for Z, Y, xxx in self.graph.triples((o, subPropertyOf, None)):\n self.store_triple((s, subPropertyOf, xxx))\n # rdfs7\n for zzz, Z, www in self.graph.triples((None, s, None)):\n self.store_triple((zzz, o, www))\n if p == rdf_type and o == Property:\n # rdfs6\n self.store_triple((s, subPropertyOf, s))\n if p == rdf_type and o == Class:\n # rdfs8\n self.store_triple((s, subClassOf, Resource))\n # rdfs10\n self.store_triple((s, subClassOf, s))\n if p == subClassOf:\n # rdfs9\n for vvv, Y, Z in self.graph.triples((None, rdf_type, s)):\n self.store_triple((vvv, rdf_type, o))\n # rdfs11\n for Z, Y, xxx in self.graph.triples((o, subClassOf, None)):\n self.store_triple((s, subClassOf, xxx))\n if p == rdf_type and o == ContainerMembershipProperty:\n # rdfs12\n self.store_triple((s, subPropertyOf, member))\n if p == rdf_type and o == Datatype:\n self.store_triple((s, subClassOf, Literal))", "def triples(self, triple_pattern, context=None):\n subject, predicate, obj = triple_pattern\n\n \"\"\"we have no bnodes in our data\"\"\"\n if isinstance(subject, BNode) or isinstance(object, BNode):\n return self.__emptygen()\n if RDF.type == predicate and obj is not None:\n return self.type_triples(subject, predicate, obj)\n elif predicate in nodeRelatedPredicates:\n return self.nodes(subject, predicate, obj)\n elif predicate in stepAssociatedPredicates:\n return self.steps(subject, predicate, obj)\n elif RDFS.label == predicate:\n return self.paths(subject, predicate, obj)\n elif subject is None and predicate is None and obj is None:\n return chain(self.nodes(subject, predicate, obj),\n self.steps(subject, predicate, obj),\n self.paths(subject, predicate, obj))\n elif subject is not None:\n if type(subject) == 
PathIriRef:\n return self.paths(subject, predicate, obj)\n elif type(subject) == StepBeginIriRef or type(subject) == StepEndIriRef:\n return self.steps(subject, predicate, obj)\n elif type(subject) == NodeIriRef:\n return self.nodes(subject, predicate, obj)\n elif type(subject) == StepIriRef:\n return self.steps(subject, predicate, obj)\n\n subject_iri_parts = subject.toPython().split('/')\n if 'node' == subject_iri_parts[-2] and self.odgi_graph.has_node(int(subject_iri_parts[-1])):\n handle = self.odgi_graph.get_handle(int(subject_iri_parts[-1]))\n ns = NodeIriRef(handle, self.odgi_graph, self.base)\n return chain(self.handle_to_triples(predicate, obj, handle),\n self.handle_to_edge_triples(ns, predicate, obj))\n elif 'path' == subject_iri_parts[-4] and 'step' == subject_iri_parts[-2]:\n return self.steps(subject, predicate, obj)\n elif 'path' == subject_iri_parts[-2]:\n return self.paths(subject, predicate, obj)\n else:\n return self.__emptygen()\n else:\n return self.__emptygen()", "def export(fileprefix, hedges):\n with open(fileprefix + '.txt', 'w') as f:\n for h in hedges:\n s = \"\"\n for node in h[0]: #each node in the tail\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n for node in h[1]: #each node in the head\n s += str(node) + \"|\"\n s = s[:-1]\n s += '\\t'\n s += '1' + '\\n' #assigns weight for the hedge, currently always set to 1\n f.write(s)", "def test_write_tsv3():\n graph = NxGraph()\n graph.add_node(\"A\", id=\"A\", **{\"name\": \"Node A\", \"category\": [\"biolink:NamedThing\", \"biolink:Gene\"]})\n graph.add_node(\"B\", id=\"B\", **{\"name\": \"Node B\"})\n graph.add_node(\"C\", id=\"C\", **{\"name\": \"Node C\"})\n graph.add_node(\"D\", id=\"D\", **{\"name\": \"Node D\"})\n graph.add_node(\"E\", id=\"E\", **{\"name\": \"Node E\"})\n graph.add_node(\"F\", id=\"F\", **{\"name\": \"Node F\"})\n graph.add_edge(\n \"B\", \"A\", **{\"subject\": \"B\", \"object\": \"A\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"C\", \"B\", **{\"subject\": \"C\", \"object\": \"B\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"D\", \"C\", **{\"subject\": \"D\", \"object\": \"C\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"D\", \"A\", **{\"subject\": \"D\", \"object\": \"A\", \"predicate\": \"biolink:related_to\"}\n )\n graph.add_edge(\n \"E\", \"D\", **{\"subject\": \"E\", \"object\": \"D\", \"predicate\": \"biolink:sub_class_of\"}\n )\n graph.add_edge(\n \"F\", \"D\", **{\"subject\": \"F\", \"object\": \"D\", \"predicate\": \"biolink:sub_class_of\"}\n )\n t = Transformer()\n s = TsvSink(\n owner=t,\n filename=os.path.join(TARGET_DIR, \"test_graph_archive\"),\n format=\"tsv\",\n compression=\"tar.gz\",\n node_properties={\"id\", \"name\"},\n edge_properties={\"subject\", \"predicate\", \"object\", \"relation\"},\n )\n for n, data in graph.nodes(data=True):\n s.write_node(data)\n for u, v, k, data in graph.edges(data=True, keys=True):\n s.write_edge(data)\n s.finalize()\n\n assert os.path.exists(os.path.join(TARGET_DIR, \"test_graph_archive.tar.gz\"))", "def test_graph_str():\n node_list = []\n node_list.append(Node({'A':['B','C']}))\n node_list.append(Node({'B':['C','D']}))\n node_list.append(Node({'C':['D']}))\n g = Graph(node_list)\n assert str(g) == \"[{'A':['B','C']},{'B':['C','D']},{'C':['D']}]\"", "def serialize(mode):\r\n serialize_version(mode)\r\n vcb.serialize(mode) \r\n for x in xfrms:\r\n x.serialize(mode)" ]
[ "0.6063675", "0.5935797", "0.5933383", "0.5895876", "0.5845123", "0.5759756", "0.5708165", "0.5590648", "0.5548313", "0.55361754", "0.553367", "0.54797417", "0.54766667", "0.5440754", "0.5416236", "0.5388414", "0.53399545", "0.52932197", "0.52865195", "0.52275985", "0.522483", "0.5215533", "0.5212517", "0.5196402", "0.5181358", "0.5167583", "0.5149812", "0.5146782", "0.51378465", "0.5128672" ]
0.7633531
0
Returns an unbound port number on 127.0.0.1.
def find_unbound_port(): while True: port = random.randint(*PORT_RANGE) sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: sock.bind(("127.0.0.1", port)) return port except socket.error: print("randomly generated port %d is bound. Trying again." % port)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n sock.bind(('127.0.0.1', 0))\n sock.listen(socket.SOMAXCONN)\n ipaddr, port = sock.getsockname()\n sock.close()\n return port", "def GetUnreservedAvailableLocalPort():\n tmp = socket.socket()\n tmp.bind(('', 0))\n port = tmp.getsockname()[1]\n tmp.close()\n\n return port", "def select_unused_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('127.0.0.1', 0))\n _, port = sock.getsockname()\n sock.close()\n return port", "def _get_unused_udp_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('', 0))\n port = s.getsockname()[1]\n s.close()\n return port", "def get_safe_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind((LOCALHOST, 0))\n port = sock.getsockname()[1]\n sock.close()\n return port", "def free_port():\n\n with socket.socket() as sock:\n sock.bind(('', 0))\n return sock.getsockname()[1]", "def get_unused_port(port):\n if port is None or port < 1024 or port > 49151:\n port = random.randint(1024, 49151)\n while True:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.bind(('', port)) # Try to open port\n except socket.error as e:\n if e.errno is 98: # Errorno 98 means address already bound\n port += 1\n continue\n raise e\n s.close()\n return port", "def get_unused_port():\n port, s = get_unused_port_and_socket()\n s.close()\n return port", "def free_port():\n free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n free_socket.bind(('0.0.0.0', 0))\n free_socket.listen(5)\n port = free_socket.getsockname()[1]\n free_socket.close()\n return port", "def free_port():\n free_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n free_socket.bind(('0.0.0.0', 0))\n free_socket.listen(5)\n port = free_socket.getsockname()[1]\n free_socket.close()\n return port", "def get_free_port():\n s = socket.socket()\n s.bind(('', 0))\n _, port = s.getsockname()\n s.close()\n return port", "def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):\n tempsock = socket.socket(family, socktype)\n port = bind_port(tempsock)\n tempsock.close()\n del tempsock\n return port", "def test_get_unused_port() -> None:\n available_port = get_unused_port()\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.bind((\"\", available_port))\n assert int(sock.getsockname()[1]) == available_port", "def get_free_port():\n s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)\n s.bind(('127.0.0.1', 0))\n _, port = s.getsockname()\n s.close()\n return port", "def _find_free_port():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.bind(('localhost', 0))\n _, port = sock.getsockname()\n sock.close()\n\n return port", "def _get_free_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n with closing(s):\n s.bind((\"localhost\", 0))\n return s.getsockname()[1]", "def get_free_port():\n s = socket.socket(socket.AF_INET, type=socket.SOCK_STREAM)\n s.bind((\"localhost\", 0))\n address, port = s.getsockname()\n s.close()\n return port", "def _find_free_port():\n import socket\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # Binding to port 0 will cause the OS to find an available port for us\n sock.bind((\"\", 0))\n port = sock.getsockname()[1]\n sock.close()\n # NOTE: there is still a chance the port could be taken by other processes.\n return port", "def _find_free_port():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n 
s.bind((\"localhost\", 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]", "def find_first_available_port():\n skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n skt.bind((\"0.0.0.0\", 0))\n _, port = skt.getsockname()\n skt.close()\n return port", "def get_free_local_port():\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(('localhost', 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]", "def get_free_port():\n sock = socket.socket()\n\n # bind to a random port (so that the OS automatically assigns us a free port)\n sock.bind(('', 0))\n\n # obtain the random port value\n port = sock.getsockname()[1]\n\n # close the socket so that the port gets free\n sock.close()\n\n return port", "def get_available_port() -> int:\n with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:\n sock.bind(('', 0))\n _, port = sock.getsockname()\n return int(port)", "def get_free_port(address=\"\"):\n\n s = socket(AF_INET, SOCK_STREAM)\n s.bind((address, 0)) # lgtm [py/bind-socket-all-network-interfaces]\n port = s.getsockname()[1]\n s.close()\n return port", "def find_available_local_port():\n infos = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)\n family, proto, _, _, addr = next(iter(infos))\n sock = socket.socket(family, proto)\n sock.bind(addr)\n addr, port = sock.getsockname()[:2]\n sock.close()\n return port", "def _get_unused_port(hostname):\n for port in range(8000, 9001):\n if _check_port_available(hostname, port):\n return port", "def get_free_port():\n max_tries = 0\n while max_tries < MITM_MAX_TRIES:\n max_tries += 1\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('', 0))\n port = s.getsockname()[1]\n except Exception:\n sleep(1)\n else:\n return port\n return None", "def make_port(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((\"0.0.0.0\", 0))\n return s.getsockname()[1]", "def new_port():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)\n for i in range(12042, 16042):\n try:\n s.bind(('127.0.0.1', i))\n s.close()\n return i\n except socket.error, e:\n pass\n raise Exception('No local port available')", "def clean_port(self):\n port = self.cleaned_data['port']\n if not port:\n port = 0\n return port" ]
[ "0.79000366", "0.7862578", "0.7742362", "0.7707025", "0.7701062", "0.7686607", "0.75873804", "0.75825536", "0.7564691", "0.7556933", "0.75550276", "0.7499802", "0.7461294", "0.74481964", "0.74385625", "0.7360805", "0.7261695", "0.71875924", "0.7181387", "0.7119721", "0.7051851", "0.70450956", "0.7040719", "0.7025971", "0.70182675", "0.70044255", "0.7002423", "0.69239527", "0.6830975", "0.6790529" ]
0.7919572
0
Sends an email to a single recipient straight to his MTA. Looks up the MX DNS records of the recipient's SMTP server and attempts delivery through them.
def send(self): answers = dns.resolver.query(self.domain, 'MX') try: for answer in answers: ex = answer.exchange.to_text() server = smtplib.SMTP(ex) server.set_debuglevel(self.verbose) server.sendmail(self.sender, [self.recipient], self.message.as_string()) server.quit() except OSError as e: if e.errno is errno.ENETUNREACH: print('Looks like port 25 is blocked') raise e
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sendmail(self, message=None, subject=None, recipients=None):\n\n if not recipients:\n recipients = self.recipients\n if len(recipients) == 0:\n return False\n if not message:\n message = self.message\n if len(message) == 0:\n return False\n if not subject:\n subject = self.subject\n message = message.replace(\"—\", \" - \")\n message = message.replace(\"”\", \"\\\"\")\n message = '\\n'.join(textwrap.wrap(message,72))\n if not self.session:\n self.session = smtplib.SMTP(self.smtpserver)\n if self.smtpuser:\n self.session.login(self.smtpuser, self.smtppass)\n error = \"\"\n for recipient in recipients:\n if type(recipient) == type([]) or type(recipient) == type(\n ('','')):\n mailaddr = recipient[1]\n recipient = email.utils.formataddr(recipient)\n else:\n realname, mailaddr = email.utils.parseaddr(recipient)\n\n smtpresult = self.session.sendmail(self.sender, [mailaddr],\n self.makeMessage(recipient, message,\n subject))\n if smtpresult:\n for recip in smtpresult.keys():\n error += \"Couldn't delivery mail to: %s Error: %s\\n\" % (\n recip, smtpresult[recip][0], smtpresult[recip][1])\n else:\n print(\"Message sent successfully to %s\" % recipient)\n\n if error != \"\":\n print(\"%s\" % error)", "def send_mail_raise_smtp(messages):\n raise SMTPRecipientsRefused(recipients=messages[0].recipients())", "def send_email(email: str, name: str, message, db: Session):\n msg = MIMEText(message)\n msg[\"Subject\"] = name\n msg[\"From\"] = \"[email protected]\"\n msg[\"To\"] = email\n with smtplib.SMTP(host=\"localhost\", port=8025) as s:\n try:\n s.sendmail(msg[\"From\"], [email], msg.as_string())\n logger.info(\"Recipient reached at {}\".format(email))\n except smtplib.SMTPRecipientsRefused:\n logger.error(\"Recipient refused at {}\".format(email))\n raise\n mark_person_emailed(db, email)", "def send_email(recipient, subject, message):\n from_email = os.getenv(\"EMAIL_SENDER\")\n status = send_mail(subject, message, from_email, [recipient])\n return status", "def sendmail(message, recipient):\n \n import smtplib\n\n fromaddr = \"[email protected]\"\n toaddrs = recipient + \"@someaddress.com\"\n\n # Add the From: and To: headers at the start!\n msg = (\"From: %s\\r\\nTo: %s\\r\\n\\r\\n\" %(fromaddr, \", \".join(toaddrs)))\n msg = msg + str(message[0]) + message[1]\n\n server = smtplib.SMTP('localhost')\n server.set_debuglevel(0)\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()", "def send(\r\n self,\r\n to = '', #list of email addresses - Required\r\n subject='None', #message's subject - Required\r\n message_text='None', #message body in plain text - Required\r\n message_html=None, #message body in html - Optional\r\n attachments=None, #list of truples [(filename, file_contents)] - Optional\r\n cc = None, #list of email addresses to CC message to\r\n bcc = None, #list of email addresses to BCC message to\r\n reply_to = None, #single email address to have replies send to\r\n ): \r\n if not isinstance(to, list):\r\n to = [to]\r\n\r\n try:\r\n if self.settings.private.email_server == 'gae':\r\n from google.appengine.api import mail\r\n #untested on GAE, but in theory should work\r\n #http://code.google.com/appengine/docs/python/mail/emailmessagefields.html\r\n mail.send_mail(sender=self.settings.private.email_sender, to=to,\r\n subject=subject, body=message_text, html=message_html, attachments=attachments, cc = cc,\r\n bcc = bcc, reply_to = reply_to)\r\n else:\r\n\r\n msg = self.buildMIME(sender = self.settings.private.email_sender,\r\n recipients = to, subject = subject,\r\n message_text 
= message_text, message_html = message_html,\r\n attachments = attachments,\r\n cc = cc, bcc = bcc, reply_to = reply_to)\r\n #print 'message'+msg.as_string()\r\n #Build MIME body\r\n (host, port) = self.settings.mail.server.split(':')\r\n\r\n if self.settings.mail.ssl: \r\n try:\r\n server = smtplib.SMTP_SSL(host, port)\r\n except:\r\n # ERROR python <= 2.6\r\n pass\r\n else:\r\n server = smtplib.SMTP(host, port)\r\n\r\n if self.settings.mail.login:\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n if self.settings.mail.use_tls:\r\n try:\r\n server.starttls()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in STARTTLS\")\r\n except SMTPException:\r\n logger.info(\"Server does not support TLS\")\r\n\r\n except RuntimeError:\r\n logger.info(\"Python version does not support TLS (<= 2.6?)\")\r\n\r\n try:\r\n server.ehlo_or_helo_if_needed()\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in HELO\")\r\n\r\n (username, password) = self.settings.mail.login.split(':')\r\n try:\r\n server.login(username, password)\r\n except SMTPHeloError:\r\n logger.info(\"SMTP Helo Error in LOGIN\")\r\n\r\n except SMTPAuthenticationError:\r\n logger.info(\"Invalid username/password combination\")\r\n\r\n except SMTPException:\r\n logger.info(\"SMTP error in login\")\r\n\r\n try:\r\n server.sendmail(self.settings.private.email_sender, to, msg.as_string())\r\n server.quit()\r\n\r\n except SMTPRecipientsRefused:\r\n logger.info(\"All recipients were refused. Nobody got the mail.\")\r\n\r\n except SMTPHeloError:\r\n logger.info(\"The server didn't reply properly to the HELO greeting.\")\r\n\r\n except SMTPSenderRefused:\r\n logger.info(\"The server didn't accept the from_addr.\")\r\n\r\n except SMTPDataError:\r\n logger.info(\"The server replied with an unexpected error code (other than a refusal of a recipient).\")\r\n \r\n except Exception, e:\r\n return False\r\n return True", "def send( self, msg, mto=None, mfrom=None, subject=None, encode=None, from_member=None, \\\n IsAntiSpam=None, IsReturnReceiptTo=None, IsConfirmReadingTo=None, \\\n object_url=None, raise_exc=MissingValue ):\n count = 0\n if not self.address():\n LOG( 'MailSender.send', TRACE, 'SMTP address is not defined')\n return 0\n\n try:\n if not isinstance( msg, MailMessage ):\n count = MailHost.send( self, msg, mto, mfrom, subject, encode )\n LOG( 'MailSender.send', TRACE, 'sent mail messages: count [%s], from %s to %s ' % ( count, mfrom, mto ))\n self.close()\n return count\n\n if subject is not None:\n msg.set_header( 'subject', subject )\n else:\n subject = ''\n\n if 'date' not in msg:\n msg.set_header( 'date', formatdate( None, 1 ) )\n if 'message-id' not in msg:\n msg.set_header( 'message-id', make_msgid() )\n if 'x-mailer' not in msg:\n msg.set_header( 'x-mailer', Config.MailerName % self._class_version )\n\n membership = getToolByName( self, 'portal_membership', None )\n properties = getToolByName( self, 'portal_properties', None )\n if membership is None or properties is None:\n return 0\n\n member = mname = None\n\n if mfrom is None:\n if from_member and not membership.isAnonymousUser():\n member = membership.getAuthenticatedMember()\n elif 'from' in msg:\n mfrom = parseaddr( msg.get( 'from', decode=1 ) )[1]\n if not mfrom:\n mfrom = properties.getProperty( 'email_from_address' )\n else:\n mname = properties.getProperty( 'email_from_name' )\n if IsAntiSpam != 0:\n try: mfrom = properties.getProperty( 'email_antispam' )\n except: pass\n if not 
mfrom:\n mfrom = properties.getProperty( 'email_from_address' )\n else:\n mname = None\n else:\n if type(mfrom) is StringType:\n if mfrom.find('@') < 0:\n member = membership.getMemberById( mfrom )\n elif isinstance( mfrom, MemberData ):\n member = mfrom\n\n if member is not None:\n mname = member.getMemberName()\n mfrom = member.getMemberEmail()\n\n if not mfrom:\n mfrom = getSecurityManager().getUser().getUserName()\n\n if 'from' not in msg:\n msg.set_header( 'from', (mname, mfrom) )\n\n list_to = None\n\n if mto is None:\n mdict = {}\n for header in ( 'to', 'cc', 'bcc', 'resent-to', 'resent-cc' ):\n for mname, email in getaddresses( msg.get_all( header ) ):\n if email:\n mdict[ email ] = header\n mto = mdict.keys()\n elif 'to' in msg:\n list_to = []\n\n if 'bcc' in msg:\n msg.remove_header( 'bcc' )\n\n if IsReturnReceiptTo:\n msg.set_header( 'Disposition-Notification-To', mfrom )\n msg.set_header( 'Return-Receipt-To', mfrom )\n\n if IsConfirmReadingTo:\n msg.set_header( 'Return-Receipt-To', mfrom )\n msg.set_header( 'Disposition-Notification-To', mfrom )\n msg.set_header( 'X-Confirm-Reading-To', mfrom )\n\n no_mail = membership.getGroupMembers('_NO_MAIL_') or []\n if mto and type(mto) is StringType:\n mto = [ mto ]\n check_list_to = []\n\n for item in mto:\n member = None\n if type(item) is StringType:\n if item.find('@') < 0:\n member = membership.getMemberById( item )\n elif isinstance( item, MemberData ):\n member = item\n\n if member is not None:\n mname = member.getMemberName()\n email = member.getMemberEmail()\n else:\n mname = None\n email = str(item)\n\n if member is not None and member.getUserName() in no_mail:\n continue\n\n if not email or email == '' or email == 'None' or email.find('@') < 1:\n LOG( 'MailSender.send', ERROR, 'no e-mail address for user \"%s\", subject \"%s\", users: %s' % \\\n ( item, subject, `mto` ))\n continue\n\n if email in check_list_to:\n continue\n check_list_to.append( email )\n\n if list_to is None:\n msg.set_header( 'to', (mname, email) )\n count += self._send( mfrom, [email], msg )\n else:\n list_to.append( email )\n\n if list_to:\n count = self._send( mfrom, list_to, msg )\n\n # TODO: find a way to disconnect only after request is processed\n self.close()\n\n if count:\n LOG('MailSender.send', INFO, 'mail address list: object [%s]\\n>from %s to %s\\n>total messages %s' % \\\n ( object_url or subject, mfrom, check_list_to, count ))\n else:\n LOG('MailSender.send', INFO, 'no mail')\n except:\n if raise_exc or raise_exc is MissingValue:\n raise\n else:\n LOG('MailSender.send', ERROR, '[%s] sending failed' % self.address(), error=exc_info())\n\n return count", "def process_error_mail(recipient, sender, journal_id):\n\n if recipient in ['', None]:\n #: Simpley Error Mail!\n #: TODO: error marking...\n return True\n\n try:\n param = return_path_from_address(recipient)\n assert param['message_id'] != \"\"\n assert param['domain'] != \"\"\n\n try:\n #: Jourmal mail object\n journal_msg = Journal.objects.get(id=journal_id).mailobject()\n error_address = journal_msg.get('X-Failed-Recipients')\n except:\n pass\n\n try:\n #: Find message\n msg = Publish.objects.get(\n id=int(param['message_id']),\n publish__site__domain=param['domain'])\n\n # X-Failed-Recipients SHOULD be checked ?\n assert(\n error_address is None or\n error_address == msg.member.address)\n\n #: increment bounce number\n #: this mailbox will be disabled sometimes later.\n msg.member.bounces = msg.member.bounces + 1\n msg.member.save()\n\n #:\n return True\n\n except:\n pass\n\n except 
exceptions.AttributeError:\n # May be normal address..\n # Other handler will be called.\n return False\n\n return False", "def send(self, to_addrs, subject, message, from_addr=None):\n if not from_addr: from_addr = self.user\n data = \"From: %s\\nTo: %s\\nSubject: %s\\n\\n%s\" \\\n % (from_addr, to_addrs, subject, message)\n try:\n server = smtplib.SMTP(self.host)\n server.ehlo()\n server.starttls()\n server.ehlo() # This must be done before and after starttls().\n server.login(self.user, self.password)\n server.sendmail(from_addr, to_addrs, data)\n except:\n raise\n try:\n server.quit() # This always fails and can safely be ignored.\n except:\n pass", "def send_email( # pylint: disable=too-many-arguments\n recipients, subject, body, html_body=None, reply_to=None, swallow_errors=False\n):\n recipients = enforce_list(recipients)\n reply_to = enforce_list(reply_to or [])\n\n params = {\n \"from_email\": settings.DEFAULT_FROM_EMAIL,\n \"to\": recipients,\n \"subject\": subject,\n \"body\": body,\n \"reply_to\": enforce_list(reply_to or settings.DEFAULT_FROM_EMAIL),\n }\n if html_body:\n message = EmailMultiAlternatives(**params)\n message.attach_alternative(html_body, \"text/html\")\n else:\n message = EmailMessage(**params)\n\n logger.info(\"Sending email with subject %s\", subject)\n try:\n message.send()\n except Exception:\n logger.exception(\"Error sending email with subject %s\", subject)\n if not swallow_errors:\n raise", "def send_email(self, to_address, subject, body, cc_recipients=[]):\n\n # Build and send message\n msg = Message(\n account=self.account,\n folder=self.account.sent,\n subject=subject,\n body= HTMLBody(body),\n to_recipients=[Mailbox(email_address=to_address)],\n cc_recipients=[(Mailbox(email_address=x)) for x in cc_recipients]\n )\n\n msg.send_and_save()\n print(\"Message to {} sent.\".format(to_address))", "def send(self, email):\r\n smtp = smtplib.SMTP(self.server, self.port)\r\n smtp.ehlo()\r\n \r\n if self.tls:\r\n smtp.starttls()\r\n smtp.ehlo()\r\n\r\n if self.user and self.passwd:\r\n smtp.login(self.user, self.passwd)\r\n\r\n smtp.sendmail(email.from_address, email.to + email.ccs, str(email))\r\n if email.bccs:\r\n email.root['X-antroy-sent'] = \"True\"\r\n smtp.sendmail(email.from_address, email.bccs, str(email))\r\n del email.root['X-antroy-sent']\r\n smtp.quit()", "def send_email(self, message, mail_server_id=None, smtp_server=None, smtp_port=None,\n smtp_user=None, smtp_password=None, smtp_encryption=None, smtp_debug=False,\n smtp_session=None):\n # Use the default bounce address **only if** no Return-Path was\n # provided by caller. 
Caller may be using Variable Envelope Return\n # Path (VERP) to detect no-longer valid email addresses.\n if smtp_user:\n _logger.error(\"smpt session --------------------\")\n _logger.error(smtp_user)\n smtp_from = smtp_user\n else:\n smtp_from = message['Return-Path'] or self._get_default_bounce_address() or message['From']\n assert smtp_from, \"The Return-Path or From header is required for any outbound email\"\n\n # The email's \"Envelope From\" (Return-Path), and all recipient addresses must only contain ASCII characters.\n from_rfc2822 = extract_rfc2822_addresses(smtp_from)\n assert from_rfc2822, (\"Malformed 'Return-Path' or 'From' address: %r - \"\n \"It should contain one valid plain ASCII email\") % smtp_from\n # use last extracted email, to support rarities like 'Support@MyComp <[email protected]>'\n smtp_from = from_rfc2822[-1]\n email_to = message['To']\n email_cc = message['Cc']\n email_bcc = message['Bcc']\n del message['Bcc']\n\n smtp_to_list = [\n address\n for base in [email_to, email_cc, email_bcc]\n for address in extract_rfc2822_addresses(base)\n if address\n ]\n assert smtp_to_list, self.NO_VALID_RECIPIENT\n\n x_forge_to = message['X-Forge-To']\n if x_forge_to:\n # `To:` header forged, e.g. for posting on mail.channels, to avoid confusion\n del message['X-Forge-To']\n del message['To'] # avoid multiple To: headers!\n message['To'] = x_forge_to\n\n # Do not actually send emails in testing mode!\n if getattr(threading.currentThread(), 'testing', False) or self.env.registry.in_test_mode():\n _test_logger.info(\"skip sending email in test mode\")\n return message['Message-Id']\n\n try:\n message_id = message['Message-Id']\n smtp = smtp_session\n smtp = smtp or self.connect(\n smtp_server, smtp_port, smtp_user, smtp_password,\n smtp_encryption, smtp_debug, mail_server_id=mail_server_id)\n smtp.sendmail(smtp_from, smtp_to_list, message.as_string())\n # do not quit() a pre-established smtp_session\n if not smtp_session:\n smtp.quit()\n except smtplib.SMTPServerDisconnected:\n raise\n except Exception as e:\n params = (ustr(smtp_server), e.__class__.__name__, ustr(e))\n msg = _(\"Mail delivery failed via SMTP server '%s'.\\n%s: %s\") % params\n _logger.info(msg)\n raise MailDeliveryException(_(\"Mail Delivery Failed\"), msg)\n return message_id", "def send_email(to_addresses, subject, messages):\n from_address = email_from\n to_list = []\n if from_address is None:\n from_address = settings.SERVER_EMAIL\n\n if isinstance(to_addresses, list) and isinstance(messages, list):\n\n if len(to_addresses) == len(messages):\n data = []\n for idx, message in enumerate(messages):\n if settings.DEBUG:\n data.append((subject, message, from_address,\n ['[email protected]',]))\n to_list.append('[email protected]')\n else:\n data.append((subject, message, from_address,\n [to_addresses[idx],]))\n to_list.append(to_addresses[idx])\n\n use_mass_email = True\n else:\n use_mass_email = False\n if settings.DEBUG:\n logger.debug('Overwriting the email: sending to @example.com.')\n # Overwrite sender address in debug mode\n to_addresses = ['[email protected]',]\n to_list.append('[email protected]')\n\n out = None\n if use_mass_email:\n try:\n out = send_mass_mail(tuple(data), fail_silently=False)\n except Exception as e:\n logger.error(('An error occurred when sending mass emails [%s]' %\n str(e)))\n else:\n if subject and messages and from_address:\n try:\n out = _send_mail(subject, messages, from_address, to_addresses,\n fail_silently=False)\n except Exception as e:\n logger.error(('An error 
occurred when sending email to %s, '\n 'with subject [%s]. Error = %s') % (\n str(to_addresses),\n subject,\n str(e)))\n\n return out, to_list", "def test_email():\n recipients = configs[\"email_to\"].split(\", \")\n email_body = test_email_content()\n if configs[\"smtp_ssl\"] == 1:\n server = smtplib.SMTP_SSL(configs[\"smtp_server\"])\n elif configs[\"smtp_tls\"] == 1:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n server.starttls()\n else:\n server = smtplib.SMTP(configs[\"smtp_server\"])\n\n if configs[\"smtp_authentication\"] == 1:\n server.login(configs[\"username\"], configs[\"password\"])\n\n server.sendmail(configs[\"email_from\"], recipients, email_body)\n server.quit()", "def send(self, smtp_server_instance: SMTPServer = None):\n\t\tif not self.can_send_now():\n\t\t\treturn\n\n\t\twith SendMailContext(self, smtp_server_instance) as ctx:\n\t\t\tmessage = None\n\t\t\tfor recipient in self.recipients:\n\t\t\t\tif recipient.is_mail_sent():\n\t\t\t\t\tcontinue\n\n\t\t\t\tmessage = ctx.build_message(recipient.recipient)\n\t\t\t\tif method := get_hook_method(\"override_email_send\"):\n\t\t\t\t\tmethod(self, self.sender, recipient.recipient, message)\n\t\t\t\telse:\n\t\t\t\t\tif not frappe.flags.in_test:\n\t\t\t\t\t\tctx.smtp_server.session.sendmail(\n\t\t\t\t\t\t\tfrom_addr=self.sender, to_addrs=recipient.recipient, msg=message\n\t\t\t\t\t\t)\n\n\t\t\t\tctx.update_recipient_status_to_sent(recipient)\n\n\t\t\tif frappe.flags.in_test:\n\t\t\t\tfrappe.flags.sent_mail = message\n\t\t\t\treturn\n\n\t\t\tif ctx.email_account_doc.append_emails_to_sent_folder:\n\t\t\t\tctx.email_account_doc.append_email_to_sent_folder(message)", "def send_email(self, fromaddr, addrs, message=\"\"):\n smtp = smtplib.SMTP(self._server, self._port)\n smtp.sendmail(fromaddr, addrs, message)\n smtp.quit()", "def _send_smtp(message, subject, to, to_name, sender, sender_name):\n host = app.config.get('MAIL_HOST')\n\n if not host:\n raise MailFailure('SMTP Server Not Configured')\n\n try:\n server = smtplib.SMTP(host)\n except (smtplib.SMTPConnectError, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error connecting to SMTP server.')\n\n msg = text.MIMEText(message)\n msg['Subject'] = subject\n msg['To'] = email.utils.formataddr((to_name, to))\n msg['From'] = email.utils.formataddr((sender_name, sender))\n\n try:\n if app.debug:\n server.set_debuglevel(True)\n server.sendmail(sender, [to], msg.as_string())\n except (smtplib.SMTPException, socket.error) as ex:\n app.logger.error('Unable to send mail: %s', str(ex))\n raise MailFailure('Error sending mail to SMTP server.')\n finally:\n try:\n server.quit()\n except smtplib.SMTPException:\n pass", "def _send(self, email_message):\n if not email_message.to:\n return False\n try:\n if (isinstance(email_message,gmail.EmailMessage)):\n e = message\n elif (isinstance(email_message,mail.EmailMessage)):\n e = gmail.EmailMessage(sender=email_message.from_email,\n to=email_message.to,\n subject=email_message.subject,\n body=email_message.body)\n if email_message.extra_headers.get('Reply-To', None):\n e.reply_to = email_message.extra_headers['Reply-To']\n if email_message.bcc:\n e.bcc = list(email_message.bcc)\n #TODO - add support for html messages and attachments...\n e.send()\n except:\n if not self.fail_silently:\n raise\n return False\n return True", "def send_mail(email):\n return email.send()", "def send_email(self, to, subject, message):\n\n email_to = \"[email protected]\"\n try:\n mx_alarm = AlertEmail(email_to, 
self.subject, self.message)\n mx_alarm.send()\n print(\"\\t{} |{}| Successfully sent email.\".format(Timer.OK, self.tinfo['name']))\n return True\n except Exception as e:\n print(\"\\t{} Exception in send_email! {}\".format(Timer.FAIL, e))", "def send(self):\n msg = MIMEText(self.body) # prepare body\n s = smtplib.SMTP(self.mail_server)\n self._connect_to_exchange(s)\n for receiver in iter(self.to_adress):\n if '@' not in receiver:\n receiver = '{rcv}@cbs.nl'.format(rcv=receiver)\n msg['Subject'] = self.subject\n msg['From'] = self.from_adress\n msg['To'] = receiver\n s.sendmail(self.from_adress, [receiver], msg.as_string())\n s.quit()", "def mail(server, from_address, from_pass, address_list, msg, port = 25):\n\n smtp_mail = smtplib.SMTP(server,port)\n smtp_mail.starttls()\n smtp_mail.login(from_address, from_pass)\n smtp_mail.sendmail(from_address, address_list, msg) \n smtp_mail.quit()", "def send(self):\n return send_mail(self.subject, self.message, self.sender, self.recipients, fail_silently=False)", "def simple_send_email(sender, recipient, subject, message, server=EMAIL_HOST, port=EMAIL_PORT):\n headers = [\"From: \" + sender,\n \"Subject: \" + subject,\n \"To: \" + recipient,\n \"MIME-Version: 1.0\",\n \"Content-Type: text/plain\"]\n headers = \"\\r\\n\".join(headers)\n\n session = smtplib.SMTP(server, port)\n\n session.ehlo()\n session.starttls()\n session.ehlo()\n session.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)\n\n session.sendmail(sender, recipient, headers + \"\\r\\n\\r\\n\" + message)\n session.close()", "def send_individual_email(cls, subject, body, recipient):\n # Since .send_batch() returns a list, we need to return the first in the list\n responses = cls.send_batch(subject, body, [recipient])\n return responses[0]", "def send_mail(message: MIMEText):\n try:\n server_host = Config.get_property(\"smtp.server-host\")\n server_port = Config.get_property(\"smtp.server-port\")\n user = Config.get_property(\"smtp.user\")\n password = Config.get_property(\"smtp.password\")\n receiver = Config.get_property(\"smtp.receiver\")\n\n if None in [server_host, server_port, user, password, receiver] or \\\n \"\" in [server_host, server_port, user, password, receiver]:\n return True\n\n send_from = __format_addr(\"noreply\", user)\n send_to = __format_addr(getpass.getuser(), receiver)\n\n message[\"From\"] = send_from\n message[\"To\"] = send_to\n\n smtp = smtplib.SMTP(server_host, server_port)\n smtp.login(user, password)\n smtp.sendmail(user, [receiver, ], message.as_string())\n smtp.quit()\n return True\n except Exception as e:\n print(e)", "def send_email(subject, message, recipient_list, from_email=None,\n fail_silently=False, connection=None):\n if not from_email:\n from_email = _s('SERVER_EMAIL') or _s('DEFAULT_FROM_EMAIL')\n try:\n subj = unicode(subject)\n except UnicodeDecodeError:\n subj = subject.decode('utf8')\n datatuple = [(subj, message, from_email, [recipient],) \\\n for recipient in recipient_list]\n send_mass_mail(datatuple)", "def mail(note,\n sender,\n recipients,\n cc_recipients=[],\n attachments=[],\n subject = '',\n verbosity = 0):\n if verbosity > 1:\n msgb(\"SENDING EMAIL\")\n note = [x.rstrip() for x in note]\n body = '\\n'.join(note)\n att = []\n for attachment in attachments:\n att.append( (attachment, os.path.basename(attachment)) )\n try:\n _send_email(recipients,\n sender,\n subject,\n body,\n att,\n cc_recipients,\n verbosity)\n except:\n die(\"Sending email failed\")\n return 0", "def test_send_email(self):\n\t\trecipient = 
\"\"\n\t\tself.email.send_email(self.subject, recipient, self.content)" ]
[ "0.6710717", "0.6321565", "0.6236836", "0.62081426", "0.61137784", "0.6100117", "0.60243607", "0.6002686", "0.5987678", "0.59080464", "0.58790296", "0.5877092", "0.5850349", "0.584703", "0.5835367", "0.5779802", "0.5771508", "0.57684636", "0.5767812", "0.5765022", "0.5761345", "0.57382244", "0.5729019", "0.5724844", "0.57062954", "0.5704703", "0.5683378", "0.56705517", "0.56703377", "0.56674117" ]
0.7115084
0
Starts an iterative task which update org admins.
def updateOrgAdmins(request): return updateRole('gsoc_org_admin')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def actualize(self):\r\n for guild, settings in self.bot.settings.items():\r\n # Grab the roles and their requirements\r\n guild = self.bot.get_guild(guild)\r\n base_member = settings.get(\"rank_basic_member_role_id\")\r\n base_member = guild.get_role(base_member)\r\n active_member = settings.get(\"rank_active_member_role_id\")\r\n active_member = guild.get_role(active_member)\r\n active_member_days = settings.get(\"rank_active_member_required_days\")\r\n active_member_activity = settings.get(\"rank_active_member_required_activity\")\r\n junior_mod = settings.get(\"rank_junior_mod_role_id\")\r\n junior_mod = guild.get_role(junior_mod)\r\n junior_mod_days = settings.get(\"rank_junior_mod_required_days\")\r\n junior_mod_activity = settings.get(\"rank_junior_mod_required_activity\")\r\n senior_mod = settings.get(\"rank_senior_mod_role_id\")\r\n senior_mod = guild.get_role(senior_mod)\r\n admin = settings.get(\"rank_admin_role_id\")\r\n admin = guild.get_role(admin)\r\n # Fetch the list of members eligible for each rank by their activity\r\n active_member_eligible = await util_users.fetch_users_by_days_and_activity(\r\n self.bot, guild, active_member_days, active_member_activity\r\n )\r\n active_member_eligible = [i[0] for i in active_member_eligible]\r\n junior_mod_eligible = await util_users.fetch_users_by_days_and_activity(\r\n self.bot, guild, junior_mod_days, junior_mod_activity\r\n )\r\n junior_mod_eligible = [i[0] for i in junior_mod_eligible]\r\n # Iterate over all members to edit their roles\r\n for member in [i for i in guild.members if not i.bot]:\r\n # Admin check\r\n if admin in member.roles:\r\n continue\r\n # Senior mod check\r\n elif senior_mod in member.roles:\r\n continue\r\n # Junior mod check\r\n elif junior_mod in member.roles:\r\n if member.id not in junior_mod_eligible:\r\n await member.remove_roles(junior_mod)\r\n await member.add_roles(active_member)\r\n # Active member check\r\n elif active_member in member.roles:\r\n if member.id not in active_member_eligible:\r\n await member.remove_roles(active_member)\r\n await member.add_roles(base_member)\r\n # Base member check\r\n elif base_member in member.roles:\r\n if member.id in active_member_eligible:\r\n await member.add_roles(active_member)", "def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')", "def _update_activities(self) -> None:\n self.hass.async_create_task(\n async_update_programs_and_zones(self.hass, self._entry)\n )", "def command(self):\n from tg import config\n self.basic_setup()\n\n # reset LDAP database and chroot home folders (if supported)\n self.reset_LDAP(config)\n self.reset_chroot()\n\n # exclude these users from the command\n special_users = ['*anonymous', 'root']\n\n # add users to LDAP and asynchronously create chroot user home folder\n count = 0\n users = User.query.find({'username': {'$nin': special_users}}).all()\n task = None\n for user in users:\n self.add_LDAP_user(user, config)\n if self.HAS_CHROOT:\n task = register_ldap.post(user.username)\n count += 1\n print \"Refreshed {} users.\".format(count)\n\n # wait for last task to complete\n if task:\n MonQTask.wait_for_tasks(query={\n '_id': task._id, 'state': {'$in': ['ready', 'busy']}\n }, timeout=240000)\n\n # asynchronously upload user public keys\n count = 0\n if self.HAS_CHROOT:\n for user in users:\n 
public_key = getattr(user, 'public_key', None)\n if public_key:\n upload_ssh_ldap.post(user.username, public_key)\n count += 1\n print \"Uploaded {} public keys.\".format(count)", "def _update_all_tasks(self) -> None:\n for task in self.tasks:\n task.update()", "def _migrate_users(correct_course_key, role, lower_org):\r\n for user in orm['auth.user'].objects.filter(groups=group).all():\r\n entry = orm['student.courseaccessrole'](\r\n role=role, user=user,\r\n org=correct_course_key.org, course_id=correct_course_key\r\n )\r\n try:\r\n entry.save()\r\n except IntegrityError:\r\n # already stored\r\n pass\r\n orgs[lower_org] = correct_course_key.org", "def admin(self, **kwargs):\n with self.user(**kwargs):\n g.admin = True\n yield", "def invite_site_users(users):\n #group(run_cron.s(item) for item in sites).delay()\n pass", "def __reloadAdmins(self, admin_id):\n for admin_username in admin_main.getLoader().getAllUsernames():\n try:\n admin_obj=admin_main.getLoader().getAdminByName(admin_username)\n if admin_obj.creator_id == admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n else:\n for lock_obj in admin_obj.getLocks():\n if lock_obj.getLockerID()==admin_id:\n admin_main.getLoader().loadAdmin(admin_obj.getAdminID())\n break\n except:\n logException(LOG_DEBUG)", "def test_add_admin_to_org(self):\n pass", "def update_sysadmin_users():\n \n require('environment', provided_by=env.environments)\n servers = ec2_instances(filters=env.filters, cls=OpenRuralWebInstance,\n inst_kwargs={'deploy_user': env.deploy_user,\n 'instance_type': env.instance_type})\n for server in servers:\n server.create_users()\n server.update_deployer_keys()", "async def admin(self, ctx):\n if ctx.message.author.top_role.name.lower() == 'officer':\n await ctx.message.channel.send(\n 'List of useable commands for the parent command: **admin**\\n\\n **eboard admin auto** - updates the '\n 'new seats given current election data.\\n\\n**eboard admin set <position> <User#0000>** - assigns a '\n 'position to target user.\\n\\n**eboard admin remove <position> <User#0000>** - remove a target user '\n 'from their position.\\n\\n**eboard admin list** - lists the positions in the SQLite table.')", "def sync_org(config, orgs):\n\n logger = logging.getLogger(\"sync-org\")\n\n for org in orgs:\n logger.info(\"Syncing {} organization\".format(org))\n config.get_manager().sync_org(org)", "def sync_nas(self, users_from_db: Iterator):", "def handle(self, *args, **options):\r\n username = 'populate_creators_command'\r\n email = '[email protected]'\r\n try:\r\n admin = User.objects.create_user(username, email, 'foo')\r\n admin.is_staff = True\r\n admin.save()\r\n except IntegrityError:\r\n # If the script did not complete the last time it was run,\r\n # the admin user will already exist.\r\n admin = User.objects.get(username=username, email=email)\r\n\r\n for user in get_users_with_role(CourseInstructorRole.ROLE):\r\n add_user_with_status_granted(admin, user)\r\n\r\n # Some users will be both staff and instructors. Those folks have been\r\n # added with status granted above, and add_user_with_status_unrequested\r\n # will not try to add them again if they already exist in the course creator database.\r\n for user in get_users_with_role(CourseStaffRole.ROLE):\r\n add_user_with_status_unrequested(user)\r\n\r\n # There could be users who are not in either staff or instructor (they've\r\n # never actually done anything in Studio). 
I plan to add those as unrequested\r\n # when they first go to their dashboard.\r\n\r\n admin.delete()", "def update_users(self):\n user_list = []\n try:\n all_users_list = self.helper.list_all_users()\n users_to_update = []\n for email in all_users_list:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n # Only update the model in the Datastore if one of the fields has\n # changed.\n is_user_cloud_admin = self.helper.is_user_cloud_admin(email)\n can_upload_apps = self.helper.can_upload_apps(email)\n owned_apps = self.helper.get_owned_apps(email)\n dash_layout_settings = self.get_dash_layout_settings(user_info)\n stored_layout_settings = user_info.dash_layout_settings\n if stored_layout_settings:\n dash_change = \\\n (dash_layout_settings.get(\"nav\") != stored_layout_settings.get(\n \"nav\")) or \\\n (dash_layout_settings.get(\"panel\") != stored_layout_settings.get(\n \"panel\"))\n else:\n dash_change = True\n\n if user_info.is_user_cloud_admin != is_user_cloud_admin or \\\n user_info.can_upload_apps != can_upload_apps or \\\n dash_change or \\\n user_info.owned_apps != owned_apps:\n user_info.is_user_cloud_admin = is_user_cloud_admin\n user_info.can_upload_apps = can_upload_apps\n user_info.owned_apps = owned_apps\n user_info.dash_layout_settings = dash_layout_settings\n users_to_update.append(user_info)\n\n # Either way, add the user's info to the list of all user's info.\n user_list.append(user_info)\n else:\n user_info = UserInfo(id=email)\n user_info.is_user_cloud_admin = self.helper.is_user_cloud_admin(email)\n user_info.can_upload_apps = self.helper.can_upload_apps(email)\n user_info.owned_apps = self.helper.get_owned_apps(email)\n user_info.dash_layout_settings = self.get_dash_layout_settings(\n user_info=user_info)\n users_to_update.append(user_info)\n user_list.append(user_info)\n ndb.put_multi(users_to_update)\n return user_list\n except Exception as err:\n logging.exception(err)\n return []", "def test_index_program_enrolled_users(self):\n enrollments = [ProgramEnrollmentFactory.create() for _ in range(2)]\n enrollment_ids = [enrollment.id for enrollment in enrollments]\n index_program_enrolled_users(enrollment_ids)\n assert list(\n self.index_program_enrolled_users_mock.call_args[0][0].values_list('id', flat=True)\n ) == enrollment_ids\n for enrollment in enrollments:\n self.send_automatic_emails_mock.assert_any_call(enrollment)\n self.refresh_index_mock.assert_called_with(get_default_alias())", "def update_admin_ids():\n admin_emails_config = Registry.get_config_property(\n 'admin_emails')\n if not admin_emails_config:\n return []\n\n admin_ids = []\n for email in admin_emails_config.value:\n user_id = user_services.get_user_id_from_email(email)\n if user_id is not None:\n admin_ids.append(user_id)\n else:\n raise Exception('Bad admin email: %s' % email)\n return admin_ids", "async def assert_requester_is_admin(auth: Auth, request: SynapseRequest) -> None:\n requester = await auth.get_user_by_req(request)\n await assert_user_is_admin(auth, requester)", "def update_synchronization():\n logger.debug(\"Update synchronizations started\")\n for sa in SocialAttributes.objects.filter(start_page_token__isnull=False):\n if should_sync(sa.user, 'google-oauth2', 'tasks.gdrive'):\n if sa.user.social_auth.filter(provider='google-oauth2').first():\n access_token, refresh_token = get_google_tokens(sa.user)\n subtask(sync_gdrive_changes).delay(sa.user, access_token, refresh_token, sa.start_page_token)\n else:\n logger.info(\"Gdrive oauth token for user '%s' already in use, 
skipping sync ...\", sa.user.username)", "def run(self):\n self.export_users()", "def approve(self):\n if (self.status == self.APPROVED):\n pass\n\n print ('starting approval process by adding events to the primary cal')\n\n primary_calendar = self.course.calendar_courses.get(primary=True)\n # print ('primary = ' + primary_calendar)\n for event in self.events.all():\n d = event.date\n start = datetime.datetime(d.year, d.month, d.day)\n start = timezone.make_aware(start, timezone.get_current_timezone())\n start = start + datetime.timedelta(hours=8)\n end = start + datetime.timedelta(hours=1)\n\n params = {\n 'calendar': primary_calendar,\n 'title': event.title,\n 'start': start,\n 'end': end\n }\n CalendarEvent.objects.create(**params)\n event.approved = True\n event.save()\n\n print ('trying to set syllabus to approved')\n\n try:\n syllabus = self.syllabus.all()[0]\n syllabus.approved = True\n syllabus.course = self.course\n syllabus.save()\n except:\n print ('dang, that failed, but continuing nonetheless.')\n pass\n\n\n print ('creating students from roster-students')\n\n\n for student in self.students.all():\n email = student.email\n if email:\n user = utils.get_or_create_user(email, student.first_name, student.last_name)\n school = self.course.domain\n user_student = utils.get_or_create_student(school, user)\n\n self.course.enroll_by_roster(user_student, self)\n\n student.approved = True\n student.save()\n\n print ('instructors')\n\n for instructor in self.instructors.all():\n instructor.approved = True\n instructor.save()\n\n print ('approving done')\n\n\n self.status = self.APPROVED\n self.save()\n\n add_notification(\n self.created_by.user,\n 'Your class set for {}, is approved and published!'.format(self.course)\n )", "def test_update(self):\n\n\n username,userpass = self.testdata.find_account_for('toolsubmitter')\n\n self.utils.account.login_as(username,userpass)\n\n self.contribtool.update(TOOLNAME,username,userpass)", "def run(self):\n self.update_repos()", "def test_pre_fill_and_assign(self):\n users = []\n for i in range(1, 50):\n users.append(User.objects.create_user(username=\"u{0}\".format(i)))\n pre_fill.main([\"--managers\", \"--workshift\"])\n utils.make_workshift_pool_hours(semester=self.semester)\n # Assign manager shifts beforehand\n for user, manager in zip(users, Manager.objects.all()):\n manager.incumbent = UserProfile.objects.get(user=user)\n manager.save()\n unfinished = utils.auto_assign_shifts(self.semester)\n self.assertEqual([], unfinished)", "async def admin(ctx):\n info = await(bot.application_info())\n mention = info.owner.mention\n message = \"My administrator is the glorious {}. 
Fear them, for they are mighty.\".format(mention)\n await(ctx.send(message))", "def __reloadUsers(self, admin_id):\n user_main.getUserPool().reloadUsersWithFilter(lambda loaded_user:loaded_user.getBasicUser().getOwnerObj().getAdminID()==admin_id)", "def test_admin_update_user_taskrun(self):\r\n\r\n with self.flask_app.test_request_context('/'):\r\n user_taskrun = TaskRunFactory.create()\r\n\r\n assert self.mock_admin.id != user_taskrun.user.id\r\n assert_raises(Forbidden,\r\n getattr(require, 'taskrun').update,\r\n user_taskrun)", "def update_owner(current_owner_email: str, new_owner_email: str):\n current_owner_id = find_user_id(current_owner_email)\n new_owner_id = find_user_id(new_owner_email) \n \n \"\"\" This block is executed to check if email addresses provided are associated with two Looker users \"\"\"\n \n if type(new_owner_id) != int and type(new_owner_id) != int:\n print(\"The email addresses for both the current owner and the new owner are not associated with any Looker user id\")\n\n elif type(current_owner_id) != int: \n print(\"The email address for the current owner is not associated with any Looker user id\")\n\n elif type(new_owner_id) != int:\n print(\"The email address for the new owner is not associated with any Looker user id\")\n\n else: \n body = {}\n body['user_id'] = new_owner_id\n find = find_schedules(current_owner_id) \n for i in find.values(): \n sdk.update_scheduled_plan(i,body)\n print(\"Successfully transfer all schedules of \" + current_owner_email + \" to \" + new_owner_email)", "def update_all(self):\n self.update_head_node_ip()\n self.get_database_info()\n self.update_users()" ]
[ "0.56836027", "0.5528564", "0.53884643", "0.5359931", "0.53472096", "0.5343001", "0.53295594", "0.53265554", "0.5268255", "0.5263433", "0.5232607", "0.52205235", "0.5219957", "0.52131385", "0.5176218", "0.5130246", "0.5106985", "0.50729245", "0.50512886", "0.50468695", "0.50466895", "0.49739996", "0.4948427", "0.4935979", "0.4935885", "0.49351472", "0.49297175", "0.48980778", "0.48974994", "0.4891305" ]
0.59802717
0
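
The record above pairs the docstring query "Starts an iterative task which update org admins." with a one-line Melange handler that defers everything to updateRole('gsoc_org_admin'). The sketch below shows one way such an iterative, batch-by-batch role update could be written; it is only an illustration — the plain-dict profiles, the helper names, and the batching scheme are assumptions, not the datastore-backed updateRole the record actually calls.

from typing import Iterable, Iterator, List


def _batches(items: Iterable[dict], size: int) -> Iterator[List[dict]]:
    # Yield successive fixed-size batches so the update can proceed iteratively.
    batch: List[dict] = []
    for item in items:
        batch.append(item)
        if len(batch) == size:
            yield batch
            batch = []
    if batch:
        yield batch


def update_role(profiles: Iterable[dict], role_name: str, batch_size: int = 100) -> int:
    # Hypothetical stand-in for updateRole(): walk every profile in batches and
    # flag the ones holding the requested role. Returns how many were touched.
    updated = 0
    for batch in _batches(profiles, batch_size):
        for profile in batch:
            if profile.get('role') == role_name:
                profile['is_org_admin'] = (role_name == 'gsoc_org_admin')
                updated += 1
    return updated


# Example: two mentors and one org admin -> exactly one profile is updated.
sample = [{'role': 'gsoc_mentor'}, {'role': 'gsoc_org_admin'}, {'role': 'gsoc_mentor'}]
assert update_role(sample, 'gsoc_org_admin') == 1
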
Returns GSoCProfile or GCIProfile which corresponds to the specified entity.
def _getProfileForRole(entity, profile_model): if isinstance(entity, profile_model): return entity if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor): key_name = entity.program.key().name() + '/' + entity.user.key().name() else: key_name = entity.key().name() parent = entity.user return profile_model.get_by_key_name(key_name, parent=parent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getProfileFromUser(self):\n # Make sure user is authenticated\n user = endpoints.get_current_user()\n if not user:\n raise endpoints.UnauthorizedException('Authorization required')\n # Get Profile from datastore\n user_id = user.email()\n p_key = ndb.Key(Profile, user_id)\n profile = p_key.get()\n # Create new Profile if not there\n if not profile:\n profile = Profile(\n key = p_key,\n displayName = user.nickname(),\n mainEmail= user.email(),\n teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),\n )\n profile.put()\n return profile", "def get_full_profile(self) -> Profile:\n return Profile(**{**self.profile, **self.contact})", "def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)", "def get_profile(profile_id):\n profile = Profile.objects.get(id=profile_id)\n return profile", "def get(self, entity, schema):\n return jsonify(entity.profiles.get_or_404(schema=schema).to_json()), 200", "def get_profile(tag, platform=\"pc\", region=\"eu\"):\n #\n try:\n context = ssl._create_unverified_context()\n profile = json.load(\n const.codec(urlopen(const.URL + platform + \"/\" + region + \"/\" + tag + \"/profile\", context=context)))\n #\n if \"error\" in profile:\n raise BattleTagNotFound(profile['error'])\n exit(1)\n #\n result = pr.Profile(profile['data']['username'],\n profile['data']['level'],\n profile['data']['games']['quick']['wins'],\n profile['data']['games']['competitive']['wins'],\n profile['data']['games']['competitive']['lost'],\n profile['data']['playtime']['quick'],\n profile['data']['playtime']['competitive'],\n profile['data']['avatar'],\n profile['data']['competitive']['rank'])\n return result\n except urllib.error.URLError as e:\n print(\"An error occurred when fetching stats\\n\" + e)\n exit(1)\n except Exception as e:\n print(\"An error occurred:\\n \" + str(e))\n exit(1)", "def get_profile(request):\n p_obj = Profile.objects.filter(hashid=request.session.get('profile', '-'))\n if len(p_obj):\n return p_obj[0]\n else:\n return None", "def get_profile():\n if environ['DB_INSTANCE'] in request.url_root:\n profile_id = request.form['id']\n profile = ndb.Key(Profile, profile_id).get()\n if profile is not None:\n activity_data = json.loads(profile.activity_data)\n items = activity_data.get('items', [])\n item = items[0]\n return json.dumps(item)\n \n # else (not DB_INSTANCE)\n return ''", "def get_current_profile() -> Optional[Profile]:\n return _PROFILE[-1] if _PROFILE else None", "def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)", "def get_gensec(self, obj):\n if obj.gensec is None:\n return None\n serializer = UserProfileSerializer(obj.gensec)\n return serializer.data", "def get_user_profile(self):\n return self.user.profile", "def getProfile(self, profile):\n for network in self.networks:\n if network.getProfileName() == profile:\n return network\n else:\n raise Exception('Network with profile name \"%s\" not found' % profile)", "def get_related_entity(self, entity):\n try:\n return getattr(self, entity if entity[-1] == \"s\" else entity.upper())\n except AttributeError:\n raise FilterError(f\" No related entity: {entity}\")", "def get_user_profile(self):\n\t\treturn Job(SDK.PrlSrv_GetUserProfile(self.handle)[0])", "def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')", "def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n entity_stats = entitiesDAO.get_entity(currency, entity)\n if entity_stats:\n entity_stats['tags'] = 
entitiesDAO.\\\n list_entity_tags(currency, entity_stats['entity'])\n entity_stats['tag_coherence'] = compute_tag_coherence(\n entity_stats['tags'])\n return entity_stats\n abort(404,\n \"Entity {} not found in currency {}\".format(entity, currency))", "def get_profile(user):\n if user.is_authenticated():\n # Return the PootleProfile associated with authenticated users\n return user.get_profile()\n else:\n # Anonymous users get the PootleProfile associated with the 'nobody' user\n return User.objects.get(username='nobody').get_profile()", "def details(profile, instance_profile):\n client = boto3client.get(\"iam\", profile)\n params = {}\n params[\"InstanceProfileName\"] = instance_profile\n return client.get_instance_profile(**params)", "def fb_profile(self):\n return FBProfile.objects.get(fb_id=self.fb_id)", "def head(self, entity):\n link = '<{url}>; rel=\"https://tent.io/rels/profile\"'.format(\n url=url_for('entity.profile', entity=entity.name, _external=True))\n resp = jsonify(entity.to_json())\n resp.headers['Link'] = link\n \n return resp", "def get_profile(self, uid: str, *, channel_model: Optional[ChannelModel] = None) -> Optional[Profile]:\n ctype = ChannelType.UNKNOWN\n\n if channel_model:\n ctype = LineApiUtils.get_channel_type(channel_model.token)\n\n try:\n if ctype == ChannelType.GROUP_PUB_TEXT:\n return self._core.get_group_member_profile(channel_model.token, uid, timeout=1000)\n\n if ctype == ChannelType.GROUP_PRV_TEXT:\n return self._core.get_room_member_profile(channel_model.token, uid, timeout=1000)\n\n return self._core.get_profile(uid, timeout=1000)\n except LineBotApiError as ex:\n # 404 seems to be the legacy status code upon user not found\n # 400 is the status code returned upon user not found (2020/09/23)\n if ex.status_code in (404, 400):\n return None\n\n raise ex\n except requests.exceptions.ConnectionError:\n return self.get_profile(uid, channel_model=channel_model)", "def select_sub_profile(self, entity_state: State) -> str:\n for matcher in self._matchers:\n sub_profile = matcher.match(entity_state, self._source_entity)\n if sub_profile:\n return sub_profile\n\n return self._config.default", "def get_entity(self):\n if self.override_entity and not self.override_entity.abstract_entity:\n return self.override_entity\n elif self.get_role():\n return self.get_role().entity\n return None", "def get_profile():\n # Create the netCDF file\n nc = make_ctd_file()\n\n # Return a profile object with all available chemicals in the CTD data\n return ambient.Profile(nc, chem_names='all')", "def get(self, entity):\n\t\treturn entity.get_component(self.component_type)", "def profile(self) -> Profile:\n return self._profile", "def profile(self) -> Profile:\n return self._profile", "def get_user_custom_profile_by_nick(self, nick):\n full_profile = self.get_user_full_profile_by_nick(nick)\n\n if hasattr(full_profile, 'custom_profile'):\n return str(full_profile.custom_profile)", "def get_profile_get(self, components, destinyMembershipId, membershipType):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/{membershipType}/Profile/{destinyMembershipId}/\"))" ]
[ "0.60517067", "0.5931097", "0.5920047", "0.57531446", "0.5752912", "0.57454103", "0.5593141", "0.5578978", "0.5446997", "0.54374474", "0.5414628", "0.5366052", "0.5310331", "0.53044224", "0.5297717", "0.528342", "0.5280851", "0.5238035", "0.5170172", "0.5164785", "0.51386154", "0.51162285", "0.5098992", "0.509524", "0.5094018", "0.5092746", "0.50927097", "0.50927097", "0.5087239", "0.5079519" ]
0.67691934
0
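
The core of the _getProfileForRole document above is a key-name convention: for OrgAdmin or Mentor entities the profile key name is built as '<program key name>/<user key name>', any other entity already carries the profile key name itself, and the user entity always serves as the parent for the lookup. A minimal sketch of just that naming rule follows, with plain strings standing in for datastore keys; the function name and arguments are illustrative, not Melange API.

def profile_key_name(is_role_entity: bool, program_key_name: str,
                     user_key_name: str, entity_key_name: str) -> str:
    # Mirror the branch in _getProfileForRole: OrgAdmin/Mentor rows derive the
    # profile key name from program and user; anything else reuses its own key name.
    if is_role_entity:
        return program_key_name + '/' + user_key_name
    return entity_key_name


# An OrgAdmin for program "google/gsoc2012" held by user "alice" resolves to
# the profile key name "google/gsoc2012/alice"; the parent is the user entity.
assert profile_key_name(True, 'google/gsoc2012', 'alice', '') == 'google/gsoc2012/alice'
assert profile_key_name(False, '', '', 'google/gsoc2012/alice') == 'google/gsoc2012/alice'
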
Returns Key instance of the Profile which corresponds to the Role which is represented by the specified Key.
def _getProfileKeyForRoleKey(key, profile_model): entity = db.get(key) profile = _getProfileForRole(entity, profile_model) return profile.key()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_key(self, role):\n\n for key, role_name in self.assignable_roles[0].items():\n if role_name == role.name:\n return key", "def get_key(self, key):\n ret = None\n qkey = key.__qualname__\n ret = self.get(qkey)\n if not ret:\n # check all entries if qualname match\n for k in self:\n if k.__qualname__ == qkey:\n return self.get(k)\n return", "def get_custom_key_from_key(key):\n key_custom = CustomIterator.key_from_protobuf(key.to_protobuf())\n key_custom._type = SubclassMap.get()[key_custom.kind]\n return key_custom", "def get_key(self):\n return self._determine_key()", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def key(self) -> Key:\n return self._key", "def key(key):\n return key", "def key(self):\n return self.account_name()", "def key(self):\n return self.account_name()", "def getKey(self):\n return self.key", "def _getProfileForRole(entity, profile_model):\n\n if isinstance(entity, profile_model):\n return entity\n\n if isinstance(entity, OrgAdmin) or isinstance(entity, Mentor):\n key_name = entity.program.key().name() + '/' + entity.user.key().name()\n else:\n key_name = entity.key().name()\n\n parent = entity.user\n return profile_model.get_by_key_name(key_name, parent=parent)", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")", "def key(self) -> str:\n return pulumi.get(self, \"key\")" ]
[ "0.66111445", "0.58805066", "0.58505315", "0.5831204", "0.5803281", "0.5803281", "0.57678306", "0.5754127", "0.56673247", "0.56673247", "0.5639647", "0.5624423", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235", "0.56116235" ]
0.828253
0
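
_getProfileKeyForRoleKey in the record above simply composes the previous helper: resolve the role key to its entity with db.get, map that entity to its profile, and hand back the profile's key. The sketch below re-creates that composition with in-memory dicts standing in for the datastore; ROLE_ENTITIES, PROFILES, and the lower-case function names are assumptions for illustration only.

ROLE_ENTITIES = {
    'mentor/alice': {'kind': 'Mentor', 'profile_key': 'profile/google/gsoc2012/alice'},
}

PROFILES = {
    'profile/google/gsoc2012/alice': {'key': 'profile/google/gsoc2012/alice',
                                      'display_name': 'Alice'},
}


def get_profile_for_role(entity: dict) -> dict:
    # Stand-in for _getProfileForRole: a role entity maps to exactly one profile.
    return PROFILES[entity['profile_key']]


def get_profile_key_for_role_key(role_key: str) -> str:
    # Same three steps as the record: fetch the entity, find its profile, return its key.
    entity = ROLE_ENTITIES[role_key]        # db.get(key)
    profile = get_profile_for_role(entity)  # _getProfileForRole(entity, profile_model)
    return profile['key']                   # profile.key()


assert get_profile_key_for_role_key('mentor/alice') == 'profile/google/gsoc2012/alice'
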
1. Convert ifg phase data into numpy binary files. 2. Save the preread_ifgs dict with information about the ifgs that are later used for fast loading of Ifg files in IfgPart class
def _create_ifg_dict(dest_tifs, params): ifgs_dict = {} nifgs = len(dest_tifs) process_tifs = mpiops.array_split(dest_tifs) for d in process_tifs: ifg = shared._prep_ifg(d, params) ifgs_dict[d] = PrereadIfg(path=d, nan_fraction=ifg.nan_fraction, master=ifg.master, slave=ifg.slave, time_span=ifg.time_span, nrows=ifg.nrows, ncols=ifg.ncols, metadata=ifg.meta_data) ifg.close() ifgs_dict = _join_dicts(mpiops.comm.allgather(ifgs_dict)) preread_ifgs_file = join(params[cf.TMPDIR], 'preread_ifgs.pk') if mpiops.rank == MASTER_PROCESS: # add some extra information that's also useful later gt, md, wkt = shared.get_geotiff_header_info(process_tifs[0]) epochlist = algorithm.get_epochs(ifgs_dict)[0] log.info('Found {} unique epochs in the {} interferogram network'.format(len(epochlist.dates), nifgs)) ifgs_dict['epochlist'] = epochlist ifgs_dict['gt'] = gt ifgs_dict['md'] = md ifgs_dict['wkt'] = wkt # dump ifgs_dict file for later use cp.dump(ifgs_dict, open(preread_ifgs_file, 'wb')) mpiops.comm.barrier() preread_ifgs = OrderedDict(sorted(cp.load(open(preread_ifgs_file, 'rb')).items())) log.debug('Finished converting phase_data to numpy in process {}'.format(mpiops.rank)) return preread_ifgs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_ifgs(ifg_paths, params, rows, cols):\n\n if mpiops.size > 1: # turn of multiprocessing during mpi jobs\n params[cf.PARALLEL] = False\n outdir = params[cf.TMPDIR]\n if not os.path.exists(outdir):\n shared.mkdir_p(outdir)\n\n tiles = mpiops.run_once(get_tiles, ifg_paths[0], rows, cols)\n\n preread_ifgs = _create_ifg_dict(ifg_paths, params=params)\n\n # validate user supplied ref pixel\n refpixel.validate_supplied_lat_lon(params)\n refpx, refpy = _ref_pixel_calc(ifg_paths, params)\n\n # remove non ifg keys\n _ = [preread_ifgs.pop(k) for k in ['gt', 'epochlist', 'md', 'wkt']]\n\n multi_paths = params[cf.INTERFEROGRAM_FILES]\n _orb_fit_calc(multi_paths, params, preread_ifgs)\n\n _ref_phase_estimation(ifg_paths, params, refpx, refpy)\n\n shared.save_numpy_phase(ifg_paths, tiles, params)\n _mst_calc(ifg_paths, params, tiles, preread_ifgs)\n\n # spatio-temporal aps filter\n wrap_spatio_temporal_filter(ifg_paths, params, tiles, preread_ifgs)\n\n maxvar, vcmt = _maxvar_vcm_calc(ifg_paths, params, preread_ifgs)\n # save phase data tiles as numpy array for timeseries and stackrate calc\n\n shared.save_numpy_phase(ifg_paths, tiles, params)\n\n _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)\n\n _stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)\n\n log.info('PyRate workflow completed')\n return (refpx, refpy), maxvar, vcmt", "def convert_data(self):\n print('Saving data for post-processing in: {}'.format(self.RunDir))\n\n for ig in self.Set:\n print(ig.Name)\n # Phase Array - Convert to CM\n phs = roipy.tools.load_half(ig)\n data = phs * ig.Phs2cm\n outname = os.path.join(self.RunDir, 'd_' + ig.Name.replace('unw','npy')) #d for displacement\n ig.ProcName = outname\n np.save(outname,data)\n\n # Copy rsc\n shutil.copyfile(ig.Path + '.rsc', outname + '.rsc')\n\n # Nan Values as mask array\n maskname = outname.replace('d_', 'nans_')\n nans = np.isnan(data) # NOTE: ROI_PAC saves nans as exact 0.0, risks scraping some true data\n np.save(maskname, nans)", "def load_files(self):\n print('Saving numpy mask arrays in {0}'.format(self.ProcDir))\n\n if not os.path.isdir(self.ProcDir): os.mkdir(self.ProcDir)\n if not os.path.isdir(self.OutDir): os.mkdir(self.OutDir)\n\n self.Files = {}\n for ig in self.Set:\n phase = roipy.tools.load_half(ig,2)\n # convert wavelength to displacements\n # NOTE: make attributes of commonly used values in rsc: float(ig.Rsc['WAVELENGTH'])\n disp = phase * (ig.Wavelength / (4*np.pi))\n igram = ma.array(disp, mask=ma.nomask)\n name = self.save_ma(ig, igram) #Mask_ array is just zeros at this point..\n self.Files[ig.ID] = name\n\n print('load_files() complete: {0} interferograms'.format(self.Set.Nig))", "def gslib_to(fgslib, fout=False):\n\n with open(fgslib, 'r') as f:\n gr = next(f).strip().split()\n nvar = int(next(f).strip())\n vars = [next(f).strip() for i in range(nvar)]\n\n gslib_data = np.loadtxt(fgslib, skiprows=2+nvar) # Load data\n dict_out = dict() # Initialise list of numpy arrays\n for iv, v in enumerate(vars):\n vname = v.split('_real')[0]\n dict_out[vname] = np.ascontiguousarray(np.reshape(gslib_data[:, iv], [int(i) for i in gr[:3]], order='F'))\n\n if fout == 'pickle':\n outfile = fgslib[:-5] + 'dat'\n with open(outfile, 'wb') as pickle_file:\n pickle.dump(dict_out, pickle_file, protocol=pickle.HIGHEST_PROTOCOL)\n elif fout == 'h5':\n import h5py\n with h5py.File(fgslib[:-5]+'h5', 'w') as hf:\n for key in dict_out.keys():\n hf.create_dataset(key, data=dict_out[key], compression=True)\n return dict_out", "def 
save_equilibrator_bin_data(self, npz_file_name):\n preprocess_dict = {'cids': self.params['cids']}\n for k, v in self.params.items():\n if k.find('preprocess_') != -1:\n preprocess_dict[k.replace('preprocess_', '')] = v\n np.savez_compressed(npz_file_name, **preprocess_dict)", "def save_fida(self, path):\n # input is Dimensions are channel x rep x mega x isis x t\n # FID-A seems to accomodate only 4: t x chan x rep x subSpecs\n # TODO: see if ISIS and MEGA subspecs are differentiated\n\n # permute the axes to t x chan x rep x mega x isis\n fids = np.transpose(self.fid, (4, 0,1,2,3))\n specs = np.transpose(self.spec, (4, 0,1,2,3))\n # reshape to combine subspecs\n dims = list(fids.shape[0:-2])\n dims.append(-1)\n fids = np.reshape(fids, tuple(dims))\n specs = np.reshape(specs, tuple(dims))\n\n # remove last dimensi if there are no subSpecs\n fids = np.squeeze(fids)\n specs = np.squeeze(specs)\n\n # fp to avoid int64 errors\n dim_dict = {'t': 1.0, 'coils': 2.0, 'averages': 3.0, 'subSpecs': 0.0, 'extras': 0.0}\n\n # there are still subSpectra\n if fids.ndim == 4:\n subspecs = fids.shape[-1]\n rawSubspecs = fids.shape[-1]\n dim_dict['subSpecs'] = 4.0\n else:\n subspecs = 0\n rawSubspecs = 0\n\n if self.fid.shape[0]==1:\n addedrcvrs = 1\n else:\n addedrcvrs = 0\n\n B0 = self.larmor/util.GYROMAGNETIC_RATIO[self.nucleus]\n\n n_averages = float(self.fid.shape[self.dimnames['rep']])\n # fids - time domain MRS data.\n # specs - frequency domain MRS data.\n # t - vector of time values for plotting in the time domain [s]\n # ppm - vector of frequency values for plotting in the frequency domain\n # [ppm]\n # sz - size of the fids and specs arrays\n # date - date that the data was acquired or simulated\n # averages - number of averages in the dataset (possibly altered by\n # processing)\n # rawAverages - number of averages in the original dataset (not altered by\n # processing).\n # subspecs - number of subspectra (ISIS, edit on/off, etc) in the dataset\n # (possibly altered by processing).\n # rawSubspecs - number of subspectra (ISIS, edit on/off, etc) in the original\n # dataset (not altered by processing). Bo - magnetic field strength [Tesla]\n # txfrq - Centre frequnecy [MHz];\n # linewidth - linewidth of data (only used for simulated data) [Hz]\n # n - number of spectral points\n # dwelltime - dwell time of the data in the time domain [s] (dwelltime =\n # 1/spectralwidth)\n # sim - type of simulation (ideal vs. shaped pulses), only used for\n # simulated data.\n # te seq dims\n # - echo time of acquisition [ms], only used for simulated data - type of sequence used (only used for simulated data).\n # - structure specifying which data dimensions are stored along\n # which dimensions of the fids/specs arrays. Fields include:\n # t - time/frequency dimension (usually this is 1, the first\n # dimension of the fids/specs array).\n # coils - for multiple receiver array, this is the dimension of\n # the arrayed receiver data (can be 2, 3 or 4). 
averages - for multiple averages, this is the dimension of the\n # averages (can be 2, 3 or 4).\n # subSpecs - in the case of subtraction data (ISIS, MEGA-PRESS), this\n # is the dimension of the subSpectra (can be 2, 3 or 4).\n\n\n mdict = {'fids': fids, 'specs': specs, 't': self.t,\n 'ppm': self.ppm, 'sz': np.float_(fids.shape), 'date': '',\n 'averages': n_averages, 'rawAverages': n_averages,\n 'subspecs': float(subspecs), 'rawSubspecs': float(rawSubspecs), 'Bo': B0,\n 'txfrq': self.larmor, 'dwelltime': 1.0/self.sw,\n 'spectralwidth': self.sw, 'seq': self._sequence_name,\n 'dims': dim_dict, 'te': self.te * 1e3, 'tr': self.tr * 1e3,\n 'pointsToLeftshift': 0}\n\n # writtentostruct\n # gotparams\n # filtered\n # zeropadded\n # freqcorrected\n # phasecorrected\n # averaged\n # addedrcvrs\n # Subtracted\n # Writtentotext\n # Downsampled\n # avgNormalized\n # isISIS\n # - Has the dataset been written to a structure (1 or 0)\n # - Have the parameters been retrieved from the dataset (1 or 0)\n # - Has the dataset been filtered (1 or 0)\n # - Has the dataset been zeropadded (1 or 0)\n # - Has the dataset been frequency corrected (1 or 0) - Has the dataset been phase corrected (1 or 0)\n # - Have the averages been combined (1 or 0)\n # - Have the rcvr channels been combined (1 or 0).\n # - Have the subspecs been subtracted (1 or 0)\n # - Has the data been written to text file (1 or 0) - has the data been resampled to a different\n # spectral resolution (1 or 0)\n # - Has the data been amplitude scaled following\n # combination of the averages (1 or 0)\n # - Does the dataset contain ISIS subspectra (1 or 0)\n\n flags = {'writtentostruct': 1, 'gotparams': 1, 'filtered': 0,\n 'zeropadded': 0, 'freqcorrected': 0, 'phasecorrected': 0,\n 'averaged': int(n_averages == 1), 'addedrcvrs': addedrcvrs,\n 'subtracted': 0, 'Writtentotext': 0, 'Downsampled': 0,\n 'avgNormalized': 0, 'isISIS': int(self.is_special),\n 'leftshifted': 0}\n\n if self.sequence_type == 'STEAM':\n mdict['tm'] = self.tm\n\n mdict['flags'] = flags\n scipy.io.savemat(path, {'svs': mdict}, format='5', long_field_names=True)", "def savedata(outfile):\n\n global BTRACK, GSTRUC, NPIX\n \n print('SAVING DATA to '+outfile)\n\n # Back up any existing file\n picklefile = outfile.replace('.fits','.pkl')\n backpicklefile = picklefile+'.backup' \n if os.path.exists(picklefile):\n if os.path.exists(backpicklefile):\n shutil.move(picklefile,backpicklefile)\n \n # Write tracking structures to pickle file\n with open(picklefile, 'wb') as f:\n pickle.dump(BTRACK, f)\n pickle.dump(GSTRUC, f) \n\n # Remove backup file if it exists\n if os.path.exists(backpicklefile):\n os.remove(backpicklefile)\n \n # Construct gstruc output structure\n count = GSTRUC['count']\n ngauss = GSTRUC['ngauss']\n dtype = np.dtype([('x',int),('y',int),('par',float,3),('sigpar',float,3),('rms',float),\n ('noise',float),('lon',float),('lat',float)])\n gstruc = np.zeros(ngauss,dtype=dtype)\n cnt = 0\n for i in range(count):\n tstr1 = GSTRUC['data'][i]\n ngauss1 = len(tstr1['par'])//3\n gstruc1 = np.zeros(ngauss1,dtype=dtype)\n gstruc1['x'] = tstr1['x']\n gstruc1['y'] = tstr1['y']\n gstruc1['lon'] = tstr1['lon']\n gstruc1['lat'] = tstr1['lat'] \n gstruc1['rms'] = tstr1['rms']\n gstruc1['noise'] = tstr1['noise']\n gstruc1['par'] = tstr1['par'].reshape(ngauss1,3)\n gstruc1['sigpar'] = tstr1['sigpar'].reshape(ngauss1,3)\n gstruc[cnt:cnt+ngauss1] = gstruc1\n cnt += ngauss1\n gstruc = Table(gstruc)\n gstruc.write(outfile,overwrite=True)\n print(str(len(gstruc))+' gaussians')\n \n return 
gstruc", "def SaveNIFTI(data, file_path):\n if(np.iscomplex(data).any()):\n data = abs(data)\n nii = nib.Nifti1Image(data, np.eye(4)) \n nib.save(nii, file_path)", "def save2nifti(self, file_path):\n #Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 #signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 #128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n #Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n data = np.rot90(self._data, 3)\n if data_type.has_key(data.dtype.type):\n self._header['datatype'] = data_type[data.dtype.type]\n self._header['cal_max'] = data.max()\n self._header['cal_min'] = 0\n image = nib.nifti1.Nifti1Image(data, None, self._header)\n nib.nifti1.save(image, file_path)", "def save_intrinsics(self, save_dir):\n if not osp.isfile(\n osp.join(save_dir, 'intrinsics', 'intrinsics.npy')):\n np.save(osp.join(\n save_dir, 'intrinsics', 'intrinsics'), self.camera_model.K)", "def reduce_and_save():\n ### Get the signature information\n sig_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_sig_info.txt\"), sep=\"\\t\")\n ### Columns are:\n ### Index([u'sig_id', u'pert_id', u'pert_iname', u'pert_type', u'cell_id',\n ### u'pert_dose', u'pert_dose_unit', u'pert_idose', u'pert_time',\n ### u'pert_time_unit', u'pert_itime', u'distil_id'],\n ### dtype='object')\n\n ### Filter for signature ids for small molecule pertubagens\n small_mol_sigs = sig_info['sig_id'][sig_info['pert_type'] == \"trt_cp\"]\n ### Results in 205034 signatures\n\n ### Read in the gene info\n gene_info = pd.read_csv(join(FILE_PATH, \"GSE92742_Broad_LINCS_gene_info.txt\"), sep='\\t')\n ### Index([u'pr_gene_id', u'pr_gene_symbol', u'pr_gene_title', u'pr_is_lm',\n ### u'pr_is_bing'],\n ### dtype='object')\n\n landmark_gene_ids = gene_info['pr_gene_id'][gene_info['pr_is_lm'] == 1] #Filters for directly measured transcripts\n ### Results in the 978 landmark pr_gene_ids\n\n ### LOAD in the main file filtering the columns so that only the small molecules signatures are loaded and the\n ### rows such that only the landmark genes are loaded into their custom gctoo container type\n relevent_sigs_gctoo = parse(join(FILE_PATH, \"GSE92742_Broad_LINCS_Level5_COMPZ.MODZ_n473647x12328.gctx\"),\n cid=small_mol_sigs, rid=landmark_gene_ids)\n # print small_mol_sigs.data_df.shape\n ### Should 
write an intermediate file with dimensions (978, 205034)\n write_gctx.write(relevent_sigs_gctoo, join(FILE_PATH, \"lm_sm_aggz\"))", "def save_segmentation_samples(self, dest=\"./Datasets/IsophonicsSegmentation.seg\", song_indices=[0, 10, 20, 30, 40, 50, 60, 70], hop_length=512, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, n_frames=500):\n data = []\n chords = []\n gold_targets = []\n # Iterate over all song indices on the input\n for song_ind in song_indices:\n # Prprocess audio\n preprocessed_audio = IsophonicsDataset.preprocess_audio(\n waveform=self.DATA[song_ind].WAVEFORM,\n sample_rate=self.DATA[song_ind].SAMPLE_RATE,\n spectrogram_generator=spectrogram_generator,\n nfft=self.NFFT, hop_length=hop_length,\n norm_to_C=norm_to_C, key=self.KEYS[song_ind].get_first_key()\n ).swapaxes(0,1)\n\n num_samples, _ = preprocessed_audio.shape\n\n # Convert data and chord targets to sequences\n data_in_seqs, targets_in_seqs = Dataset.songs_to_sequences(\n FEATURESs=[preprocessed_audio],\n CHORDs=[self.CHORDS[song_ind]],\n TIME_BINSs=[[float(i)/(float(self.SAMPLE_RATE) / float(hop_length)) for i in range(num_samples)]],\n KEYs=self.KEYS[song_ind].get_first_key(),\n n_frames=n_frames,\n norm_to_C=norm_to_C\n )\n\n # Add song's sequences to lists as a new element\n data.append(data_in_seqs)\n chords.append(targets_in_seqs)\n gold_targets.append(SegmentationCRNN.labels2changes(targets = chords[-1]))\n\n # Save all three np arrays generated in this function .. data, chords, gold_targets aka chord changes\n with lzma.open(dest, \"wb\") as dataset_file:\n pickle.dump((data, chords, gold_targets), dataset_file)\n\n print(\"[INFO] The Isophonics segmentation samples was saved successfully.\")", "def read_npz(chro,HiCData):\r\n data = {}\r\n inter = HiCData[chro]\r\n max_bin = max(inter['bin1'].max(), inter['bin2'].max())\r\n for i in range(max_bin+1):\r\n data[i] = []\r\n for i in inter:\r\n data[i['bin1']].append((i['bin1'],i['bin2'],i['IF']))\r\n\r\n dtype = np.dtype({'names':['bin1','bin2','IF'],\r\n 'formats':[np.int, np.int, np.float]})\r\n for k,v in data.items():\r\n v = np.array(v,dtype = dtype)\r\n data[k] = v\r\n\r\n return data", "def save_nifti(self, path):\n meta = {'te': self.te, 'tr': self.tr, 'sw': self.sw}\n if self.sequence_type == 'STEAM':\n meta['tm'] = self.tm\n\n # store real and imaginary components in last 2 dims\n component_fid = np.stack((np.real(self.fid),np.imag(self.fid)), -2)\n nifti = nib.Nifti2Image(component_fid, self.transform.get_matrix(), extra=meta)\n nib.save(nifti, path)", "def save_calib_data(self):\r\n \r\n #get data to save\r\n x0 = self.ui.x0.value()\r\n x1 = self.ui.x1.value()\r\n y0 = self.ui.y0.value()\r\n y1 = self.ui.y1.value()\r\n \r\n directory, fileName = os.path.split(self.ui.imagePath.text())\r\n nofpixels = (max(x0,x1)-min(x0,x1))*(max(y0,y1)-min(y0,y1))\r\n\r\n #create a list with everything to be saved \r\n #(fast way in Python to build a string)\r\n strList = [self.ui.filmNumber.text(),#indentifier\r\n fileName, #file name\r\n \"{:d}\".format(x0),#coordinates\r\n \"{:d}\".format(y0),\r\n \"{:d}\".format(x1),\r\n \"{:d}\".format(y1),\r\n \"{:d}\".format(nofpixels)] \r\n \r\n \r\n #save the channel data\r\n for channel in [0,1,2]:\r\n avg = np.average(self.npImg[y0:y1,x0:x1,channel])\r\n std = np.std(self.npImg[y0:y1,x0:x1,channel])\r\n strList.append(\"{:.3f}\".format(avg))\r\n strList.append(\"{:.3f}\".format(std))\r\n\r\n #concatenate the list, using tab as a seperator\r\n saveStr = '\\t'.join(strList)+\"\\n\"\r\n \r\n 
self.saveTablePath = self.check_save_table_path(self.ui.saveTablePath.text())\r\n \r\n if self.saveTablePath == \"\":\r\n logging.error(\"no valid file selected, nothing written\")\r\n else:\r\n with open(self.saveTablePath,\"a\") as saveTable:\r\n saveTable.write(saveStr)\r\n logging.info((\"info for \"+self.ui.filmNumber.text()+\" written to file\"))", "def _multiprocessing_save_srp(in_out_path):\n vp, np = in_out_path\n if not os.path.exists(np + '.npz'):\n save_compact_fft(\n np,\n embedding_fft(load_one_srp_embedding(vp))\n )\n return np", "def run(self):\r\n #print 'WriteFITS_IDI.run'\r\n\r\n # construct the name of the file\r\n readfits = self.previous_results['readfits']\r\n obs_date = readfits['obs date']\r\n idifitsfile = '%s.idi.fits' % obs_date\r\n\r\n configxml = 'firi.xml'\r\n\r\n # midnight on date to Julian day\r\n obs_date_midnight = astro_time.Time('%s-%s-%sT00:00:00' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:8]), format='isot')\r\n obs_date_midnight = obs_date_midnight.jd\r\n\r\n rdate = astro_time.Time(obs_date_midnight, format='jd',\r\n out_subfmt='date')\r\n rdate = rdate.iso\r\n\r\n # number of days after midnight at obs start\r\n obs_date_time = astro_time.Time('%s-%s-%s:%s:%s' %\r\n (obs_date[:4], obs_date[4:6], obs_date[6:11], obs_date[11:13],\r\n obs_date[13:]), format='isot')\r\n obs_date_time = obs_date_time.jd - obs_date_midnight\r\n\r\n # get specific items from the results that will be need in\r\n # the reduction\r\n reduce_interferogram = self.previous_results['reduceinterferogram']\r\n data_quality = reduce_interferogram['data_quality']\r\n scan_uvspectra = reduce_interferogram['scan_uvspectra']\r\n\r\n wavenumber = scan_uvspectra[0].wavenumber\r\n\r\n # construct lists of the values to be stored in each Table column\r\n n_uvspectra = max(scan_uvspectra.keys()) + 1\r\n mcomplex = 3\r\n mstokes = 1\r\n mfreq = len(wavenumber)\r\n mra = 1\r\n mdec = 1\r\n\r\n uv_data = np.zeros([n_uvspectra, mdec, mra, mfreq, mstokes, mcomplex])\r\n u = np.zeros([n_uvspectra])\r\n v = np.zeros([n_uvspectra])\r\n w = np.zeros([n_uvspectra])\r\n dates = np.zeros([n_uvspectra])\r\n times = np.zeros([n_uvspectra])\r\n baselines = np.zeros([n_uvspectra], dtype=np.int)\r\n freqid = np.ones([n_uvspectra], dtype=np.int)\r\n\r\n for k,val in scan_uvspectra.items():\r\n uv_data[k,0,0,:,0,0] = val.spectrum.real\r\n uv_data[k,0,0,:,0,1] = val.spectrum.imag\r\n uv_data[k,0,0,:,0,2] = np.ones(val.spectrum.real.shape)\r\n u[k] = np.mean(val.baseline_x)\r\n v[k] = np.mean(val.baseline_y)\r\n w[k] = np.mean(val.baseline_z)\r\n dates[k] = obs_date_midnight\r\n times[k] = obs_date_time + (np.mean(val.time) / (3600 * 24))\r\n baselines[k] = 258\r\n\r\n # external_params is referred to inside config.xml and can be\r\n # used to set parameters there\r\n light_speed = constants.c.to('m/s').value\r\n external_params = {'NCHAN':len(wavenumber),\r\n 'RDATE':rdate,\r\n 'REF_FREQ':0.0 * 100 * light_speed,\r\n 'CHAN_BW':np.abs(wavenumber[1] - wavenumber[0]) * \\\r\n 100 * light_speed}\r\n\r\n print \"Out: %s\\nConfig: %s\"%(idifitsfile, configxml)\r\n\r\n print('\\nConfiguring Array geography')\r\n print('--------------------------')\r\n # Meaningless numbers, hopefully not needed by any CASA method \r\n # that we want to use\r\n (latitude, longitude, elevation) = ('00:00:00.00', '00:00:00.00', 0)\r\n now = datetime.datetime.now()\r\n\r\n # Make ourselves an Array (pyEphem observer)\r\n array_geometry_m = np.array([\r\n [0.0, 0.0, 0.0],\r\n [0.0, 80.0, 0.0]], dtype = 'float32')\r\n beach = 
Array(lat=latitude, long=longitude, elev=elevation, date=now,\r\n antennas=array_geometry_m)\r\n\r\n print('\\nConfiguring phase source')\r\n print('--------------------------')\r\n # The source is our phase centre for UVW coordinates\r\n line = \"%s,f,%s,%s,%s,%d\" % ('Deep Space', '00:00:00',\r\n '00:00:00', '1', 2000)\r\n source = ephem.readdb(line)\r\n source.compute(beach)\r\n print \"Name: %s \\nRA: %s \\nDEC: %s\"%(source.name, source.ra, source.dec)\r\n\r\n # Make a new blank FITS HDU\r\n print('\\nCreating PRIMARY HDU')\r\n print('------------------------------------')\r\n hdu = make_primary(config=configxml, external_params=external_params)\r\n print repr(hdu.header)\r\n\r\n # Go through and generate required tables\r\n print('\\nCreating ARRAY_GEOMETRY')\r\n print('------------------------------------')\r\n tbl_array_geometry = make_array_geometry(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_array_geometry = config_array_geometry(tbl_array_geometry,\r\n array_geometry_m)\r\n print repr(tbl_array_geometry.header)\r\n\r\n print('\\nCreating FREQUENCY')\r\n print('------------------------------------')\r\n tbl_frequency = make_frequency(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_frequency = config_frequency(tbl_frequency,\r\n external_params=external_params)\r\n print repr(tbl_frequency.header)\r\n\r\n print('\\nCreating SOURCE')\r\n print('------------------------------------')\r\n tbl_source = make_source(config=configxml, num_rows=1,\r\n external_params=external_params)\r\n tbl_source = config_source(tbl_source, source)\r\n print repr(tbl_source.header)\r\n\r\n print('\\nCreating ANTENNA')\r\n print('------------------------------------')\r\n tbl_antenna = make_antenna(config=configxml, num_rows=2,\r\n external_params=external_params)\r\n tbl_antenna = config_antenna(tbl_antenna)\r\n print repr(tbl_antenna.header)\r\n\r\n print('\\nCreating UV_DATA')\r\n print('------------------------------------')\r\n\r\n print 'Data dimensions: %i dumps, %i chans, %i pols, %i data' % (\r\n n_uvspectra, mfreq, mstokes, mcomplex)\r\n\r\n print('Generating blank UV_DATA rows...')\r\n tbl_uv_data = make_uv_data(config=configxml, num_rows=n_uvspectra,\r\n external_params=external_params)\r\n\r\n timesorted = np.argsort(times)\r\n\r\n for k in timesorted:\r\n tbl_uv_data.data[k]['FLUX'] = uv_data[k,0,0,:,0,:].ravel()\r\n tbl_uv_data.data[k]['UU'] = u[k] / light_speed\r\n tbl_uv_data.data[k]['VV'] = v[k] / light_speed\r\n tbl_uv_data.data[k]['WW'] = w[k] / light_speed\r\n tbl_uv_data.data[k]['BASELINE'] = baselines[k]\r\n tbl_uv_data.data[k]['DATE'] = dates[k]\r\n tbl_uv_data.data[k]['TIME'] = times[k]\r\n tbl_uv_data.data[k]['SOURCE'] = 1\r\n tbl_uv_data.data[k]['FREQID'] = 1\r\n tbl_uv_data.data[k]['INTTIM'] = 3\r\n\r\n print repr(tbl_uv_data.header)\r\n \r\n hdulist = pyfits.HDUList(hdus=\r\n [hdu,\r\n tbl_array_geometry,\r\n tbl_source, \r\n tbl_frequency,\r\n tbl_antenna,\r\n tbl_uv_data])\r\n\r\n print('Verifying integrity...') \r\n hdulist.verify()\r\n \r\n if(os.path.isfile(idifitsfile)):\r\n print('Removing existing file...')\r\n os.remove(idifitsfile)\r\n print('Writing to file...')\r\n hdulist.writeto(idifitsfile)\r\n\r\n print('Done.')\r\n\r\n self.result['idifitsfile'] = idifitsfile\r\n\r\n return self.result", "def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])", "def 
save_specific_waveforms_to_file(real_data_array, synth_data_array, data_labels, nlloc_hyp_filename, inversion_type, outdir, shift_idxs=[], normallise_data=False):\n # Normalise data, if specified:\n if normallise_data:\n for i in range(len(data_labels)):\n real_data_array[i,:] = real_data_array[i,:] / np.max(np.abs(real_data_array[i,:]))\n synth_data_array[i,:] = synth_data_array[i,:] / np.max(np.abs(synth_data_array[i,:]))\n # Put waveform data in dict format:\n out_wf_dict = {}\n for i in range(len(data_labels)):\n out_wf_dict[data_labels[i]] = {}\n out_wf_dict[data_labels[i]][\"real_wf\"] = real_data_array[i,:]\n if len(shift_idxs)>0:\n shift_idx_curr = shift_idxs[i]\n out_wf_dict[data_labels[i]][\"synth_wf\"] = np.roll(synth_data_array[i,:], int(shift_idx_curr))\n else:\n out_wf_dict[data_labels[i]][\"synth_wf\"] = synth_data_array[i,:]\n # Get uid for filename:\n uid, stations = get_event_uid_and_station_data_MTFIT_FORMAT_from_nonlinloc_hyp_file(nlloc_hyp_filename)\n # And write to file:\n out_fname = outdir+\"/\"+uid+\"_FW_\"+inversion_type+\".wfs\"\n print(\"Saving FW inversion to file:\", out_fname)\n pickle.dump(out_wf_dict, open(out_fname, \"wb\"))", "def readimfile():\r\n global numpops\r\n global gv\r\n imfile = open(gv[\"imfilename\"],\"r\")\r\n gv[\"useghost\"] = False\r\n imfileline = imfile.readline()\r\n gv[\"newercode\"] = False\r\n while imfileline != '':\r\n if imfileline.upper().find(\"IMa3 program compiled on\".upper()) >= 0:\r\n import re\r\n from datetime import datetime\r\n linesplit = mysplit(imfileline.strip(),\",\")\r\n date = \" \".join(linesplit[4:7])\r\n ## changed ima3 format afer sep 12 2017\r\n newcodedatetime = datetime.strptime(\"sep 12 2017\", '%b %d %Y')\r\n filedatetime = datetime.strptime(date, '%b %d %Y')\r\n gv[\"newercode\"] = filedatetime >= newcodedatetime\r\n if imfileline.upper().find(\"Command line string :\".upper()) >= 0:\r\n checkimcommandline(imfileline[22:])\r\n break\r\n imfileline = imfile.readline()\r\n slist = [[\"ghost status\",False,check_ghost_status,\"Model options on command line\"],\\\r\n [\"inputfile\",False,get_input_file_name,\"Text from input file:\"],\\\r\n [\"pop names\",False,get_population_names,\"Population Names\"],\\\r\n [\"pop tree\",False,get_population_tree,\"Population Tree :\"],\\\r\n [\"population size parameter info\",False,get_popsize_param,\"MARGINAL DISTRIBUTION VALUES AND HISTOGRAMS OF POPULATION SIZE AND MIGRATION PARAMETERS\"],\\\r\n [\"splitting time parameter info\",False,get_t_param,\"MARGINAL DISTRIBUTION VALUES AND HISTOGRAMS OF PARAMETERS IN MCMC\"],\\\r\n [\"migration parameter info\",False,get_2NM,\"Marginal Peak Locations and Probabilities\"],\\\r\n [\"demographic scales\",gv[\"skipdemographicscaling\"],get_demog_scales,\"MARGINAL DISTRIBUTION VALUES IN DEMOGRAPHIC UNITS\"] #,\\\r\n## [\"parameter priors\",False,get_parameter_priors,\"Parameter Priors\"] \\ ignore this I think\r\n ]\r\n while imfileline != '':\r\n if imfileline.upper().find(\"LOCATIONS OF PARAMETER ESTIMATES IN THIS FILE\".upper()) >= 0:\r\n while True:\r\n imfileline = imfile.readline()\r\n if imfileline.upper().find(\"Hyperparameter\".upper()) >= 0:\r\n print ( \"**IMfig error - input while was run using hyperparameters\")\r\n quit()\r\n if imfileline.upper().find(\"ESTIMATED POSTERIOR PROBABILITIES OF POPULATION TREE TOPOLOGIES\".upper()) >= 0:\r\n print ( \"**IMfig error - input while was generated to estimate phyhlogeny\")\r\n quit()\r\n if imfileline.upper().find(\"INPUT AND STARTING INFORMATION\".upper()) >= 0:\r\n 
break\r\n checkdone = True\r\n for i in range(len(slist)):\r\n checkdone = checkdone and slist[i][1]\r\n if slist[i][1] == False and imfileline.upper().find(slist[i][3].upper()) >= 0:\r\n if slist[i][0] == \"ghost status\":\r\n slist[i][2](imfile,imfileline,slist[i][3])\r\n else:\r\n slist[i].append(slist[i][2](imfile,imfileline,slist[i][3]))\r\n slist[i][1] = True\r\n if slist[i][0] == \"pop names\":\r\n numpops = len(slist[i][4])\r\n if checkdone:\r\n break\r\n imfileline = imfile.readline()\r\n if \"**NO DATA **\" in imfileline:\r\n print ( \"**IMfig error - input while was run without data\")\r\n quit()\r\n imfile.close()\r\n (scaledpop,scaledtime) = ([],[])\r\n if gv[\"skipdemographicscaling\"]:\r\n slist[7][1] = False\r\n else:\r\n if len(slist[7]) == 4:\r\n print ( \"**IMfig error - Information in demographic units not found, use -d option\")\r\n## printcommandset()\r\n quit()\r\n if len(slist[7][4])==3:\r\n (scaledpop, scaledtime) = calc_scaledvals(slist)\r\n if gv[\"excludeghost\"] and gv[\"useghost\"]:\r\n slist,scaledpop,scaledtime = removeghost(slist,scaledpop,scaledtime)\r\n return slist, scaledpop, scaledtime", "def save2nifti(self, file_path):\n # Define nifti1 datatype codes\n NIFTI_TYPE_UINT8 = 2 # unsigned char\n NIFTI_TYPE_INT16 = 4 # signed short\n NIFTI_TYPE_INT32 = 8 # signed int.\n NIFTI_TYPE_FLOAT32 = 16 # 32 bit float.\n NIFTI_TYPE_COMPLEX64 = 32 # 64 bit complex = 2 32 bit floats\n NIFTI_TYPE_FLOAT64 = 64 # 64 bit float = double.\n NIFTI_TYPE_RGB24 = 128 # 3 8 bit bytes.\n NIFTI_TYPE_INT8 = 256 # signed char.\n NIFTI_TYPE_UINT16 = 512 # unsigned short.\n NIFTI_TYPE_UINT32 = 768 # unsigned int.\n NIFTI_TYPE_INT64 = 1024 # signed long long.\n NIFTI_TYPE_UINT64 = 1280 # unsigned long long.\n NIFTI_TYPE_FLOAT128 = 1536 # 128 bit float = long double.\n NIFTI_TYPE_COMPLEX128 = 1792 # 128 bit complex = 2 64 bit floats.\n NIFTI_TYPE_COMPLEX256 = 2048 # 256 bit complex = 2 128 bit floats\n NIFTI_TYPE_RGBA32 = 2304 # 4 8 bit bytes.\n\n # Detect the data type of the input data.\n data_type = {\n np.uint8: NIFTI_TYPE_UINT8,\n np.uint16: NIFTI_TYPE_UINT16,\n np.uint32: NIFTI_TYPE_UINT32,\n np.float32: NIFTI_TYPE_FLOAT32,\n np.int16: NIFTI_TYPE_INT16,\n np.int32: NIFTI_TYPE_INT32,\n np.int8: NIFTI_TYPE_INT8\n }\n if sys.maxint > 2 ** 32: # The platform is 64 bit\n data_type[np.float128] = NIFTI_TYPE_FLOAT128\n data_type[np.float64] = NIFTI_TYPE_FLOAT64\n data_type[np.int64] = NIFTI_TYPE_INT64\n data_type[np.uint64] = NIFTI_TYPE_UINT64\n data_type[np.complex64] = NIFTI_TYPE_COMPLEX64\n data_type[np.complex128] = NIFTI_TYPE_COMPLEX128\n data_type[np.complex256] = NIFTI_TYPE_COMPLEX256\n\n header = nib.Nifti1Header()\n if self.data.shape[1] == 1:\n new_shape = (self.data.shape[0], 1, 1)\n else:\n new_shape = (self.data.shape[0], 1, 1, self.data.shape[1])\n data = self.data.reshape(new_shape)\n\n if data.dtype.type in data_type:\n header['datatype'] = data_type[data.dtype.type]\n header['cal_max'] = data.max()\n header['cal_min'] = data.min()\n image = nib.Nifti1Image(data, None, header)\n nib.nifti1.save(image, file_path)", "def save_grtrans_image(grt_obj):\n I_im = grt_obj.ivals[:,0,0].reshape(npix,npix).flatten()\n Q_im = grt_obj.ivals[:,1,0].reshape(npix,npix).flatten()\n U_im = grt_obj.ivals[:,2,0].reshape(npix,npix).flatten()\n V_im = grt_obj.ivals[:,3,0].reshape(npix,npix).flatten()\n\n # convert to Tb\n factor = 3.254e13/(RF**2 * psize_rad**2)\n I_im *= factor\n Q_im *= factor\n U_im *= factor\n V_im *= factor\n\n x = np.array([[i for i in range(npix)] for j in 
range(npix)]).flatten()\n y = np.array([[j for i in range(npix)] for j in range(npix)]).flatten()\n\n x -= npix/2\n y -= npix/2\n x = x*psize_uas\n y = y*psize_uas\n\n outdat = np.vstack((x.T,y.T,I_im.T,Q_im.T,U_im.T,V_im.T)).T\n np.savetxt('../rrjet_and_riaf/'+FNAME,outdat)\n #np.savetxt('../rrjet_and_riaf/grtrans_jet_compare_positron_noconv.txt',outdat)\n return", "def prepareData(args):\n print(\"Starting preprocessing\")\n\n # params\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = args['note_range']\n window_size = args['window_size']\n sr = args['sr']\n hop_length = args['hop_length']\n wav_dir = args['wav_dir']\n\n datapath = os.path.join(args['proj_root'], 'Features')\n bin_multiple = int(args['bin_multiple'])\n\n framecnt = 0\n maxFramesPerFile = args['maxFramesPerFile']\n maxFrames = args['maxFrames']\n\n fileappend = str(maxFramesPerFile) + 'pf_max' + str(maxFrames) + '.dat'\n\n filenameIN = os.path.join(datapath, 'input_' + fileappend)\n filenameOUT = os.path.join(datapath, 'output_' + fileappend)\n\n if os.path.isfile(filenameIN) and os.path.isfile(filenameOUT):\n n_bins = note_range * bin_multiple\n print('loading precomputed data from ' + filenameIN)\n mmi = np.memmap(filenameIN, mode='r', dtype=\"float64\")\n inputs = np.reshape(mmi, (-1, window_size, n_bins))\n\n mmo = np.memmap(filenameOUT, mode='r', dtype=\"float64\")\n outputs = np.reshape(mmo, (-1, note_range))\n\n return inputs, outputs, datapath\n\n inputs, outputs = [], []\n addCnt, errCnt = 0, 0\n\n # hack to deal with high PPQ from MAPS\n # https://github.com/craffel/pretty-midi/issues/112\n pretty_midi.pretty_midi.MAX_TICK = 1e10\n\n for s in os.listdir(wav_dir):\n subdir = os.path.join(wav_dir, s)\n if not os.path.isdir(subdir):\n continue\n # recursively search in subdir\n print(subdir)\n for dp, dn, filenames in os.walk(subdir):\n # in each level of the directory, look at filenames ending with .mid\n for f in filenames:\n # if there exists a .wav file and .midi file with the same name\n\n if f.endswith('.wav'):\n audio_filename = f\n fprefix = audio_filename.split('.wav')[0]\n mid_fn = fprefix + '.mid'\n txt_fn = fprefix + '.txt'\n print(\"Handling files {}\".format(fprefix))\n if mid_fn in filenames:\n # extract_features\n audio_filename = os.path.join(dp, audio_filename)\n inputnp = extract_features(audio_filename, args)\n times = librosa.frames_to_time(np.arange(inputnp.shape[0]), sr=sr, hop_length=hop_length)\n # mid2outputnp\n mid_fn = os.path.join(dp, mid_fn)\n pm_mid = pretty_midi.PrettyMIDI(mid_fn)\n\n outputnp = mid2outputnp(pm_mid, times, args)\n\n # check that num onsets is equal\n if inputnp.shape[0] == outputnp.shape[0]:\n # Some filtering highly pragmatic filtering on the data!!\n # take only frames that are \"sufficiently loud\", ...\n good2take = np.array(inputnp.max(axis=(1, 2)) > 0.05)\n # ... and always omit the last frame as this has been padded ...\n good2take[-1] = False # omit last\n # ... and only take frames with at least one true label (i.e. 
some tone is played)\n good2take = good2take & (outputnp.max(axis=1) > 0)\n outputnp = outputnp[good2take, ]\n inputnp = inputnp[good2take, ]\n\n addCnt += 1\n if inputnp.shape[0] > maxFramesPerFile > 0:\n inputnp = inputnp[:maxFramesPerFile]\n outputnp = outputnp[:maxFramesPerFile]\n framecnt += inputnp.shape[0]\n print(\"framecnt is {}\".format(framecnt))\n inputs.append(inputnp)\n outputs.append(outputnp)\n else:\n print(\"error for fprefix {}\".format(fprefix))\n errCnt += 1\n print(inputnp.shape)\n print(outputnp.shape)\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(subdir))\n break\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n if framecnt > maxFrames > 0:\n print(\"have enought frames, leaving {}\".format(wav_dir))\n break\n\n print(\"{} examples in dataset\".format(addCnt))\n print(\"{} examples couldnt be processed\".format(errCnt))\n\n # concatenate dynamic list to numpy list of example\n if addCnt:\n inputs = np.concatenate(inputs)\n outputs = np.concatenate(outputs)\n\n print(\"inputs.shape\")\n print(inputs.shape)\n print(\"outputs.shape\")\n print(outputs.shape)\n mmi = np.memmap(filename=filenameIN, mode='w+', shape=inputs.shape, dtype=\"float64\")\n mmi[:] = inputs[:]\n mmo = np.memmap(filename=filenameOUT, mode='w+', shape=outputs.shape, dtype=\"float64\")\n mmo[:] = outputs[:]\n del mmi\n del mmo\n\n return inputs, outputs, datapath", "def to_files(self, gen, filenames=None):\n\n if filenames:\n self.filenames = filenames\n\n for f, arr in zip(self.pathgen, gen):\n np.save(f, arr)", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def save(self):\n # Sanity checks\n assert len(self.actions) == len(self.rewards)\n assert len(self.actions) == len(self.episode_starts)\n assert len(self.actions) == len(self.images_path)\n assert len(self.actions) == len(self.ground_truth_states)\n assert len(self.target_positions) == self.episode_idx + 1\n\n data = {\n 'rewards': np.array(self.rewards),\n 'actions': np.array(self.actions),\n 'episode_starts': np.array(self.episode_starts)\n }\n\n ground_truth = {\n 'target_positions': np.array(self.target_positions),\n 'ground_truth_states': np.array(self.ground_truth_states),\n 'images_path': np.array(self.images_path)\n }\n print(\"Saving preprocessed data...\")\n np.savez('{}/preprocessed_data.npz'.format(self.data_folder), **data)\n np.savez('{}/ground_truth.npz'.format(self.data_folder), **ground_truth)", "def associate_files(self):\n # Open starinfo file and define structured array\n starinfo_file = self.starinfo_file\n nstar = sum(1 for line in open(starinfo_file))\n infoname = ['obj', 'std', 'caldir', 'altname']\n infofmt = ['|S25', '|S25', '|S25', '|S25']\n starinfo = np.zeros(nstar, dtype={\n 'names': infoname, 'formats': infofmt})\n with open(starinfo_file, 'r') as arq:\n for i in range(nstar):\n linelist = arq.readline().split()\n for j in range(len(infoname)):\n starinfo[i][j] = linelist[j]\n\n if self.stored_sens:\n self.load_storedsens()\n\n os.chdir(self.raw_dir)\n\n l = glob.glob('*.fits')\n l.sort()\n\n headers = []\n headers_ext1 = []\n for i in l:\n try:\n headers.append(fits.getheader(i, ext=0))\n headers_ext1.append(fits.getheader(i, ext=1))\n except IOError:\n print('IOError reading file {:s}.'.format(i))\n raise SystemExit(0)\n\n oversc = np.array(\n [('overscan') in i for i in headers_ext1], dtype='bool')\n\n mjds = np.array([i['mjd-obs'] for i in headers_ext1], dtype='float32')\n idx = np.arange(len(l))\n\n images = np.array([\n l[i] for i in idx if (\n (headers[i]['obstype'] == 'OBJECT') &\n (headers[i]['object'] != 'Twilight') &\n (headers[i]['obsclass'] != 'acq'))])\n\n field_names = [\n 'filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'obsclass', 'object', 'obstype',\n 'grating_wl', 'overscan', 'mjd', 'ccdsum']\n types = [\n 'S120', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60', 'S60',\n 'float32', 'bool', 'float32', 'S60']\n hdrkeys = [\n 'observat', 'instrume', 'detector', 'grating', 'filter1',\n 'obsclass', 'object', 'obstype', 'grwlen']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n hdrpars = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys]) +\n (oversc[i],) + (mjds[i],) + (headers_ext1[i]['ccdsum'],))\n for i in idx], dtype=hdrpars_type)\n\n associated = []\n\n for i, j in enumerate(images):\n\n # Take great care when changing this.\n hdr = fits.getheader(j, ext=0)\n hdr_ext1 = fits.getheader(j, ext=1)\n mjd = hdr_ext1['mjd-obs']\n\n element = {\n 'image': j, 'observatory': hdr['observat'],\n 'instrument': hdr['instrume'],\n 'detector': hdr['detector'], 'grating_wl': hdr['grwlen'],\n 'mjd': mjd, 'grating': hdr['grating'],\n 'filter1': hdr['filter1'], 'obsclass': hdr['obsclass'],\n 'object': hdr['object']}\n\n if self.stored_sens:\n ssf = self.stored_sensfunc\n element['standard_star'] = 
ssf['filename'][\n (ssf['observatory'] == hdr['observat']) &\n (ssf['detector'] == hdr['detector']) &\n (ssf['grating'] == hdr['grating']) &\n (ssf['instrument'] == hdr['instrume']) &\n (ssf['filter1'] == hdr['filter1']) &\n (ssf['maskname'] == hdr['maskname'])]\n else:\n element['standard_star'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'OBJECT') &\n (np.array([k in ['partnerCal', 'progCal']\n for k in hdrpars['obsclass']], dtype='bool')) &\n (hdrpars['object'] != 'Twilight') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['filter1'] == hdr['filter1']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'stdstar_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'stdstar_ttol'))]\n\n element['flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <= self.cfg.getfloat('associations',\n 'flat_ttol'))]\n\n element['twilight'] = hdrpars['filename'][\n (hdrpars['object'] == 'Twilight') &\n (hdrpars['obstype'] == 'OBJECT') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (abs(hdrpars['grating_wl'] - hdr['grwlen']) <=\n self.cfg.getfloat('associations', 'twilight_wltol')) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'twilight_ttol'))]\n\n c = 'twilight'\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 1:\n element[c] = element[c][0]\n elif len(element[c]) == 0:\n element[c] = ''\n\n # A flat close to the twilight observation for a better\n # response function.\n if element['twilight']:\n twipars = hdrpars[hdrpars['filename'] == element['twilight']]\n element['twilight_flat'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'FLAT') &\n (hdrpars['observatory'] == twipars['observatory']) &\n (hdrpars['detector'] == twipars['detector']) &\n (hdrpars['grating'] == twipars['grating']) &\n (hdrpars['grating_wl'] == twipars['grating_wl']) &\n (abs(mjds - twipars['mjd']) <= self.cfg.getfloat(\n 'associations', 'twilight_ttol'))]\n else:\n element['twilight_flat'] = np.array([], dtype='S60')\n\n element['arc'] = hdrpars['filename'][\n # (hdrpars['object'] == 'CuAr') &\n (hdrpars['obstype'] == 'ARC') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['grating'] == hdr['grating']) &\n (hdrpars['grating_wl'] == hdr['grwlen']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'arc_ttol'))]\n\n element['bias'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BIAS') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (abs(mjds - mjd) <=\n self.cfg.getfloat('associations', 'bias_ttol')) &\n (\n (hdrpars['overscan'] & (self.fl_over == 'yes')) |\n (~hdrpars['overscan'] & (self.fl_over == 'no'))\n )]\n\n im = fits.open(element['image'])\n ishape = np.array(im[1].data.shape, dtype='float32')\n im.close()\n del(im)\n\n validBiases = np.ones(len(element['bias']), dtype='bool')\n k = 0\n\n for biasImage in element['bias']:\n\n bias = fits.open(biasImage)\n bshape = np.array(bias[1].data.shape, dtype='float32')\n bias.close()\n del(bias)\n\n #\n # Elinates biases if they differ in array size from\n 
# the science image. Small differences are normal due to\n # the overscan subtraction in processed bias frames.\n #\n if np.any(np.abs(bshape / ishape - 1.0) > 0.10):\n validBiases[k] = False\n\n k += 1\n\n element['bias'] = element['bias'][validBiases]\n del(k)\n\n element['bpm'] = hdrpars['filename'][\n (hdrpars['obstype'] == 'BPM') &\n (hdrpars['observatory'] == hdr['observat']) &\n (hdrpars['detector'] == hdr['detector']) &\n (hdrpars['ccdsum'] == hdr_ext1['ccdsum'])]\n\n categories = ['flat', 'bias', 'arc', 'standard_star',\n 'bpm', 'twilight_flat']\n\n for c in categories:\n if len(element[c]) > 1:\n element[c] = closest_in_time(element[c], j)\n elif len(element[c]) == 0:\n element[c] = ''\n elif len(element[c]) == 1:\n element[c] = (element[c])[0]\n\n associated.append(element)\n\n # Define mdf filename\n # Based in gprepare.cl\n # Did not account for observation in Nod-and-Shuffle\n for i in associated:\n header_flat = [\n k for j, k in enumerate(headers) if l[j] == i['flat']\n ]\n if len(header_flat):\n header_flat = header_flat[0]\n MaskName = header_flat['maskname']\n if MaskName == \"IFU-2\":\n slits = 'both'\n elif MaskName == \"IFU-B\":\n slits = 'blue'\n elif MaskName == \"IFU-R\":\n slits = 'red'\n i['slits'] = slits\n\n if self.object_filter:\n objs = self.object_filter.split(',')\n sci_ims = [\n i for i in associated if (\n (i['obsclass'] == 'science') &\n (i['object'] in objs))]\n else:\n sci_ims = [i for i in associated if i['obsclass'] == 'science']\n\n if self.all_stars:\n std_ims = [\n i for i in associated if i['obsclass'] in ['partnerCal',\n 'progCal']]\n else:\n used_stds = [i['standard_star'] for i in sci_ims]\n std_ims = [i for i in associated if i['image'] in used_stds]\n\n # Get star info from starinfo.dat\n possible_names = np.concatenate((starinfo['obj'], starinfo['std'],\n starinfo['altname']))\n n_names = len(possible_names)\n\n for i, j in enumerate(possible_names):\n possible_names[i] = (j.lower()).replace(' ', '')\n\n for i in std_ims:\n # Removes the 'standard_star' key if the dictionary\n # element in question refers to a standard star.\n del i['standard_star']\n starname = (i['object'].lower()).replace(' ', '')\n\n try:\n stdstar_idx = (\n np.arange(n_names)[possible_names == starname] %\n (n_names / 3))[0]\n except:\n raise Exception(\n 'Standard star named {:s} not found in file {:s}'.\n format(starname, starinfo_file))\n\n i['stdstar'] = starinfo[stdstar_idx]['std']\n\n if starinfo[stdstar_idx]['caldir'] == 'gireds_data':\n i['caldir'] = pkg_resources.resource_filename(\n 'gireds', 'data/')\n else:\n i['caldir'] = starinfo[stdstar_idx]['caldir']\n\n self.sci = sci_ims\n self.std = std_ims\n\n # Writes the file association dictionary to an ASCII file\n # in the run directory.\n\n if not self.dry_run:\n try:\n os.mkdir(self.products_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n try:\n os.mkdir(self.run_dir)\n except OSError as err:\n if err.errno == 17:\n pass\n else:\n raise err\n\n if not self.dry_run:\n os.chdir(self.run_dir)\n json.dump(\n sci_ims, open('file_associations_sci.dat', 'w'),\n sort_keys=True, indent=4)\n json.dump(\n std_ims, open('file_associations_std.dat', 'w'),\n sort_keys=True, indent=4)", "def generateIpcf(inFile, outFile):\n projectData = parseIarData(inFile)\n if projectData.deviceName == None:\n raise Exception(\"Invalid file format for input file.\\n\")\n\n cSrcDups = getDuplicateFiles(projectData.cSrcList)\n asmSrcDups = getDuplicateFiles(projectData.asmSrcList)\n if len(cSrcDups) != 0 
or len(asmSrcDups) != 0:\n print(\"WARNING: IAR Embedded Workbench does not support files with the same \"\n \"name even if they have different extensions. There were multiple source \"\n \"files detected with name(s):\", end = ' ')\n print(*cSrcDups, sep = \";\", end=' ')\n print(*asmSrcDups, sep = \";\")\n\n root = ElementTree.Element(ELEM_IAR_PROJ_CONN, {ATTR_NAME: projectData.projectName, ATTR_VERSION: '1.9'})\n coreSuffix = \"\"\n if projectData.core == \"CM0p\":\n coreSuffix = 'M0+'\n elif projectData.core == \"CM4\":\n coreSuffix = 'M4'\n else:\n raise Exception(\"Core %s not supported by this export mechanism.\\n\" % projectData.core)\n \n # Device element\n deviceElem = ElementTree.SubElement(root, ELEM_DEVICE)\n nameElem = ElementTree.SubElement(deviceElem, ELEM_NAME)\n # The device name format needs to match the name of the device in the \n # IAR database. Expected format for Cypress devices is {MPN-name}M4/{MPN-name}M0+\n nameElem.text = projectData.deviceName + coreSuffix\n\n # The IAR compiler and assembler need to have the defines and include paths \n # specified separately. The same list is passed to both below.\n\n # Include paths and Asm Include paths\n includeElem = ElementTree.SubElement(root, ELEM_INC_PATH)\n asmIncludeElem = ElementTree.SubElement(root, ELEM_ASM_INC_PATH)\n for inc in projectData.includePathList:\n cleanPath = cleanUpPath(inc)\n pathElem = ElementTree.SubElement(includeElem, ELEM_PATH)\n pathElem.text = cleanPath\n pathElem = ElementTree.SubElement(asmIncludeElem, ELEM_PATH)\n pathElem.text = cleanPath\n\n # Defines and Asm defines\n defElem = ElementTree.SubElement(root, ELEM_DEFS)\n asmDefElem = ElementTree.SubElement(root, ELEM_ASM_DEFS)\n for define in projectData.defineList:\n cleanDef = cleanUpDefine(define)\n pathElem = ElementTree.SubElement(defElem, ELEM_DEF)\n pathElem.text = cleanDef\n pathElem = ElementTree.SubElement(asmDefElem, ELEM_DEF)\n pathElem.text = cleanDef\n\n # Linker script file\n linkerFileElem = ElementTree.SubElement(root, ELEM_LINK_FILE)\n linkerPathElem = ElementTree.SubElement(linkerFileElem, ELEM_PATH)\n linkerPathElem.text = cleanUpPath(projectData.linkerScript)\n\n # Lib Files\n linkerOptsElem = ElementTree.SubElement(root, ELEM_LINK_OPTS)\n for lib in projectData.libsList:\n argElem = ElementTree.SubElement(linkerOptsElem, ELEM_ARG)\n argElem.text = cleanUpPath(lib)\n \n groupDict = defaultdict(list)\n filterFiles(groupDict, projectData.headersList)\n filterFiles(groupDict, projectData.cSrcList)\n filterFiles(groupDict, projectData.asmSrcList)\n\n filesElem = ElementTree.SubElement(root, ELEM_FILES)\n for grp in sorted(groupDict.keys()):\n if grp != GRP_UNFILTERED:\n createGroup(filesElem, grp, groupDict[grp])\n\n for fl in groupDict[GRP_UNFILTERED]:\n pathElem = ElementTree.SubElement(filesElem, ELEM_PATH)\n pathElem.text = fl\n\n # The printPretty function takes care of encoding hence the file is\n # opened in binary mode.\n with open(outFile, 'wb') as opf:\n opf.write(printPretty(root))", "def save_data(self, f): \n if not self.sampling:\n self.convert_to_array()\n np.save(f, self.reads)", "def save(self, format='npz'):\n _path = os.getenv('STARTERLITE') + '/output/grf/%s.%s' % (self.fn, format)\n _wf_dict = {'grf': self.survey_maps, 'coords': self.survey_map_coords}\n np.savez(_path, **_wf_dict)" ]
[ "0.6318846", "0.59935594", "0.58033705", "0.557617", "0.5541329", "0.548497", "0.54715943", "0.54680747", "0.54556257", "0.5448563", "0.5441124", "0.543736", "0.54259264", "0.5376901", "0.53260976", "0.5311196", "0.5308559", "0.5306656", "0.52739114", "0.524727", "0.52424717", "0.52275544", "0.52234864", "0.52132034", "0.52070785", "0.51888937", "0.51636505", "0.5157772", "0.51564246", "0.5149056" ]
0.69364214
0
MPI wrapper function for MST calculation
def _mst_calc(dest_tifs, params, tiles, preread_ifgs): process_tiles = mpiops.array_split(tiles) log.info('Calculating minimum spanning tree matrix') def _save_mst_tile(tile, i, preread_ifgs): """ Convenient inner loop for mst tile saving """ mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params) # locally save the mst_mat mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i)) np.save(file=mst_file_process_n, arr=mst_tile) for t in process_tiles: _save_mst_tile(t, t.index, preread_ifgs) log.debug('Finished mst calculation for process {}'.format(mpiops.rank)) mpiops.comm.barrier()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_mediation(task):\n\tatlas = 'power'\n\tproject='hcp'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tsubjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))\n\tstatic_results = graph_metrics(subjects,task,atlas,run_version='fz')\n\tmatrices = static_results['matrices']\n\tsubject_pcs = static_results['subject_pcs']\n\tsubject_wmds = static_results['subject_wmds']\n\tsubject_mods = static_results['subject_mods']\n\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\tfor i in range(subject_pcs.shape[1]):\n\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\tmean_conn = np.nanmean(matrices,axis=0)\n\te_tresh = np.percentile(mean_conn,85)\n\tsubject_wmds[np.isnan(subject_pcs)] = 0.0\n\tm = np.zeros((264,264,264))\n\tpool = Pool(40)\n\tfor n in range(264):\n\t\tprint n\n\t\tsys.stdout.flush()\n\t\tvariables = []\n\t\tfor i,j in combinations(range(264),2):\n\t\t\tvariables.append(pd.DataFrame(data={'pc':subject_wmds[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))\n\t\tresults = pool.map(multi_med,variables)\n\t\tfor r,i in zip(results,combinations(range(264),2)):\n\t\t\tm[n,i[0],i[1]] = r\n\t\t\tm[n,i[1],i[0]] = r\n\t\tnp.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s_wmds.npy'%(task),m)", "def doMPIMD(CONFIGFILE, debug): \n # Read in the call arguments\n CONFIGFILE = CONFIGFILE\n debug = bool(debug == \"True\")\n # Initialize MD module\n md_module = MD_module(CONFIGFILE = CONFIGFILE, debug = debug)\n md_module.loadIterationFromDumpFile()\n if debug:\n if rank == 0:\n sys.stderr.write(\"Number of MPI processes: {0}\\n\".format(size))\n sys.stderr.flush()\n comm.barrier()\n # Every node works only on segments modulo their rank \n workcount = 0\n md_skip_count = 0\n for loop_bin in md_module.iteration:\n for loop_segment in loop_bin:\n #if not loop_bin.isConverged():\n if workcount % size == rank:\n # Run MD on this node\n md_module.runSegmentMD(loop_segment)\n #else:\n # md_skip_count += 1\n # if workcount % size == rank:\n # # Run MD skip\n # md_module.SkipSegmentMD(loop_segment, workcount, md_skip_count)\n workcount += 1\n # Log if rank 0\n if rank == 0:\n md_module.printMdStatus(loop_segment, workcount, md_skip_count)\n \n # Wait for all processes to finish\n comm.barrier()\n if rank == 0:\n md_module.printMdStatus(loop_segment, workcount, md_skip_count)\n #sys.stdout.write(\"\\n\")\n if debug:\n sys.stdout.write(\"Finishing MPI\\n\")\n sys.stdout.flush()\n # Remove the iteration dump file\n if not debug:\n md_module.removeIterationDumpFile()", "def make_parallel_MPI(function):\n\n def wrapper(*args, **kwargs):\n\n # Checks that the essential paremeters are there\n assert not kwargs['out_allPartTypes'] is None\n assert not kwargs['simulation_name'] is None\n\n # Generate a simulation object and oush it to **kwargs\n sim = Simulation(simulation_name=kwargs['simulation_name'])\n kwargs['simulation'] = sim\n\n # Set-up the MPI allocation schedule\n process = 0\n process_iterator = itertools.product(sim.clusterIDAllowed, sim.redshiftAllowed)\n\n for halo_num, redshift in process_iterator:\n\n if process % size == rank:\n\n cluster_obj = Cluster(clusterID=int(halo_num), redshift=redshift_str2num(redshift))\n file_name = sim.cluster_prefix + sim.halo_Num(halo_num) + redshift\n fileCompletePath = sim.pathSave + '/' + sim.simulation + '_output/collective_output/' + file_name + '.hdf5'\n\n kwargs['cluster'] = cluster_obj\n 
kwargs['fileCompletePath'] = fileCompletePath\n\n print('CPU ({}/{}) is processing halo {} @ z = {} ------ process ID: {}'.format(rank, size, cluster_obj.clusterID, cluster_obj.redshift, process))\n # Each CPU loops over all apertures - this avoids concurrence in file reading\n # The loop over apertures is defined explicitly in the wrapped function.\n function(*args, **kwargs)\n\n process += 1\n\n return wrapper", "def mpirun(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n print rank \n print size\n data = []\n dcds = self.getdcds()\n for i in range(0, len(dcds)):\n pid = i % size \n if pid == rank:\n dcd = dcds[i]\n dcdpath = self.d + \"/\" + dcd\n data.extend(self.metric(self.dcdtopsf(dcd), dcdpath))\n self.write(data)", "def mpisync(func, comm=MPI.COMM_WORLD):\n def mpifunc(*args, **kwargs):\n if comm.Get_rank() == 0:\n res = func(*args, **kwargs)\n else:\n res = None\n res = comm.bcast(res, root=0)\n return res\n return mpifunc", "def PARALLEL_worker_mc_inv(procnum, num_samples_per_processor, inversion_type, M_amplitude, green_func_array, real_data_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, return_dict_MTs, return_dict_similarity_values_all_samples, return_dict_shift_idxs, return_dict_MT_single_force_rel_amps, return_dict_medium_1_medium_2_rel_amp_ratios, invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels, num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n print(\"Processing for process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")\n \n # Define temp data stores for current process:\n tmp_MTs = np.zeros((len(green_func_array[0,:,0]), num_samples_per_processor), dtype=float)\n tmp_similarity_values_all_samples = np.zeros(num_samples_per_processor, dtype=float)\n tmp_shift_idxs_all_samples = []\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_MT_single_force_rel_amps = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n tmp_medium_1_medium_2_rel_amp_ratios = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_frac_medium_2_diff_phases_dict = {} # Dictionary for temp storing of phase fractions of medium 1\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = np.zeros((num_samples_per_processor, 3), dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = []\n \n # Sort greens function storage if processing for multiple media:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n green_func_array_total_both_media = green_func_array.copy()\n \n # 3. 
Loop over samples, checking how well a given MT sample synthetic wavefrom from the forward model compares to the real data:\n for i in range(num_samples_per_processor):\n # Generate random medium amplitude ratio and associated greens functions (if required):\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n # If want to invert for ratio of meduim 1 to medium 2 separately for different phases:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_2_diff_phases_dict[\"P\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"S\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"surface\"] = np.random.uniform(0.0, 1.0)\n # Generate associated greens functions:\n green_func_array = np.zeros(np.shape(green_func_array_total_both_media[:,:,:,0]), dtype=float)\n # Loop over greens function for each station-phase:\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_2 = tmp_frac_medium_2_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array[j, :, :] = (1. - tmp_frac_medium_2)*green_func_array_total_both_media[j,:,:,0] + tmp_frac_medium_2*green_func_array_total_both_media[j,:,:,1] \n # Otherwise generate single fraction value and associated greens functions:\n else:\n frac_medium_2 = np.random.uniform(0.0, 1.0)\n green_func_array = (1. - frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # 4. Generate synthetic waveform for current sample:\n # Vary moment amplitude randomly if specified:\n if invert_for_relative_magnitudes_switch:\n M_amplitude_exp_factor = np.random.uniform(low=rel_exp_mag_range[0], high=rel_exp_mag_range[1])\n M_amplitude = 10.**M_amplitude_exp_factor\n # And generate waveform from source mechanism tensor:\n if inversion_type==\"full_mt\":\n MT_curr_sample = generate_random_MT()*M_amplitude # Generate a random MT sample\n elif inversion_type==\"full_mt_Lune_samp\":\n MT_curr_sample = generate_random_MT_Lune_samp()*M_amplitude # Generate a random MT sample, sampled uniformly in Lune space\n elif inversion_type==\"DC\":\n MT_curr_sample = generate_random_DC_MT()*M_amplitude # Generate a random DC sample\n elif inversion_type==\"single_force\":\n MT_curr_sample = generate_random_single_force_vector()*M_amplitude # Generate a random single force sample\n elif inversion_type == \"DC_single_force_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_coupled_tensor() # Generate a random DC-single-force coupled sample, with associated relative amplitude of DC to single force\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_single_force_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_crack_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_crack_coupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"single_force_crack_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_single_force_crack_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n synth_waveform_curr_sample = forward_model(green_func_array, MT_curr_sample) # Note: Greens functions must be of similar amplitude units going into here...\n \n # 5. 
Compare real data to synthetic waveform (using variance reduction or other comparison metric), to assign probability that data matches current model:\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_waveform_curr_sample, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n \n # 6. Append results to data store:\n tmp_MTs[:,i] = MT_curr_sample[:,0]\n tmp_similarity_values_all_samples[i] = similarity_curr_sample\n tmp_shift_idxs_all_samples.append(list(shift_idxs))\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps[i] = random_DC_to_single_force_amp_frac\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,0] = tmp_frac_medium_2_diff_phases_dict[\"P\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,1] = tmp_frac_medium_2_diff_phases_dict[\"S\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,2] = tmp_frac_medium_2_diff_phases_dict[\"surface\"]\n else:\n tmp_medium_1_medium_2_rel_amp_ratios[i] = frac_medium_2\n \n if i % 10000 == 0:\n print(\"Processor number:\", procnum, \"- Processed for\",i,\"samples out of\",num_samples_per_processor,\"samples\")\n \n # 7. And convert misfit measure to likelihood function probability:\n tmp_similarity_values_all_samples = np.exp(-(1.-tmp_similarity_values_all_samples)/2.)\n \n # And return values back to script:\n return_dict_MTs[procnum] = tmp_MTs\n return_dict_similarity_values_all_samples[procnum] = tmp_similarity_values_all_samples\n return_dict_shift_idxs[procnum] = tmp_shift_idxs_all_samples\n return_dict_MT_single_force_rel_amps[procnum] = tmp_MT_single_force_rel_amps\n if num_phase_types_for_media_ratios>0:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios_multi_phases\n else:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios\n print(\"Finished processing process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")", "def test_MPI_Parallel_Interface(comm):\n\n def printMPI(msg):\n for i in range(comm.Get_size()):\n comm.barrier()\n if comm.Get_rank() == i:\n print(\"Proc {}: {}\".format(i, msg))\n\n n = 10\n\n par = MPI_Objective_Interface(mp.Extended_Rosenbrock, nb_domain_grid_pts=n,\n comm=comm)\n\n printMPI(par.counts)\n\n # ref = mp.Extended_Rosenbrock\n\n np.testing.assert_array_equal(\n mp.Extended_Rosenbrock.startpoint(n)[par.subdomain_slices],\n par.startpoint())\n np.testing.assert_almost_equal(\n mp.Extended_Rosenbrock.f(mp.Extended_Rosenbrock.startpoint(n)),\n par.f(par.startpoint()),\n err_msg=\"Different Function Value at startpoint\")\n np.testing.assert_allclose(\n mp.Extended_Rosenbrock.grad(mp.Extended_Rosenbrock.startpoint(n))[\n par.subdomain_slices],\n par.grad(par.startpoint()),\n err_msg=\"Different Gradient Value at startpoint\")", "def run_split_with_solidsWIS(self,mc):\n top, bot = self.outs\n feed = self.ins[0]\n top.copy_like(feed)\n bot.copy_like(top)\n top_mass = top.mass\n WIS=1-mc\n F_mass_ins = sum(top_mass*self.split)\n F_mass_tot_out = F_mass_ins/WIS\n F_mass_sol_out = F_mass_tot_out - F_mass_ins\n F_mass_sol_in = feed.F_mass - F_mass_ins\n \n x_sol = F_mass_sol_out/F_mass_sol_in\n self.split[self.split==0] = 
x_sol\n top_mass[:] *= self.split\n bot.mass[:] -= top_mass", "def run_split_with_solidsWIS(self,mc):\n top, bot = self.outs\n feed = self.ins[0]\n top.copy_like(feed)\n bot.copy_like(top)\n top_mass = top.mass\n WIS=1-mc\n F_mass_ins = sum(top_mass*self.split)\n F_mass_tot_out = F_mass_ins/WIS\n F_mass_sol_out = F_mass_tot_out - F_mass_ins\n F_mass_sol_in = feed.F_mass - F_mass_ins\n \n x_sol = F_mass_sol_out/F_mass_sol_in\n self.split[self.split==0] = x_sol\n top_mass[:] *= self.split\n bot.mass[:] -= top_mass", "def _data_parallel_master(self, intermediates):\n\n # Always using same \"device order\" makes the ReduceAdd operation faster.\n # Thanks to:: Tete Xiao (http://tetexiao.com/)\n intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n\n to_reduce = [i[1][:2] for i in intermediates]\n to_reduce = [j for i in to_reduce for j in i] # flatten\n target_gpus = [i[1].sum.get_device() for i in intermediates]\n\n sum_size = sum([i[1].sum_size for i in intermediates])\n sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n\n broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n\n outputs = []\n for i, rec in enumerate(intermediates):\n outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))\n\n return outputs", "def speedup(n0,l,ntarray=np.arange(100),marray=np.arange(100)):\n\n# initialise variables\n\n Sup_m2 = np.zeros(np.size(marray))\n Sup_m3 = np.zeros(np.size(marray))\n Sup_m4 = np.zeros(np.size(marray))\n Sup_nt2 = np.zeros(np.size(ntarray))\n Sup_nt3 = np.zeros(np.size(ntarray))\n Sup_nt4 = np.zeros(np.size(ntarray))\n\n# fix nt at the meadian value of ntarray\n# run test_stats_omp over the range of marray to collect the walltimes for one and two threads\n# calculate the speed up and store it in Sup_mi where i is the number of threads\n\n nt = int(np.around(np.mean(ntarray)))\n for m in np.arange(1,np.size(marray)+1):\n wall_1thread = ns.test_stats_omp(n0,l,nt,m,1)\n wall_2thread = ns.test_stats_omp(n0,l,nt,m,2)\n wall_3thread = ns.test_stats_omp(n0,l,nt,m,3)\n wall_4thread = ns.test_stats_omp(n0,l,nt,m,4)\n Sup_m2[m-1] = wall_1thread/wall_2thread\n Sup_m3[m-1] = wall_1thread/wall_3thread \n Sup_m4[m-1] = wall_1thread/wall_4thread \n \n# fix m at the median value of marray\n# run test_stats_omp over the range of ntarray to collect the walltimes for one and two threads\n# calculate the speed up and store it in Sup_nti where i is the number of threads\n\n m = int(np.around(np.median(marray)))\n for nt in np.arange(1,np.size(ntarray)+1):\n wall_1thread = ns.test_stats_omp(n0,l,nt,m,1)\n wall_2thread = ns.test_stats_omp(n0,l,nt,m,2)\n wall_3thread = ns.test_stats_omp(n0,l,nt,m,3)\n wall_4thread = ns.test_stats_omp(n0,l,nt,m,4)\n Sup_nt2[nt-1] = wall_1thread/wall_2thread\n Sup_nt3[nt-1] = wall_1thread/wall_3thread\n Sup_nt4[nt-1] = wall_1thread/wall_4thread\n\n# make sure marray and ntarray are suitable to create a plot\n\n m = np.arange(1,np.size(marray)+1)\n nt = np.arange(1,np.size(ntarray)+1)\n\n# plot Sup_nti against nt \n\n plt.figure()\n plt.plot(m, Sup_nt2, 'b', label ='2 Threads')\n plt.plot(nt, Sup_nt3,'r', label ='3 Threads')\n plt.plot(nt, Sup_nt4, 'g', label ='4 Threads')\n plt.xlabel('number of realizations')\n plt.ylabel('speedup')\n plt.title('plot of speedup vs number of realizations')\n plt.legend(loc='best')\n plt.show()\n\n# plot Sup_mi against m\n\n plt.figure()\n plt.plot(m, Sup_m2, 'b', label ='2 Threads')\n plt.plot(nt, Sup_m3,'r', label ='3 
Threads')\n plt.plot(nt, Sup_m4, 'g', label ='4 Threads')\n plt.xlabel('number of new nodes')\n plt.ylabel('speedup')\n plt.title('plot of speedup vs number of new nodes')\n plt.legend(loc='best')\n plt.show()\n \n# plot Sup_nt4 and Sup_m4 against nt and m to compare which has the greater effect\n \n plt.figure()\n plt.plot(nt, Sup_nt4, 'b', label='varying nt 4 threads')\n plt.plot(m, Sup_m4, 'r', label='varying m 4 threads')\n plt.xlabel('number of realizations/new nodes')\n plt.ylabel('speedup')\n plt.title('comparison of speedup when varying m to speed up when varying nt')\n plt.legend(loc='best')", "def main(config):\n all_procs = []\n result_q = mp.Queue()\n for seed in config[\"seeds\"]:\n config[\"seed\"] = seed\n p = mp.Process(target=run, args=(config, result_q))\n p.start()\n all_procs.append(p)\n\n for p in all_procs:\n p.join()\n\n all_returns = [result_q.get() for p in all_procs]\n mean_per_restart = np.mean(all_returns, axis=1)\n mean, std = np.mean(mean_per_restart), np.std(mean_per_restart)\n\n # Return the negative since we're minimizing the function\n # .. the metric minimized is suggested from Duan et al. (2016)\n return -(mean - std)", "def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... 
=1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)", "def sim_run(ini_resource=0.0002, ini_density=(1e4, 1e4), min_size=(1.5e1, 1.5e4), max_size=(2.5e1, 2.5e4),\n spp_names=('Aa', 'Bb'), dilution_rate=0.0, volume=1.0, nsi_spp=(500, 500), nsi_min=200,\n nsi_max=2000, num_sc=(100, 100), time_end=30, time_step=1 / 24, print_time_step=1,\n n_procs=2, n_threads=1, mem_lim=2e9):\n cluster = LocalCluster(n_workers=n_procs, threads_per_worker=n_threads, memory_limit=mem_lim)\n client = Client(cluster)\n\n sbm_out = []\n sbmc = dask.delayed(SBMc)(ini_resource=ini_resource, ini_density=ini_density, min_size=min_size, max_size=max_size,\n spp_names=spp_names, num_sc=num_sc, time_end=time_end,\n dilution_rate=dilution_rate, volume=volume)\n sbmi = dask.delayed(SBMi)(ini_resource=ini_resource, ini_density=ini_density, min_size=min_size, max_size=max_size,\n spp_names=spp_names, nsi_spp=nsi_spp, nsi_min=nsi_min, nsi_max=nsi_max, volume=volume,\n time_step=time_step, time_end=time_end, print_time_step=print_time_step,\n dilution_rate=dilution_rate)\n sbm_out.append(sbmc)\n sbm_out.append(sbmi)\n\n with ProgressBar(), dask.config.set(scheduler='processes'):\n output = dask.compute(sbm_out)\n\n client.close()\n cluster.close()\n return output", "def split_by_cost(cost_list, \n comm=None, \n return_work=False,\n return_all=False):\n if comm == None:\n comm = MPI.COMM_WORLD\n \n size = comm.Get_size()\n rank = comm.Get_rank()\n\n ### Total cost of job_list\n total = np.sum(cost_list) \n ### Ideal work for each rank\n max_work = (total / size)*1.01\n \n ### Preparing indices that each rank will use\n work_idx = [[] for x in range(size)]\n work_sum = [0 for x in range(size)]\n current_worker = 0\n withheld_idx_list = []\n withheld_value_list = []\n for idx,value in enumerate(cost_list):\n ## Decide whether to withhold value\n if work_sum[current_worker] + value > max_work*1.05:\n withheld_idx_list.append(idx)\n withheld_value_list.append(value)\n continue\n \n work_idx[current_worker].append(idx)\n work_sum[current_worker] += value\n if work_sum[current_worker] > max_work:\n current_worker += 1\n \n withheld_idx_list = np.array(withheld_idx_list)\n withheld_value_list = np.array(withheld_value_list)\n withheld_sort_idx = np.argsort(withheld_idx_list)\n withheld_idx_list = withheld_idx_list[withheld_sort_idx]\n withheld_value_list = withheld_value_list[withheld_sort_idx]\n for idx,withheld_idx in enumerate(withheld_idx_list):\n min_idx = np.argmin(work_sum)\n work_sum[min_idx] += withheld_value_list[idx]\n work_idx[min_idx].append(withheld_idx)\n \n my_list = work_idx[rank]\n \n if not return_all:\n if not return_work:\n return my_list\n else:\n return my_list,work_sum[rank]\n else:\n if not return_work:\n return work_idx\n else:\n return work_idx,work_sum", "def mi_from_dm(distance_matrix, ns, nh, 
spike_train_list=None):\n \n nr = len(distance_matrix)\n nt = nr/ns\n nearest_neighbours = np.array([r.argsort()[:nh] for r in distance_matrix])\n \n if spike_train_list is not None:\n\n members_of_glob = trains_in_glob(spike_train_list)\n glob_comp = glob_composition(spike_train_list, ns, nt, nh)\n\n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 0\n \n if i not in members_of_glob:\n for j in nearest_neighbours[i]:\n if j not in members_of_glob:\n if spike_train_list[i].start_time == spike_train_list[j].start_time:\n c_i += 1 # count neigbours out of glob\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += (nh - c_i)*f_i # if one neighbour is in glob, all following neighb are as well\n break\n counts.append(c_i)\n else:\n f_i = glob_comp[i]/float(sum(glob_comp.values()))\n c_i += 1 + (nh - 1)*f_i #If in glob, take fraction of remaining neighbours except you\n counts.append(c_i)\n \n counts = np.array(counts)\n \n else:\n \n counts = []\n for i in range(len(nearest_neighbours)):\n c_i = 1\n for j in nearest_neighbours[i]:\n if (i != j and abs(i - j)%ns==0 ):\n c_i += 1 \n counts.append(c_i)\n counts = np.array(counts) \n \n I = sum(np.log2(counts*ns/float(nh))) / float(nr)\n\n return I", "def _get_mass_nsm(model, element_ids, mass_ids,\n all_eids, all_mass_ids, etypes_skipped,\n etype, eids, xyz,\n length_eids_pids, nsm_centroids_length, lengths,\n area_eids_pids, nsm_centroids_area, areas,\n mass, cg, I, reference_point):\n element_ids = set(element_ids)\n mass_ids = set(mass_ids)\n if etype in ['CROD', 'CONROD']:\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2 = elem.node_ids\n length = norm(xyz[n2] - xyz[n1])\n centroid = (xyz[n1] + xyz[n2]) / 2.\n mpl = elem.MassPerLength()\n if elem.type == 'CONROD':\n #nsm = property_nsms[nsm_id]['CONROD'][eid] + element_nsms[nsm_id][eid]\n length_eids_pids['CONROD'].append((eid, -42)) # faked number\n lengths['CONROD'].append(length)\n nsm_centroids_length['CONROD'].append(centroid)\n else:\n pid = elem.pid\n #nsm = property_nsms[nsm_id]['PROD'][pid] + element_nsms[nsm_id][eid]\n length_eids_pids['PROD'].append((eid, pid))\n nsm_centroids_length['PROD'].append(centroid)\n lengths['PROD'].append(length)\n #m = (mpl + nsm) * length\n massi = mpl * length\n #if massi != elem.Mass(): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n%s' % (massi, elem.Mass(), str(elem))\n #raise RuntimeError(msg)\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, massi, mass, cg, I)\n elif etype == 'CTUBE':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n pid = elem.pid\n n1, n2 = elem.node_ids\n length = norm(xyz[n2] - xyz[n1])\n centroid = (xyz[n1] + xyz[n2]) / 2.\n mpl = elem.pid_ref.MassPerLength()\n length_eids_pids['PTUBE'].append((eid, pid))\n lengths['PTUBE'].append(length)\n #nsm = property_nsms[nsm_id]['PTUBE'][pid] + element_nsms[nsm_id][eid]\n #m = (mpl + nsm) * length\n massi = mpl * length\n #if massi != elem.Mass(): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n%s' % (massi, elem.Mass(), str(elem))\n #raise RuntimeError(msg)\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, massi, mass, cg, I)\n elif etype == 'CBAR':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n pid = elem.pid\n n1, n2 = elem.node_ids\n centroid = (xyz[n1] + xyz[n2]) / 2.\n length = norm(xyz[n2] - xyz[n1])\n mpl = 
elem.pid_ref.MassPerLength()\n length_eids_pids['PBAR'].append((eid, pid))\n lengths['PBAR'].append(length)\n nsm_centroids_length['PBAR'].append(centroid)\n #nsm = property_nsms[nsm_id]['PBAR'][pid] + element_nsms[nsm_id][eid]\n #m = (mpl + nsm) * length\n massi = mpl * length\n #if massi != elem.Mass() or not np.array_equal(centroid, elem.Centroid()): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n' % (massi, elem.Mass())\n #msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n #str(centroid), str(elem.Centroid()), str(elem))\n #raise RuntimeError(msg)\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, massi, mass, cg, I)\n\n elif etype == 'CBEAM':\n mass = _get_cbeam_mass(\n model, xyz, element_ids, all_eids,\n length_eids_pids, lengths, nsm_centroids_length,\n eids, mass, cg, I, reference_point)\n\n elif etype in ['CTRIA3', 'CTRIA6', 'CTRIAR']:\n mass = _get_tri_mass(\n model, xyz, element_ids, all_eids,\n area_eids_pids, areas, nsm_centroids_area,\n eids, mass, cg, I, reference_point)\n\n elif etype in ['CQUAD4', 'CQUAD8', 'CQUADR']:\n mass = _get_quad_mass(\n model, xyz, element_ids, all_eids,\n area_eids_pids, areas, nsm_centroids_area,\n eids, mass, cg, I, reference_point)\n\n elif etype == 'CQUAD':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2, n3, n4 = elem.node_ids[:4]\n prop = elem.pid_ref\n centroid = (xyz[n1] + xyz[n2] + xyz[n3] + xyz[n4]) / 4.\n area = 0.5 * norm(cross(xyz[n3] - xyz[n1], xyz[n4] - xyz[n2]))\n\n if prop.type == 'PSHELL':\n t = prop.Thickness()\n mpa = prop.nsm + prop.Rho() * t\n elif prop.type in ['PCOMP', 'PCOMPG']:\n mpa = prop.get_mass_per_area()\n elif prop.type == 'PLPLANE':\n continue\n #raise NotImplementedError(prop.type)\n else:\n raise NotImplementedError(prop.type)\n m = area * mpa\n if CHECK_MASS and (m != elem.Mass() or not np.array_equal(centroid, elem.Centroid())): # pragma: no cover\n msg = 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n str(centroid), str(elem.Centroid()), str(elem))\n raise RuntimeError(msg)\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n\n elif etype == 'CSHEAR':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2, n3, n4 = elem.node_ids\n prop = elem.pid_ref\n pid = elem.pid\n centroid = (xyz[n1] + xyz[n2] + xyz[n3] + xyz[n4]) / 4.\n area = 0.5 * norm(cross(xyz[n3] - xyz[n1], xyz[n4] - xyz[n2]))\n mpa = prop.MassPerArea()\n\n area_eids_pids['PSHEAR'].append((eid, pid))\n areas['PSHEAR'].append(area)\n nsm_centroids_area['PSHEAR'].append(centroid)\n\n #nsm = property_nsms[nsm_id]['PSHEAR'][pid] + element_nsms[nsm_id][eid]\n #m = area * (mpa + nsm)\n m = area * mpa\n #if m != elem.Mass() or not np.array_equal(centroid, elem.Centroid()): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n #msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n #str(centroid), str(elem.Centroid()), str(elem))\n #raise RuntimeError(msg)\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n elif etype == 'CONM2':\n eids2 = get_sub_eids(all_mass_ids, eids, etype)\n for eid in eids2:\n elem = model.masses[eid]\n centroid, m, dI = elem.centroid_mass_inertia()\n di_list = [dI[0][0], dI[1][1], dI[2][2], dI[0][1], dI[0][2], dI[1][2]]\n if eid in mass_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n I = [i1 + di 
for i1, di in zip(I, di_list)]\n\n elif etype in ['CONM1', 'CMASS1', 'CMASS2', 'CMASS3', 'CMASS4']:\n eids2 = get_sub_eids(all_mass_ids, eids, etype)\n for eid in eids2:\n elem = model.masses[eid]\n m = elem.Mass()\n centroid = elem.Centroid()\n if eid in mass_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n elif etype == 'CTETRA':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2, n3, n4 = elem.node_ids[:4]\n centroid = (xyz[n1] + xyz[n2] + xyz[n3] + xyz[n4]) / 4.\n #V = -dot(n1 - n4, cross(n2 - n4, n3 - n4)) / 6.\n volume = -dot(xyz[n1] - xyz[n4], cross(xyz[n2] - xyz[n4], xyz[n3] - xyz[n4])) / 6.\n m = elem.Rho() * volume\n #if m != elem.Mass() or not np.array_equal(centroid, elem.Centroid()): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n #msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n #str(centroid), str(elem.Centroid()), str(elem))\n #raise RuntimeError(msg)\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n\n elif etype == 'CPYRAM':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2, n3, n4, n5 = elem.node_ids[:5]\n centroid1 = (xyz[n1] + xyz[n2] + xyz[n3] + xyz[n4]) / 4.\n area1 = 0.5 * norm(cross(xyz[n3]-xyz[n1], xyz[n4]-xyz[n2]))\n centroid5 = xyz[n5]\n\n #V = (l * w) * h / 3\n #V = A * h / 3\n centroid = (centroid1 + centroid5) / 2.\n\n #(n1, n2, n3, n4, n5) = self.get_node_positions()\n #area1, c1 = area_centroid(n1, n2, n3, n4)\n #volume = area1 / 3. * norm(c1 - n5)\n volume = area1 / 3. * norm(centroid1 - centroid5)\n m = elem.Rho() * volume\n if CHECK_MASS and (m != elem.Mass() or not np.array_equal(centroid, elem.Centroid())): # pragma: no cover\n msg = 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n str(centroid), str(elem.Centroid()), str(elem))\n raise RuntimeError(msg)\n #print('*eid=%s type=%s mass=%s rho=%s V=%s' % (\n #elem.eid, 'CPYRAM', m, elem.Rho(), volume))\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n\n elif etype == 'CPENTA':\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2, n3, n4, n5, n6 = elem.node_ids[:6]\n area1 = 0.5 * norm(cross(xyz[n3] - xyz[n1], xyz[n2] - xyz[n1]))\n area2 = 0.5 * norm(cross(xyz[n6] - xyz[n4], xyz[n5] - xyz[n4]))\n centroid1 = (xyz[n1] + xyz[n2] + xyz[n3]) / 3.\n centroid2 = (xyz[n4] + xyz[n5] + xyz[n6]) / 3.\n centroid = (centroid1 + centroid2) / 2.\n volume = (area1 + area2) / 2. 
* norm(centroid1 - centroid2)\n m = elem.Rho() * volume\n #if m != elem.Mass() or not np.array_equal(centroid, elem.Centroid()): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n #msg += 'centroid_new=%s centroid_old=%s\\n%s' % (\n #str(centroid), str(elem.Centroid()), str(elem))\n #raise RuntimeError(msg)\n #print('*eid=%s type=%s mass=%s rho=%s V=%s' % (\n #elem.eid, 'CPENTA', m, elem.Rho(), volume))\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n\n elif etype in ['CHEXA', 'CHEXA1', 'CHEXA2']:\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n n1, n2, n3, n4, n5, n6, n7, n8 = elem.node_ids[:8]\n #(A1, c1) = area_centroid(n1, n2, n3, n4)\n centroid1 = (xyz[n1] + xyz[n2] + xyz[n3] + xyz[n4]) / 4.\n area1 = 0.5 * norm(cross(xyz[n3] - xyz[n1], xyz[n4] - xyz[n2]))\n #(A2, c2) = area_centroid(n5, n6, n7, n8)\n centroid2 = (xyz[n5] + xyz[n6] + xyz[n7] + xyz[n8]) / 4.\n area2 = 0.5 * norm(cross(xyz[n7] - xyz[n5], xyz[n8] - xyz[n6]))\n\n volume = (area1 + area2) / 2. * norm(centroid1 - centroid2)\n m = elem.Rho() * volume\n centroid = (centroid1 + centroid2) / 2.\n #if m != elem.Mass() or not np.array_equal(centroid, elem.Centroid()): # pragma: no cover\n #msg = 'mass_new=%s mass_old=%s\\n' % (m, elem.Mass())\n #msg = 'centroid_new=%s centroid_old=%s\\n%s' % (\n #str(centroid), str(elem.Centroid()), str(elem))\n #raise RuntimeError(msg)\n #print('*centroid1=%s centroid2=%s' % (str(centroid1), str(centroid2)))\n #print('*area1=%s area2=%s length=%s' % (area1, area2, norm(centroid1 - centroid2)))\n #print('*eid=%s type=%s mass=%s rho=%s V=%s' % (\n #elem.eid, 'CHEXA', m, elem.Rho(), volume))\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n\n elif etype == 'CBEND':\n model.log.info('elem.type=%s mass is innaccurate' % etype)\n #nsm = property_nsms[nsm_id]['PBEND'][pid] + element_nsms[nsm_id][eid]\n eids2 = get_sub_eids(all_eids, eids, etype)\n for eid in eids2:\n elem = model.elements[eid]\n m = elem.Mass()\n centroid = elem.Centroid()\n if eid in element_ids:\n mass = _increment_inertia(centroid, reference_point, m, mass, cg, I)\n\n elif etype in ['CQUADX']:\n pass\n elif etype in ['CTRIAX', 'CTRIAX6']:\n mass = _mass_catch_all(model, etype, etypes_skipped,\n element_ids, all_eids, eids,\n mass, cg, I, reference_point)\n elif etype in ['CSUPER', 'CSUPEXT']:\n pass\n elif etype.startswith('C'):\n model.log.warning('etype=%r should be explicit' % etype)\n #raise RuntimeError('etype=%r should be explicit' % etype) ## TODO: this is temporary\n mass = _mass_catch_all(model, etype, etypes_skipped,\n element_ids, all_eids, eids,\n mass, cg, I, reference_point)\n\n #property_nsms[nsm_id][nsm.nsm_type][nsm_idi]\n #for nsm_id, prop_types in sorted(property_nsms.items()):\n #for prop_type, prop_id_to_val in sorted(prop_types.items()):\n #for pid, val in sorted(prop_id_to_val.items()):\n #TODO: CRAC2D mass not supported...how does this work???\n # I know it's an \"area\" element similar to a CQUAD4\n #TODO: CCONEAX mass not supported...how does this work???\n #TODO: CBEND mass not supported...how do I calculate the length?\n\n #area_eids['PSHELL'].append(eid)\n #areas['PSHELL'].append(area)\n return mass, cg, I", "def ol_mpi_send(data, dest: int, tag: int):\n import numba_mpi\n\n def impl(data, dest: int, tag: int) -> None:\n \"\"\"reduce a single number across all cores\"\"\"\n status = numba_mpi.send(data, dest, tag)\n assert status == 
0\n\n return impl", "def connector_mediation(task):\n\tatlas = 'power'\n\tproject='hcp'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tsubjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))\n\tstatic_results = graph_metrics(subjects,task,atlas,run_version='fz')\n\tmatrices = static_results['matrices']\n\tsubject_pcs = static_results['subject_pcs']\n\tsubject_mods = static_results['subject_mods']\n\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\tfor i in range(subject_pcs.shape[1]):\n\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\tmean_conn = np.nanmean(matrices,axis=0)\n\te_tresh = np.percentile(mean_conn,85)\n\tsubject_pcs[np.isnan(subject_pcs)] = 0.0\n\tm = np.zeros((264,264,264))\n\tpool = Pool(40)\n\tfor n in range(264):\n\t\tprint n\n\t\tsys.stdout.flush()\n\t\tvariables = []\n\t\tfor i,j in combinations(range(264),2):\n\t\t\tvariables.append(pd.DataFrame(data={'pc':subject_pcs[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))\n\t\tresults = pool.map(multi_med,variables)\n\t\tfor r,i in zip(results,combinations(range(264),2)):\n\t\t\tm[n,i[0],i[1]] = r\n\t\t\tm[n,i[1],i[0]] = r\n\t\tnp.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s.npy'%(task),m)", "def prim_MST():\n node_list = list(node_set)\n s = random.choice(node_list)\n\n val_s = -exp_node_weights[s][1] + exp_node_weights[s][0] # value of starting node\n value = 0 # MST value\n cost = 0 # MST cost\n add_cost = 0\n compensation = 0 # compensation for edges that are counted twice\n\n prev = [0]*no_nodes\n dist = [math.inf]*no_nodes\n S = set()\n H = Heap(no_nodes)\n H.insert(s, val_s, 0)\n dist[s] = val_s\n\n for v in range(no_nodes):\n H.insert(v, math.inf, 0)\n\n while H.size > 0:\n v = H.delete_min()\n if v[1] > 0: # min in Heap is of positive value, i.e., a cost, abort\n break\n\n # abort if out of budget\n cost += exp_node_weights[v[0]][0]\n if cost > budget:\n #print(value, compensation)\n MST_value = -(value-compensation)\n return (S, MST_value)\n\n # complementarity\n for node in S:\n for adjacent in exp_graph[node]:\n if adjacent[0] == v[0]:\n value -= adjacent[1]\n\n S.add(v[0])\n value += v[1]\n compensation += v[2] # necessary since edge weight was added to node quality already\n\n for w in exp_graph[v[0]]:\n if not w[0] in S:\n if dist[w[0]] > w[1]:\n d = -w[1] - exp_node_weights[w[0]][1] + exp_node_weights[w[0]][0] # negate for maximum spanning tree\n if d > 0: # bad/costly node\n continue\n dist[w[0]] = d\n comp = -w[1]\n prev[w[0]] = v[0]\n H.decrease_key(w[0], dist[w[0]], comp)\n\n MST_value = -(value-compensation)\n del H\n return (S, MST_value, cost, add_cost)", "def test_mpi_code() -> None:\n print(\"MPI code checking is disabled\")", "def gather_EMData(data, number_of_proc, myid, main_node):\n\tfrom mpi import MPI_COMM_WORLD, MPI_INT, MPI_TAG_UB\n\tfrom mpi import mpi_send, mpi_recv\t\n\n\tl = len(data)\n\tgathered_data = []\n\tinc = 1 # A temp measure\n\tif myid == main_node:\n\t\tfor i in xrange(0, number_of_proc*inc, inc):\n\t\t\tif i == main_node:\n\t\t\t\tfor k in xrange(l):\n\t\t\t\t\tgathered_data.append(data[k])\n\t\t\telse:\n\t\t\t\tfor k in xrange(l):\n\t\t\t\t\tim = recv_EMData(i, i*l+k)\n\t\t\t\t\tmem_len = mpi_recv(1, MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)\n\t\t\t\t\tmembers = mpi_recv(int(mem_len[0]), MPI_INT, i, MPI_TAG_UB, MPI_COMM_WORLD)\n\t\t\t\t\tmembers = map(int, members)\n\t\t\t\t\tim.set_attr('members', 
members)\n\t\t\t\t\tgathered_data.append(im)\n\telse:\n\t\tfor k in xrange(l):\n\t\t\tsend_EMData(data[k], main_node, myid*l+k)\n\t\t\tmem = data[k].get_attr('members')\n\t\t\tmpi_send(len(mem), 1, MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)\n\t\t\tmpi_send(mem, len(mem), MPI_INT, main_node, MPI_TAG_UB, MPI_COMM_WORLD)\n\treturn gathered_data", "def _calc_message(self,\n src_node, # source of the message\n dst_set, # a set of destinations,\n upward,\n ):\n # incoming messages are from these clusters\n incoming = set(self.children[src_node])\n if self.parents[src_node] is not None:\n incoming.add(self.parents[src_node])\n if upward:\n incoming.difference_update(dst_set) # only has one destination\n assert len(dst_set) == 1, \"should have a single receiver in the upward pass!\"\n factor = self.clique_potentials[src_node].copy()\n clique_vars = self.cliques[src_node]\n for r in incoming:\n sepset = list(set(self.cliques[r]).intersection(set(clique_vars)))\n # find the index of sepset in the clique potential\n inds = sorted([clique_vars.index(i) for i in sepset])\n # multiply with the incoming message from the child\n factor = tensor_mult(factor, self.messages[(r,src_node)], inds, list(range(len(sepset))))\n for dst_node in dst_set:\n tmp_factor = factor.copy()\n if not upward: # divide out the incoming message to produce the outgoing message\n sepset = set(self.cliques[dst_node]).intersection(set(clique_vars))\n # find the index of sepset in the clique potential\n inds = sorted([clique_vars.index(i) for i in sepset])\n # multiply with the incoming message from the child\n tmp_factor = tensor_mult(tmp_factor, 1./self.messages[(dst_node,src_node)], inds, list(range(len(sepset))))\n outgoing_vars = set(clique_vars).intersection(set(self.cliques[dst_node]))\n sum_over_vars = set(clique_vars) - set(outgoing_vars)\n sum_over_vars_inds = sorted([clique_vars.index(i) for i in sum_over_vars])\n msg = np.sum(tmp_factor, axis=tuple(sum_over_vars_inds))\n if self._normalize_messages:\n msg /= np.sum(msg)\n self.messages[(src_node,dst_node)] = msg\n if self._verbosity > 2:\n print(\"{} -> ({})-> {}\".format(clique_vars, outgoing_vars ,self.cliques[dst_node]), flush=True)\n return factor # is used to set the clique-marginals in the downward pass", "def mpi_command(pars):\n mpi_run = 'mpirun'\n if pars['scheduler'] == 'direct':\n mpi_run += ' ' + '-np %s'%pars['mpi']\n if pars['scheduler'] == 'slurm':\n ntasks = pars['ntasks_per_node']*pars['nodes']\n mpi_run += ' ' + '-np %s'%ntasks\n if pars['map_by'] is not None:\n mpi_run += ' ' + '--map-by %s:PE=%s'%(pars['map_by'],pars['pe'])\n if pars['rank_by'] is not None:\n mpi_run += ' ' + '--rank-by %s'%pars['rank_by']\n\n return mpi_run", "def sim_split_no_mig_size(params, ns):\n #9 parameters\t\n nuA, nu1a, nu1b, nu2a, nu2b, nu3a, nu3b, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n fs.integrate(nu_T1, T1)\n ## Population function for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2) \n return fs", "def process_aGMK_MC_procedure(X,ID,Y,grid_search,acc=\"F1Mean\",REP=30,test_prop=0.25,n_folds=3,n_jobs=-1,RS=0,VERBOSE=False):\n \n #Get the objects and their labels\n O, labels = model_objects(X, ID, Y)\n #Model the objects by a Gaussian distribution (mean vector and covariance matrix)\n G = 
[GaussianObject(o.x) for o in O]\n \n #Parameters of the classification \n grid_search_ = dict([\n ('svm__kernel',['precomputed']),\n ('svm__C',grid_search['C']),\n ('Kernel__alpha', grid_search['ALPHA']),\n ('Kernel__gamma',grid_search['GAMMA']),\n ])\n \n kernel = aGMK() \n #Process the classifications in parallel\n results = Parallel(n_jobs=n_jobs,verbose=False)(delayed(mc_procedure)(i,G,labels,kernel,grid_search_,acc,test_prop,n_folds,RS,VERBOSE) for i in range(REP))\n \n #Reshape the results\n CM = [results[i][0] for i in range(REP)]\n YP = [results[i][1] for i in range(REP)]\n best_params = [results[i][2] for i in range(REP)]\n\n return CM, YP, best_params", "def test_spatial_smoothing_xesmf_reduce_spatial_dims_MPI_curv(\r\n PM_ds_control_3d_full,\r\n):\r\n da = PM_ds_control_3d_full\r\n step = 5\r\n actual = spatial_smoothing_xesmf(\r\n da,\r\n d_lon_lat_kws={\"lon\": step},\r\n )\r\n expected_lat_size = 180 // step\r\n assert actual[\"lon\"].size < da.lon.size\r\n assert actual[\"lat\"].size == expected_lat_size", "def run_split_with_solids(self,mc):\n top, bot = self.outs\n feed = self.ins[0]\n top.copy_like(feed)\n bot.copy_like(top)\n top_mass = top.mass\n F_mass_solids = sum(top_mass*self.split)\n TS=1-mc\n F_mass_tot = F_mass_solids/TS\n F_mass_wat = F_mass_tot - F_mass_solids\n top_mass[:] *= self.split\n top.imass['Water']=F_mass_wat\n bot.mass[:] -= top_mass", "def sim_split_no_mig(params, ns):\n #5 parameters\t\n nuA, nu1, nu2, nu3, T1 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function for T1\n nu_T1 = [nu1, nu2, nu3]\n fs.integrate(nu_T1, T1) \n return fs", "def test_result_reduce_ddp():\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n worldsize = 2\n mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)" ]
[ "0.5712891", "0.56743956", "0.5644941", "0.56176066", "0.5497657", "0.5445917", "0.53922945", "0.53776425", "0.53776425", "0.5272448", "0.5257341", "0.52249587", "0.5215293", "0.52147806", "0.51855075", "0.51734966", "0.51608145", "0.5157823", "0.51528966", "0.5091743", "0.5091389", "0.50885856", "0.5088417", "0.50818896", "0.50680315", "0.50559306", "0.50522375", "0.50311697", "0.50220233", "0.502182" ]
0.6318238
0
Convenient inner loop for mst tile saving
def _save_mst_tile(tile, i, preread_ifgs): mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params) # locally save the mst_mat mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i)) np.save(file=mst_file_process_n, arr=mst_tile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def batch_save_tile_img(tiles_gdf, tif, tile_size, region, zone, save_path, display=False):\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n save_tile_img(tif, tile['xyz'], dataset, tile_size, region, zone, save_path, display=False)", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done saving .npy!\")", "def batch_save_tile_mask(tiles_gdf, label_poly_series, tile_size, region, zone, save_path, channels=3, display=False):\n \n import warnings; warnings.simplefilter('ignore')\n\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n tile_poly = get_specific_tile(idx, tiles_gdf)\n save_tile_mask(label_poly_series, tile_poly, tile['xyz'], tile_size, dataset,\n region, zone, save_path, channels, display)", "def build_tiles(img,tilefile,tilesize,options=[]):\n\tlevels=ceil(log(max(img.get_xsize(),img.get_ysize())/tilesize)/log(2.0))\n\t\n\ttf=file(tilefile,\"w\")\n\t\n\ttile_dict={}\n\tpos=0\n\timg2=img.copy()\n\txs,ys=img2.get_xsize(),img2.get_ysize()\n\tfor l in range(int(levels)):\n\t\trmin=img2.get_attr(\"mean\")-img2.get_attr(\"sigma\")*3.0\n\t\trmax=img2.get_attr(\"mean\")+img2.get_attr(\"sigma\")*3.0\n\t\tfor x in range(0,img2.get_xsize(),tilesize):\n\t\t\tfor y in range(0,img2.get_ysize(),tilesize):\n\t\t\t\ti=img2.get_clip(Region(x,y,tilesize,tilesize))\n\t\t\t\ti.set_attr(\"render_min\",rmin)\n\t\t\t\ti.set_attr(\"render_max\",rmax)\n\t\t\t\ti.set_attr(\"jpeg_quality\",70)\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ti.write_image(fsp)\n\t\t\t\tsz=os.stat(fsp).st_size\n\t\t\t\ttile_dict[(l,x/tilesize,y/tilesize)]=(pos,sz)\n\t\t\t\tpos+=sz\n\t\timg2.process_inplace(\"math.meanshrink\",{\"n\":2})\n\t\n\t# This will produce 2 power spectrum images in the tile file\n\t# with scale factors -1 and -2\n\tif \"pspec\" in options :\n\t\tnx,ny=img.get_xsize()/512,img.get_ysize()/512\n\t\ta=EMData()\n\t\ta.set_size(512,512)\n\t\tif (ny>2 and nx>2) :\n\t\t\tfor y in range(1,ny-1):\n\t\t\t\tfor x in range(1,nx-1):\n\t\t\t\t\tc=img.get_clip(Region(x*512,y*512,512,512))\n\t\t\t\t\tc.process_inplace(\"normalize\")\n\t\t\t\t\tc.process_inplace(\"math.realtofft\")\n\t\t\t\t\tc.process_inplace(\"math.squared\")\n\t\t\t\t\ta+=c\n\t\t\ta.set_value_at(256,256,0,.01)\n\t\t\ta-=a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.01\n\t\t\ta.process_inplace(\"math.log\")\n\t\t\ta-=a.get_attr(\"minimum\")\n\t\t\ta.set_attr(\"render_min\",a.get_attr(\"minimum\")-a.get_attr(\"sigma\")*.1)\n\t\t\ta.set_attr(\"render_max\",a.get_attr(\"mean\")+a.get_attr(\"sigma\")*4.0)\n\t\t\ta.set_attr(\"jepg_quality\",80)\n\t\t\ta.write_image(\"/tmp/tmpimg.mrc\")\n\t\t\tfsp=\"tmpimg.jpg\"\n\t\t\ta.write_image(fsp)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-1,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\t\n#\t\ttry:\n\t\t\timport matplotlib\n\t\t\tmatplotlib.use('Agg')\n\t\t\timport pylab\n\t\t\tmanager = 
pylab.get_current_fig_manager()\n\t\t\tapix=options[\"pspec\"]\n\t\t\tdx=1.0/(2.0*apix*256.0)\n\t\t\tx=pylab.arange(dx,dx*255.9,dx)\n\t\t\ty=a.calc_radial_dist(255,1,1,0)\t# radial power spectrum (log)\n\t\t\tpylab.figure(figsize=(8,6),dpi=96)\n\t\t\tpylab.axes([.08,.08,.9,.9], axisbg='w')\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.axis([0,dx*256,min(y),max(y)])\n\t\t\tpylab.xlabel(\"Spatial Freq. (1/A)\")\n\t\t\tpylab.ylabel(\"Log Intensity (10^x)\")\n#\t\t\tprint y\n\t\t\t\n\t\t\tfsp=\"tmpimg2.png\"\n\t\t\tpylab.savefig(fsp,dpi=96)\n\t\t\tsz=os.stat(fsp).st_size\n\t\t\ttile_dict[(-2,0,0)]=(pos,sz)\n\t\t\tpos+=sz\n\n#\t\texcept:\n#\t\t\tprint \"Unable to generate plot (need matplotlib)\"\n\t\t\t\n\t\n\tpickle.dump(tile_dict,tf)\n\t\n\tfor l in range(int(levels)):\n\t\tfor x in range(0,xs,tilesize):\n\t\t\tfor y in range(0,ys,tilesize):\n\t\t\t\tfsp=\"tmpimg.%d.%03d.%03d.jpg\"%(l,x/tilesize,y/tilesize)\n\t\t\t\ta=file(fsp,\"r\")\n\t\t\t\tb=a.read()\n\t\t\t\ta.close()\n\t\t\t\ttf.write(b)\n\t\t\t\tos.remove(fsp)\n\t\txs/=2\n\t\tys/=2\n\t\n\tif \"pspec\" in options :\n\t\tfor fsp in [\"tmpimg.jpg\",\"tmpimg2.png\"] :\n\t\t\ta=file(fsp,\"r\")\n\t\t\tb=a.read()\n\t\t\ta.close()\n\t\t\ttf.write(b)\n#\t\t\tos.remove(fsp)\n\t\n\ttf.close()", "def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def 
convert(self):\n self.tilewidth = int(self.tilewidth)\n self.tileheight = int(self.tileheight)\n self.width = int(self.width)\n self.height = int(self.height)\n self.pixel_width = self.width * self.tilewidth\n self.pixel_height = self.height * self.tileheight\n for layer in self.layers:\n self.named_layers[layer.name] = layer\n layer.opacity = float(layer.opacity)\n layer.x = int(layer.x)\n layer.y = int(layer.y)\n layer.width = int(layer.width)\n layer.height = int(layer.height)\n layer.pixel_width = layer.width * self.tilewidth\n layer.pixel_height = layer.height * self.tileheight\n layer.visible = bool(int(layer.visible))\n for tile_set in self.tile_sets:\n self.named_tile_sets[tile_set.name] = tile_set\n tile_set.spacing = int(tile_set.spacing)\n tile_set.margin = int(tile_set.margin)\n for img in tile_set.images:\n if img.trans:\n img.trans = (int(img.trans[:2], 16), int(img.trans[2:4], 16), int(img.trans[4:], 16))\n for obj_group in self.object_groups:\n obj_group.x = int(obj_group.x)\n obj_group.y = int(obj_group.y)\n obj_group.width = int(obj_group.width)\n obj_group.height = int(obj_group.height)\n for map_obj in obj_group.objects:\n map_obj.x = int(map_obj.x)\n map_obj.y = int(map_obj.y)\n map_obj.width = int(map_obj.width)\n map_obj.height = int(map_obj.height)", "def save_tiles(tiles, prefix=\"\", directory=os.getcwd(), format=\"png\"):\n for tile in tiles:\n tile.save(\n filename=tile.generate_filename(\n prefix=prefix, directory=directory, format=format\n ),\n format=format,\n )\n return tuple(tiles)", "def process_tile(tile):\n global base_kwds, resampling, src\n # Get the bounds of the tile.\n ulx, uly = mercantile.xy(\n *mercantile.ul(tile.x, tile.y, tile.z))\n lrx, lry = mercantile.xy(\n *mercantile.ul(tile.x + 1, tile.y + 1, tile.z))\n\n kwds = base_kwds.copy()\n kwds['transform'] = from_bounds(ulx, lry, lrx, uly, 256, 256)\n src_nodata = kwds.pop('src_nodata', None)\n dst_nodata = kwds.pop('dst_nodata', None)\n\n with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes),\n src_nodata=src_nodata,\n dst_nodata=dst_nodata,\n num_threads=1,\n resampling=resampling)\n\n data = bytearray(virtual_file_to_buffer('/vsimem/tileimg'))\n\n # Workaround for https://bugs.python.org/issue23349.\n if sys.version_info[0] == 2 and sys.version_info[2] < 10:\n # Check for backported bug fix before re-ordering\n\tif kwds['driver'] == 'PNG' and data[0:8] == png_header:\n # Properly constructed PNG, no need to re-order bytes\n pass\n\telif kwds['driver'] == 'JPEG' and data[0:4] == jpeg_header:\n # Properly constructed JPEG, no need to re-order bytes\n pass\n\telse:\n data[:] = data[-1:] + data[:-1]\n\n return tile, data", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile 
get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory", "def _assemble_tiles(i, n, tile, tsincr_g, output_dir, outtype):\n # pylint: disable=too-many-arguments\n tsincr_file = os.path.join(output_dir, '{}_{}.npy'.format(outtype, n))\n tsincr = np.load(file=tsincr_file)\n tsincr_g[tile.top_left_y:tile.bottom_right_y, tile.top_left_x:tile.bottom_right_x] = tsincr[:, :, i]", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def save(self):\n print(\"Clicked S(ave)\")\n saved_tiles = []\n for tile in self.tiles.sprites():\n # Append tiles pos to correct list if tile is occupied\n if not tile.is_available:\n tiles_attr = {\"type\": tile.tile_type, \"pos\": tile.rect.topleft}\n saved_tiles.append(tiles_attr)\n save_tiles(saved_tiles, lvl=\"02\")\n print(saved_tiles)\n # Flash white screen when level is saved\n self.surface.fill(s.WHITE)\n pygame.display.flip()\n pygame.time.wait(100)\n print(\"Saved\")", "def write_overview_tile(self, tx, ty, tz,tms_osm):\n\n image_format = self.get_overview_tile_format(tx, ty, tz)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n dsquery = self.mem_drv.Create('', 2*self.tile_size, 2*self.tile_size, num_bands)\n self.fill_init_dest(dsquery)\n # tms: z=19: 281626\n # -z=18-140813 176168*2=352336; 176168*2+1=352337\n # -- 352336,352337\n y_from=2*ty\n y_to=2*ty + 1\n ty_tms=ty;\n s_y_type=\"tms\"\n if tms_osm:\n # osm: z=19: 281626\n # -z=18-140813 85975*2+1=171951; 85975*2=171950\n # -- 171951,171950 [in range: last/end not used]\n y_from=2*ty + 1\n y_to=2*ty\n ty_tms=(2**tz-1) - ty\n s_y_type=\"osm\"\n s_tile_id=\"{0}-{1}-{2}.{3}\".format(str(tz), str(tx),str(ty),s_y_type)\n if self.verbose:\n # Build from zoom 19 tiles: (281626, 171951) (281627, 171951) (281626, 171950) (281627, 171950)\n print \"\\tBuild [\",s_tile_id,\"] from [\",self.output_dir,\"] zoom\", tz+1,\" tiles [\",s_y_type,\"]: \", (2*tx, y_from), (2*tx+1, y_from),(2*tx, y_to), (2*tx+1, y_to)\n\n for cx, cy, child_image_format in self.iter_children(tx, ty, tz):\n if (ty_tms==0 and cy==1) or (ty_tms!=0 and (cy % (y_from)) != 0):\n tileposy = 0\n else:\n tileposy = self.tile_size\n if tx:\n tileposx = cx % (2*tx) * self.tile_size\n elif tx==0 and cx==1:\n tileposx = self.tile_size\n else:\n tileposx = 0\n\n path = self.get_full_path(cx, cy, tz+1, format_extension[child_image_format])\n\n dsquerytile = gdal.Open(path, gdal.GA_ReadOnly)\n\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n dsquerytile.ReadRaster(0, 0, self.tile_size, self.tile_size),\n band_list=range(1, dsquerytile.RasterCount+1))\n\n if image_format == \"PNG\" and dsquerytile.RasterCount != num_bands:\n dsquery.WriteRaster(tileposx, tileposy, self.tile_size, self.tile_size,\n self.get_alpha_filler(), band_list=[num_bands])\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n self.resampler(path, dsquery, 
dstile, image_format)", "def put_next_tiles(p,tiles):\n if tiles['mode'].upper() == \"INIT\":\n set_value(p,tiles['0']['lig'],tiles['0']['col'],tiles['0']['val']) # mettre la vleur dans le plateau avec la position donnee de tiles\n set_value(p,tiles['1']['lig'],tiles['1']['col'],tiles['1']['val'])\n else:\n set_value(p,tiles['0']['lig'],tiles['0']['col'],tiles['0']['val'])", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def tiled_writing(red, nir, output):\n \n #open datasets\n src_red = rio.open(red)\n src_nir = rio.open(nir)\n \n #define raster properies and update datatype\n meta = src_red.meta.copy()\n meta.update({'dtype':'float32'}) # meta is a dictionary\n outfile = output\n #open outfile in writing mode with the properties of defined raster band\n with rio.open(outfile, 'w', **meta) as dst:\n #iterate over blocks of the bands, calculate ndvi for each block \n # and put the blocks back together\n for window in 
calc_tiles(src_red, tile_size_x, tile_size_y):\n red_block = src_red.read(window=window, masked=True)\n nir_block = src_nir.read(window=window, masked=True)\n #cast ndarrays to Float32 type\n red = red_block.astype('f4')\n nir = nir_block.astype('f4')\n #allow division by zero\n np.seterr(divide='ignore', invalid='ignore')\n #calculate ndvi and write raster\n ndvi = (nir - red) / (nir + red)\n dst.write(ndvi, window=window)\n\n #close dataset\n src_red.close()\n src_nir.close()\n return outfile", "def update(self, dt):\n store = self.entity_manager.get_all_components_of_type(tilemap.Tilemap)\n\n if store:\n for entity, component in store.iteritems():\n trender = self.entity_manager.get_component(entity, tilemap_render.TilemapRender)\n \n if trender and trender.need_to_update:\n \"\"\" Lets calculate the tiles. \"\"\"\n tiles_to_draw = ((int(math.floor(trender.world_y/component.tileheight)), int((trender.world_y+trender.view_height)/component.tileheight + 2)),\n (int(math.floor(-trender.world_x/component.tilewidth)), int((-trender.world_x+trender.view_width)/component.tilewidth + 2)))\n\n vertex_data = []\n texture_data = []\n color_data = []\n vertices = 0\n\n for y in range(tiles_to_draw[0][0], tiles_to_draw[0][1]):\n # 720 is screen height!\n y1 = (720 - trender.view_y) + component.tileheight * -y\n y2 = y1 - component.tileheight\n \n for x in range(tiles_to_draw[1][0], tiles_to_draw[1][1]):\n x1 = trender.view_x + component.tilewidth * x\n x2 = x1 + component.tilewidth\n \n \n for layer in reversed(component.layers):\n if (x,y) in layer.tiles:\n \n vertex_data.extend([x1, y2, x2, y2, x2, y1, x1, y1])\n texture_data.extend(component.tileset_bin.tiles[layer.tiles[(x,y)].gid].tex_coords)\n color_data.extend((255, 255, 255, 255)*4)\n \n vertices = vertices + 1\n \n trender.batch = pyglet.graphics.Batch()\n trender.batch.add(vertices*4, \n pyglet.gl.GL_QUADS, \n pyglet.graphics.TextureGroup(component.tileset_bin.atlas.texture),\n ('v2i', vertex_data),\n ('t3f', texture_data),\n ('c4B', color_data))\n \n trender.need_to_update = False", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def doTile(tile):\n global d, fmt, output, img, demag\n # get adjusted upper left coordinate for tile\n xstart,ystart=getCoords(tile)\n px = 256//demag\n tumor,blank=0,0\n for y in range(0,px):\n for x in range(0,px):\n curry,currx = y+ystart,x+xstart\n B,G,R = img.item(currx,curry,0),img.item(currx,curry,1),img.item(currx,curry,2)\n if B > 220 and G > 220 and R > 220:\n blank += 1\n if blank > (px**2)/2:\n print('removing %s' % tile)\n #os.remove(tile)\n return(1)\n if B < 70 and G > 180 and R < 70:\n tumor = 1\n print(\"%s tumor = %d\" % (tile,tumor))\n output.write(str(tumor)+',')\n \n blank = int(blank*2 > px**2)\n tumor = (tumor > 0)\n return(blank)", "def scns2tilecache_all_avail(self):\n scn_lst = self.get_scnlist_tilecache()\n for scn in scn_lst:\n self.scn2tilecache(scn)", "def generate_base_tiles(self):\n\n if not self.options.quiet:\n print(\"Generating Base Tiles:\")\n\n if self.options.verbose:\n print('')\n print(\"Tiles generated from the max zoom level:\")\n print(\"----------------------------------------\")\n print('')\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = 
self.tminmax[self.tmaxz]\n\n ds = self.warped_input_dataset\n tilebands = self.dataBandsCount + 1\n querysize = self.querysize\n\n if self.options.verbose:\n print(\"dataBandsCount: \", self.dataBandsCount)\n print(\"tilebands: \", tilebands)\n\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n ti = 0\n\n tile_details = []\n\n tz = self.tmaxz\n for ty in range(tmaxy, tminy-1, -1):\n for tx in range(tminx, tmaxx+1):\n\n ti += 1\n ytile = GDAL2Tiles.getYtile(ty, tz, self.options)\n tilefilename = os.path.join(\n self.output_folder, str(tz), '{0:04d}'.format(tx) + \"_\" + '{0:04d}'.format(ytile) + \".\" + self.tileext)\n if self.options.verbose:\n print(ti, '/', tcount, tilefilename)\n\n if self.options.resume and os.path.exists(tilefilename):\n if self.options.verbose:\n print(\"Tile generation skipped because of --resume\")\n continue\n\n # Create directories for the tile\n if not os.path.exists(os.path.dirname(tilefilename)):\n os.makedirs(os.path.dirname(tilefilename))\n\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:3857\n b = self.mercator.TileBounds(tx, ty, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty, tz)\n\n # Don't scale up by nearest neighbour, better change the querysize\n # to the native resolution (and return smaller query tile) for scaling\n\n if self.options.profile in ('mercator', 'geodetic'):\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])\n\n # Pixel size in the raster covering query geo extent\n nativesize = wb[0] + wb[2]\n if self.options.verbose:\n print(\"\\tNative Extent (querysize\", nativesize, \"): \", rb, wb)\n\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n\n else: # 'raster' profile:\n\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels\n ysize = self.warped_input_dataset.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty * tsize) - rysize\n\n wx, wy = 0, 0\n wxsize = int(rxsize/float(tsize) * self.tilesize)\n wysize = int(rysize/float(tsize) * self.tilesize)\n if wysize != self.tilesize:\n wy = self.tilesize - wysize\n\n # Read the source raster if anything is going inside the tile as per the computed\n # geo_query\n tile_details.append(\n TileDetail(\n tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,\n wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,\n )\n )\n\n conf = TileJobInfo(\n src_file=self.tmp_vrt_filename,\n nb_data_bands=self.dataBandsCount,\n output_file_path=self.output_folder,\n tile_extension=self.tileext,\n tile_driver=self.tiledriver,\n tile_size=self.tilesize,\n kml=self.kml,\n tminmax=self.tminmax,\n tminz=self.tminz,\n tmaxz=self.tmaxz,\n in_srs_wkt=self.in_srs_wkt,\n out_geo_trans=self.out_gt,\n ominy=self.ominy,\n is_epsg_4326=self.isepsg4326,\n options=self.options,\n )\n\n return conf, tile_details", "def save_tile(x,y,z,fpath):\n UA = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0\"\n tile_url = f\"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n # cmd = f\"wget --user-agent='please 
download' -O {fpath} {url}\"\n if os.path.exists(fpath):\n print(f\"Already have tile {fpath}!\")\n return 0\n if os.path.isdir(fpath):\n raise ValueError(f\"requested path {fpath} exists and is a directory!\")\n try:\n res = rq.get(\n url=tile_url,\n headers={'User-Agent': UA}\n )\n status = res.status_code\n if status == 200:\n with open(fpath,'wb') as of:\n of.write(res.content)\n return 0\n else:\n print(f\"Error: response {status} from server:\\n{res.reason}\")\n return status\n except Exception as e:\n print(f\"Error getting tile: {e}\")\n return 1", "def generate_base_tiles(self):\n\n gdal.SetConfigOption(\"GDAL_PAM_ENABLED\", \"NO\")\n\n print \"Generating Base Tiles:\"\n if self.options.verbose:\n #mx, my = self.out_gt[0], self.out_gt[3] # OriginX, OriginY\n #px, py = self.mercator.MetersToPixels( mx, my, self.tmaxz)\n #print \"Pixel coordinates:\", px, py, (mx, my)\n print\n print \"Tiles generated from the max zoom level:\"\n print \"----------------------------------------\"\n print\n\n\n # Set the bounds\n tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]\n querysize = self.querysize\n\n # Just the center tile\n #tminx = tminx+ (tmaxx - tminx)/2\n #tminy = tminy+ (tmaxy - tminy)/2\n #tmaxx = tminx\n #tmaxy = tminy\n\n #print tminx, tminy, tmaxx, tmaxy\n tcount = (1+abs(tmaxx-tminx)) * (1+abs(tmaxy-tminy))\n #print tcount\n ti = 0\n i_y_column_count=((tmaxy-tminy)+1)\n ds = self.out_ds\n tz = self.tmaxz\n if self.options.verbose:\n # tx in range(tminx, tmaxx+1) tminx[ 281596 ] tmaxx[ 281744 ] ; ((tmaxx-tmaxy)+1) x_tiles[ 23393 ]\n print \"\\ttz=[\",tz,\"] : tx in range(tminx, tmaxx+1) tminx[\",tminx,\"] tmaxx[\",tmaxx,\"] ; ((tmaxx-tmaxy)+1) x_tiles[\",tcount,\"]\"\n # ty_tms in range(tmaxy, tminy-1, -1) tmaxy[ 352409 ] tminy[ 352253 ] ; ((tmaxy-tminy)) y_tiles[ 157 ] 352409-(352253-1)\n print \"\\ttz=[\",tz,\"] : ty_tms in range(tmaxy, tminy-1, -1) tmaxy[\",tmaxy,\"] tminy[\",tminy,\"] ; ((tmaxy-tminy+1)) y_tiles[\",i_y_column_count,\"]\"\n if self.options.resume:\n i_count = self.tile_exists(0, 0, tz,2)\n if i_count == tcount:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; x/y-tiles of z[\",tz,\"] y_tiles[\",tcount,\"]\"\n return\n for tx in range(tminx, tmaxx+1):\n tmaxy_work=tmaxy\n if self.options.resume:\n i_count = self.tile_exists(tx, 0, tz,3)\n if i_count == i_y_column_count:\n if self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n break\n else:\n if i_count > 0:\n # this assums the rows are compleate, which may NOT be true\n tmaxy_work-=i_count\n if self.options.verbose:\n print \"\\tTile generation skipped to tmaxy[\",tmaxy_work,\"] because of --resume ; z =\",tz,\" ; y-tiles of x[\",tx,\"] y_tiles[\",i_y_column_count,\"]\"\n for ty_tms in range(tmaxy_work, tminy-1, -1): #range(tminy, tmaxy+1):\n ty_osm=self.flip_y(tz,ty_tms)\n ty=ty_tms\n if self.options.tms_osm:\n ty=ty_osm\n if self.stopped:\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n break\n ti += 1\n\n if self.options.resume:\n exists = self.tile_exists(tx, ty, tz,0)\n if exists and self.options.verbose:\n print \"\\tTile generation skipped because of --resume ; z =\",tz,\" ; x =\",tx,\" ; y_tms =\",ty_tms, \"; y_osm =\",ty_osm\n else:\n exists = False\n\n if not exists:\n if self.options.verbose:\n print ti, '/', tcount, self.get_verbose_tile_name(tx, ty, tz)\n # Don't scale up by nearest neighbour, better change the querysize\n # to 
the native resolution (and return smaller query tile) for scaling\n if self.options.profile in ('mercator','geodetic'):\n if self.options.profile == 'mercator':\n # Tile bounds in EPSG:900913\n b = self.mercator.TileBounds(tx, ty_tms, tz)\n elif self.options.profile == 'geodetic':\n b = self.geodetic.TileBounds(tx, ty_tms, tz)\n\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1])\n nativesize = wb[0]+wb[2] # Pixel size in the raster covering query geo extent\n if self.options.verbose:\n print \"\\tNative Extent (querysize\",nativesize,\"): \", rb, wb\n\n querysize = self.querysize\n # Tile bounds in raster coordinates for ReadRaster query\n rb, wb = self.geo_query( ds, b[0], b[3], b[2], b[1], querysize=querysize)\n\n rx, ry, rxsize, rysize = rb\n wx, wy, wxsize, wysize = wb\n else: # 'raster' or 'gearth' or 'garmin' profile:\n tsize = int(self.tsize[tz]) # tilesize in raster coordinates for actual zoom\n xsize = self.out_ds.RasterXSize # size of the raster in pixels\n ysize = self.out_ds.RasterYSize\n if tz >= self.nativezoom:\n querysize = self.tilesize # int(2**(self.nativezoom-tz) * self.tilesize)\n\n rx = (tx) * tsize\n rxsize = 0\n if tx == tmaxx:\n rxsize = xsize % tsize\n if rxsize == 0:\n rxsize = tsize\n\n rysize = 0\n if ty_tms == tmaxy:\n rysize = ysize % tsize\n if rysize == 0:\n rysize = tsize\n ry = ysize - (ty_tms * tsize) - rysize\n\n wx, wy = 0, 0\n\n wxsize, wysize = int(rxsize/float(tsize) * querysize), int(rysize/float(tsize) * querysize)\n if wysize != querysize:\n wy = querysize - wysize\n xyzzy = Xyzzy(querysize, rx, ry, rxsize, rysize, wx, wy, wxsize, wysize)\n try:\n if self.options.verbose:\n print ti,'/',tcount,' total ; z =',tz,' ; x =',tx,' ; y_tms =',ty_tms,' ; y_osm =',ty_osm\n print \"\\tReadRaster Extent: \", (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)\n self.write_base_tile(tx, ty, tz, xyzzy)\n except ImageOutputException, e:\n self.error(\"'%d/%d/%d': %s\" % (tz, tx, ty, e.message))\n\n if not self.options.verbose or self.is_subprocess:\n self.progressbar( ti / float(tcount) )\n if self.options.mbtiles:\n if self.mbtiles_db:\n self.mbtiles_db.close_db()\n self.mbtiles_db=None", "def updateScreenTiling(self,level):\n\n self.tile_list=[]\n self.objList=[]\n self.level=level\n\n self.rowCount=0\n \n for row in worldData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.tilType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.tile_list.append(tile)\n self.colCount+=1\n self.rowCount+=1\n \n self.rowCount=0\n for row in objectData[self.level]:\n self.colCount=0\n for tile in row:\n if tile!=0:\n img11=self.objType[tile-1]\n img=pygame.transform.scale(img11,(self.tileSize,self.tileSize))\n img_rect = img.get_rect()\n img_rect.x = self.colCount * self.tileSize\n img_rect.y = self.rowCount * self.tileSize\n tile= (img, img_rect)\n self.objList.append(tile)\n self.colCount+=1\n self.rowCount+=1", "def write_base_tile(self, tx, ty, tz, xyzzy):\n\n data_bands = range(1, self.data_bands_count+1)\n data = self.out_ds.ReadRaster(xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize,\n xyzzy.wxsize, xyzzy.wysize, band_list=data_bands)\n\n image_format = self.get_base_tile_format(tx, ty, tz, xyzzy)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n if self.verbose:\n print \"\\tReadRaster Extent: \", (xyzzy.rx, xyzzy.ry, 
xyzzy.rxsize, xyzzy.rysize),\n print 'z =',tz,' ; x =',tx,' ; y =',ty, (xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize)\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n\n # Query is in 'nearest neighbour' but can be bigger in then the tilesize\n # We scale down the query to the tilesize by supplied algorithm.\n if self.tile_size == xyzzy.querysize:\n self.fill_init_dest(dstile)\n\n # Use the ReadRaster result directly in tiles ('nearest neighbour' query)\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha, band_list=[num_bands])\n\n gdal_write(path, dstile, image_format)\n\n # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)\n # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)\n # TODO: Use directly 'near' for WaveLet files\n else:\n # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo\n dsquery = self.mem_drv.Create('', xyzzy.querysize, xyzzy.querysize, num_bands)\n self.fill_init_dest(dsquery)\n\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha,band_list=[num_bands])\n\n self.resampler(path, dsquery, dstile, image_format)\n\n self.alpha = None", "def renderMetaTile(z, x, y, ntiles, hypsoreliefMap, landcoverreliefMap, areasMap, oceanMap, contoursMap, featuresMap):\n hypsorelief = renderLayer('hypsorelief', z, x, y, ntiles, hypsoreliefMap, 'png')\n landcoverrelief = renderLayer('landcoverrelief', z, x, y, ntiles, landcoverreliefMap, 'png')\n areas = renderLayer('areas', z, x, y, ntiles, areasMap, 'png')\n ocean = renderLayer('ocean', z, x, y, ntiles, oceanMap, 'png', True)\n contours = renderLayer('contours', z, x, y, ntiles, contoursMap, 'png', True)\n features = renderLayer('features', z, x, y, ntiles, featuresMap, 'png', True)\n base_h = getComposite((hypsorelief, areas, ocean))\n base_l = getComposite((landcoverrelief, ocean))\n composite_h = getComposite((base_h, contours, features))\n composite_l = getComposite((base_l, contours, features))\n saveTiles(z, x, y, ntiles, 'composite_h', composite_h)\n saveTiles(z, x, y, ntiles, 'composite_l', composite_l)\n if SAVE_JPEG_COMPOSITE:\n basename = 'jpeg' + str(JPEG_COMPOSITE_QUALITY)\n saveTiles(z, x, y, ntiles, basename+'_h', composite_h, 'jpg', basename)\n saveTiles(z, x, y, ntiles, basename+'_l', composite_l, 'jpg', basename)\n if SAVE_INTERMEDIATE_TILES:\n saveTiles(z, x, y, ntiles, 'base_h', base_h)\n saveTiles(z, x, y, ntiles, 'base_l', base_l)\n saveTiles(z, x, y, ntiles, 'contours', contours)\n saveTiles(z, x, y, ntiles, 'hypsorelief', hypsorelief)\n saveTiles(z, x, y, ntiles, 'landcoverrelief', landcoverrelief)\n saveTiles(z, x, y, ntiles, 'areas', areas)\n saveTiles(z, x, y, ntiles, 'ocean', ocean)\n saveTiles(z, x, y, ntiles, 'features', features)", "def make_tiles(input_path, save_path, dimension):\n for filename in os.listdir(input_path):\n if filename.endswith(\".png\"):\n image_path = input_path + filename\n\n width, height = Image.open(image_path).size\n\n # Ensures image is square.\n assert width == height\n # Ensures the image can be cut into the desired dimensions.\n assert width % dimension == 0\n n_tiles = (width / 
dimension) ** 2\n\n tiles = image_slicer.slice(image_path, n_tiles, save=False)\n image_slicer.save_tiles(\n tiles, directory=save_path, prefix=filename[0:2], format=\"png\"\n )" ]
[ "0.7104713", "0.6770546", "0.66476554", "0.6564062", "0.6432503", "0.63820964", "0.63444513", "0.62283266", "0.62283266", "0.6224158", "0.6180002", "0.6177511", "0.61530757", "0.61273086", "0.61271673", "0.61231756", "0.60986423", "0.6098325", "0.6088457", "0.60603994", "0.6042453", "0.5976175", "0.59078926", "0.5896568", "0.58833724", "0.5880141", "0.5832504", "0.5832494", "0.57605195", "0.5755096" ]
0.73028386
0
Wrapper for reference pixel calculation
def _ref_pixel_calc(ifg_paths: List[str], params: dict) -> Tuple[int, int]: lon = params[cf.REFX] lat = params[cf.REFY] ifg = Ifg(ifg_paths[0]) ifg.open(readonly=True) # assume all interferograms have same projection and will share the same transform transform = ifg.dataset.GetGeoTransform() if lon == -1 or lat == -1: log.info('Searching for best reference pixel location') half_patch_size, thresh, grid = refpixel.ref_pixel_setup(ifg_paths, params) process_grid = mpiops.array_split(grid) refpixel.save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params) mean_sds = refpixel._ref_pixel_mpi(process_grid, half_patch_size, ifg_paths, thresh, params) mean_sds = mpiops.comm.gather(mean_sds, root=0) if mpiops.rank == MASTER_PROCESS: mean_sds = np.hstack(mean_sds) refpixel_returned = mpiops.run_once(refpixel.find_min_mean, mean_sds, grid) if isinstance(refpixel_returned, ValueError): from pyrate.core.refpixel import RefPixelError raise RefPixelError( "Reference pixel calculation returned an all nan slice!\n" "Cannot continue downstream computation. Please change reference pixel algorithm used before " "continuing.") refy, refx = refpixel_returned # row first means first value is latitude log.info('Selected reference pixel coordinate (x, y): ({}, {})'.format(refx, refy)) lon, lat = refpixel.convert_pixel_value_to_geographic_coordinate(refx, refy, transform) log.info('Selected reference pixel coordinate (lon, lat): ({}, {})'.format(lon, lat)) else: log.info('Using reference pixel from config file (lon, lat): ({}, {})'.format(lon, lat)) log.warning("Ensure user supplied reference pixel values are in lon/lat") refx, refy = refpixel.convert_geographic_coordinate_to_pixel_value(lon, lat, transform) log.info('Converted reference pixel coordinate (x, y): ({}, {})'.format(refx, refy)) refpixel.update_refpix_metadata(ifg_paths, refx, refy, transform, params) log.debug("refpx, refpy: "+str(refx) + " " + str(refy)) ifg.close() return int(refx), int(refy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def referencepixel(self, *args, **kwargs):\n return _coordsys.coordsys_referencepixel(self, *args, **kwargs)", "def calc(self, *args, **kwargs):\n return _image.image_calc(self, *args, **kwargs)", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def pixel_ref(self):\n return self._pixel_ref", "def imagecalc(self, *args, **kwargs):\n return _image.image_imagecalc(self, *args, **kwargs)", "def GetOutTextureCoord(self):\n ...", "def setreferencepixel(self, *args, **kwargs):\n return _coordsys.coordsys_setreferencepixel(self, *args, **kwargs)", "def GetInTextureCoord(self):\n ...", "def subtract_reference_pixels(img,no_channels=32,statfunc=biweight_location,vertical_smooth_window=15,array_size=2048):\n correctedStrips = []\n for channelstrip in np.split(img,np.arange(1,no_channels)*int(array_size/no_channels),axis=1):\n # Correct odd and even columns seperately\n topRefeven = statfunc(channelstrip[:4,0::2])\n topRefodd = statfunc(channelstrip[:4,1::2]) # Calculate median/mean of odd and even columns \n botRefeven = statfunc(channelstrip[-4:,0::2])\n botRefodd = statfunc(channelstrip[-4:,1::2])\n\n Corrected_channelstrip = channelstrip.copy()\n Corrected_channelstrip[:,0::2] = channelstrip[:,0::2] - np.linspace(topRefeven,botRefeven,channelstrip.shape[0])[:,np.newaxis]\n Corrected_channelstrip[:,1::2] = channelstrip[:,1::2] - np.linspace(topRefodd,botRefodd,channelstrip.shape[0])[:,np.newaxis]\n\n correctedStrips.append(Corrected_channelstrip)\n\n HRefSubtractedImg = np.hstack(correctedStrips)\n VRef = statfunc(np.hstack((HRefSubtractedImg[:,:4],HRefSubtractedImg[:,-4:])),axis=1)\n # Remove any DC offset at the edges which could arise due to low value columns in vertical reference pixels\n VRef = VRef - statfunc(np.concatenate((VRef[:4],VRef[-4:]))) # We can set it to zero since we have subtracted top and bottom reference pixels\n if vertical_smooth_window > 1:\n vsmoothdegree = 2 if vertical_smooth_window >= 5 else 1\n VRef = savgol_filter(VRef,window_length=vertical_smooth_window,polyorder=vsmoothdegree)\n return HRefSubtractedImg - VRef[:,np.newaxis]", "def setReference(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tsumSeuil = kargs.get('sumSeuil', 200)\n\t\trefSeuil = kargs.get('refSeuil', 150)\n\t\tinterval = kargs.get('interval', 0)\n\t\tcheck = kargs.get('check', False)\n\t\tcount = kargs.get('count', 1)\n\t\t\n\t\t# Image cumulative\n\t\tcumul = None\n\t\t\n\t\t# Capture image par image\n\t\tif count > 1: printf('Prise de reference sur %d prises... ' % count)\n\t\tfor i in xrange(count):\n\t\t\tif i and interval: time.sleep(interval/1000)\n\t\t\t\n\t\t\t# Prise d'image\n\t\t\tthis.getFrame()\n\t\t\t\n\t\t\t# Référence actuelle\n\t\t\tcurrent = this._FRAME\n\t\t\t\n\t\t\tif i: # Si ce n'est plus la première itération\n\t\t\t\t\n\t\t\t\tif check:\n\t\t\t\t\t# Détection d'un changement\n\t\t\t\t\tthis.detectByRef(seuil=refSeuil, ref=result, frame=current)\n\t\t\t\t\tsum = this.binary.sum()/255\n\t\t\t\t\tif sum > sumSeuil: # Crash\n\t\t\t\t\t\traise Exception(\"Don't interfere with the reference ! 
(%d)\" % sum)\n\t\t\t\t# END CHECK\n\t\t\t\t\n\t\t\t\t# Cumulation\n\t\t\t\tcumul += current\n\t\t\t\n\t\t\telse: # Première itération\n\t\t\t\tcumul = current.astype(int)\n\t\t\t\t\n\t\t\t# Calcul de l'image moyenne actuelle\n\t\t\tresult = (cumul / (i+1)).astype(np.uint8)\n\t\t###\n\t\t\n\t\tthis.resetBin()\n\t\tthis._REF = result\n\t\tif count > 1: print 'ok'\n\t\treturn result", "def intermediate_pixel(alpha, source_RGB, target_RGB):\n return int((1-alpha)*source_RGB+alpha*target_RGB)", "def __diff_image(self):\n img = cv2.imread(self.imagefile()).copy()\n Reference.__draw_bugs(img, self.__true_positives, False, 1)\n Reference.__draw_bugs(img, self.__false_negatives, (0, 255, 0))\n Reference.__draw_bugs(img, self.__false_positives, (0, 0, 255))\n return img", "def get_reference(event, x,y,flags, param):\n global refPt,frame\n if event == cv2.EVENT_LBUTTONDOWN:\n refPt = [(x, y)]\n refPt.append((x+80,y+80))\n #accessing the values within the rectange would use: image[refPt[0][0:1],refPt[1][0:1]]\n cv2.rectangle(frame,refPt[0],refPt[1],(255,255,0),2)\n cv2.imshow('Reference region made',frame)\n cv2.destroyAllWindows()\n \n color_data[\"refPt\"] = refPt", "def __call__(self, sample):\n x, y = sample\n return TF.to_tensor(x), (TF.to_tensor(y) * 255).int()", "def pix(fixture_position):\n virtual = proj.dot(fixture_position)\n return virtual[:2]/virtual[2]", "def test_add_refpix():\n data = np.ones((10, 10))\n refpix = (2, 3, 4, 5)\n\n new_array = bpd.add_refpix(data, refpix)\n yd, xd = new_array.shape\n print, xd, yd\n\n assert yd == 19\n assert xd == 15\n assert np.all(new_array[0:5, 5] == np.array([0, 0, 0, 0, 1]))\n assert np.all(new_array[13:, 5] == np.array([1, 0, 0, 0, 0, 0]))\n assert np.all(new_array[5, 0:4] == np.array([0, 0, 1, 1]))\n assert np.all(new_array[5, 10:] == np.array([1, 1, 0, 0, 0]))", "def intensity(self) -> int:", "def pixelvalue(self, *args, **kwargs):\n return _image.image_pixelvalue(self, *args, **kwargs)", "def _relative_bias(sim: xr.DataArray, ref: xr.DataArray) -> xr.DataArray:\n out = (sim - ref) / ref\n return out.assign_attrs(units=\"\")", "def __getitem__(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelF___getitem__(self, *args)", "def find_pixels(self):\n ref_image=Image.open('sample0000.png')\n imarray=np.array(ref_image)\n ref_image.close()\n self.number_of_pix=imarray.shape\n print self.number_of_pix\n ref_image=None\n imarray=None", "def find_reddest_pixel_fast(img): \n img = np.array(img, dtype = 'int32')\n location = cv2.minMaxLoc((img[:, :, 2] - img[:, :, 1]) + (img[:, :, 2] - img[:, :, 0]))[3]\n return location", "def __getitem__(self, *args):\n return _itkRGBAPixelPython.itkRGBAPixelUS___getitem__(self, *args)", "def division(self, x,y,a,b):\n real = (a*x + b*y)/(a*a + b*b)\n img = (a*y - b*x)/(a*a + b*b)\n return real, img", "def reference(self, images, subset_size):\n reference_image = np.mean([self.subset(image_, subset_size)\n for image_ in images], axis=0)\n\n gradient_0, gradient_1 = np.gradient(reference_image)\n gradient_magnitude = np.sqrt(gradient_0**2 + gradient_1**2)\n\n return reference_image, gradient_0, gradient_1, gradient_magnitude", "def __call__(self, x, y):\n return numpy.full_like(x, self.surfbrightness, dtype=float) \\\n if isinstance(x, numpy.ndarray) else self.surfbrightness", "def pixel_value(self, x, y, c1, c2, i1, i2, val1, val2, F1, F2, l1, l2, mask='FQPM'):\r\n x1, y1 = c1\r\n x1 += 0.5\r\n y1 += 0.5\r\n\r\n a1 = np.sqrt(x1**2+y1**2)\r\n x2, y2 = c2\r\n x2 += 0.5\r\n y2 += 0.5\r\n a2 = np.sqrt(x2**2+y2**2)\r\n 
r1 = np.sqrt((x1-x)**2 + (y1-y)**2) # doesn't have to be an integer\r\n r2 = np.sqrt((x2-x)**2 + (y2-y)**2)\r\n k1_airy = self.airy(r1, F1, l1, i1)\r\n k2_airy = self.airy(r2, F2, l2, i2)\r\n norm_airy = k1_airy + k2_airy\r\n k1_airy /= norm_airy\r\n k2_airy /= norm_airy\r\n if mask == 'FQPM':\r\n val_airy = k1_airy*self.four_qs(x, y, c1, val1, val2) + \\\r\n k2_airy*self.four_qs(x, y, c2, val1, val2)\r\n return val_airy\r\n elif mask == 'EOPM':\r\n val_airy = k1_airy*self.eight_octants(x, y, c1, val1, val2) + \\\r\n k2_airy*self.eight_octants(x, y, c2, val2, val1)\r\n return val_airy", "def heliographic(self, *args):\n\n\t\txScl = self.im_raw.scale[0].value\n\t\tyScl = self.im_raw.scale[1].value\n\t\t\n\t\t# Check for single coordinate or ndarray object.\n\t\tif isinstance(args[0], np.ndarray):\n\t\t\t# Retrieve integer dimensions and create arrays holding\n\t\t\t# x and y coordinates of each pixel\n\t\t\txdim = np.int(np.floor(self.im_raw.dimensions[0].value))\n\t\t\tydim = np.int(np.floor(self.im_raw.dimensions[1].value))\n\t\t\ttry:\n\t\t\t\txrow = (np.arange(0, xdim) - self.X0 + args[1])*xScl\n\t\t\t\tyrow = (np.arange(0, ydim) - self.Y0 + args[2])*yScl\n\t\t\t\tself.xg, self.yg = np.meshgrid(xrow, yrow, indexing='xy')\n\t\t\t\tself.rg = np.sqrt(self.xg**2 + self.yg**2)\n\t\t\t\tx = self.xg\n\t\t\t\ty = -self.yg\n\t\t\texcept IndexError:\n\t\t\t\txrow = (np.arange(0, xdim) - self.X0)*xScl\n\t\t\t\tyrow = (np.arange(0, ydim) - self.Y0)*yScl\n\t\t\t\tself.xg, self.yg = np.meshgrid(xrow, yrow, indexing='xy')\n\t\t\t\tself.rg = np.sqrt(self.xg**2 + self.yg**2)\n\t\t\t\tx = self.xg\n\t\t\t\ty = -self.yg\n\t\telse:\n\t\t\t# Have to switch coordinate conventions because calculations\n\t\t\t# assume standard cartesian whereas python indexing is \n\t\t\t# [row, column]\n\t\t\tx = (args[1] - self.X0)*xScl/60.0\n\t\t\ty = (self.Y0 - args[0])*yScl/60.0\n\n\t\tb0_r = np.deg2rad(self.B0)\n\t\tradius = self.rsun\n\t\tRobs = 1/np.tan(np.deg2rad(radius/60))\n\n\t\txxat = np.tan(np.deg2rad(x/60))\n\t\tyyat = np.tan(np.deg2rad(y/60))\n\n\t\trat2 = (xxat**2 + yyat**2)\n\t\tphi = 0*rat2\n\t\tw_rat2 = np.where(rat2 is not 0)\n\t\tphi[w_rat2] = np.arctan2(xxat[w_rat2], yyat[w_rat2])\n\n\t\tmax_ra = np.arcsin(1.0/Robs)\n\t\tmax_rat2 = np.tan(max_ra)*np.tan(max_ra)\n\n\t\tii = np.where(rat2 > max_rat2)\n\t\tif ii[0].any() > 0:\n\t\t\trat2[ii] = max_rat2\n\t\t\t#offlimb[ii] = 1\n\n\t\t###############################################\n\t\tras2 = 0*rat2\n\t\tras2[w_rat2] = 1.0/(1.0 + 1.0/rat2[w_rat2])\n\t\td1 = (1.0 - ras2)\n\t\td2 = (1.0 - (Robs**2*ras2))\n\t\tx = ras2*Robs + np.sqrt(d1)*np.sqrt(d2)\n\t\trr = np.sqrt(rat2*Robs)\n\t\tt1 = np.sin(phi)*rr\n\t\tt2 = np.cos(phi)*rr\n\n\t\thglt = np.arcsin(t2)\n\t\thgln = np.arctan2(x, t1)\n\n\n\n\n\t\t# Only add the instance attribute if it doesn't exist.\n\t\tif isinstance(args[0], np.ndarray) and not hasattr(self, 'lonh'):\n\t\t\tself.lonh = np.rad2deg(hgln)\n\t\t\tself.lath = np.rad2deg(hglt)\n\n\t\treturn np.rad2deg(hgln), np.rad2deg(hglt)", "def retrieve_pixel(self, x, y, index):\n pass", "def comp_point_ref(self, is_set=False):\n\n point_list = list()\n for line in self.get_lines():\n point_list.append(line.get_middle())\n point_ref = sum(array(point_list)) / len(point_list)\n\n if is_set:\n self.point_ref = point_ref\n return point_ref" ]
[ "0.70273024", "0.6470538", "0.6387868", "0.6360936", "0.6305052", "0.6197508", "0.6159541", "0.6128455", "0.6084345", "0.6061778", "0.60592145", "0.5950711", "0.5843533", "0.5832167", "0.58209383", "0.5817011", "0.5734207", "0.57130796", "0.5680172", "0.5634101", "0.56332415", "0.5625403", "0.56178993", "0.5616794", "0.55950075", "0.5557374", "0.5547657", "0.5521934", "0.5513079", "0.5503413" ]
0.6673501
1
MPI wrapper for maxvar and vcmt computation
def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs): log.info('Calculating the temporal variance-covariance matrix') process_indices = mpiops.array_split(range(len(ifg_paths))) def _get_r_dist(ifg_path): """ Get RDIst class object """ ifg = Ifg(ifg_path) ifg.open() r_dist = vcm_module.RDist(ifg)() ifg.close() return r_dist r_dist = mpiops.run_once(_get_r_dist, ifg_paths[0]) prcs_ifgs = mpiops.array_split(ifg_paths) process_maxvar = [] for n, i in enumerate(prcs_ifgs): log.debug('Calculating maxvar for {} of process ifgs {} of total {}'.format(n+1, len(prcs_ifgs), len(ifg_paths))) process_maxvar.append(vcm_module.cvd(i, params, r_dist, calc_alpha=True, write_vals=True, save_acg=True)[0]) if mpiops.rank == MASTER_PROCESS: maxvar = np.empty(len(ifg_paths), dtype=np.float64) maxvar[process_indices] = process_maxvar for i in range(1, mpiops.size): # pragma: no cover rank_indices = mpiops.array_split(range(len(ifg_paths)), i) this_process_ref_phs = np.empty(len(rank_indices), dtype=np.float64) mpiops.comm.Recv(this_process_ref_phs, source=i, tag=i) maxvar[rank_indices] = this_process_ref_phs else: # pragma: no cover maxvar = np.empty(len(ifg_paths), dtype=np.float64) mpiops.comm.Send(np.array(process_maxvar, dtype=np.float64), dest=MASTER_PROCESS, tag=mpiops.rank) mpiops.comm.barrier() maxvar = mpiops.comm.bcast(maxvar, root=0) vcmt = mpiops.run_once(vcm_module.get_vcmt, preread_ifgs, maxvar) log.debug("Finished maxvar and vcm calc!") return maxvar, vcmt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_max_and_argmax(node):\r\n if node.op == T._max_and_argmax:\r\n if len(node.outputs[1].clients) == 0:\r\n #MaxAndArgmax support variable axis,\r\n #but CAReduce support only constant axis.\r\n if node.inputs[1].data is None:\r\n axis = None\r\n else:\r\n try:\r\n axis = get_scalar_constant_value(node.inputs[1])\r\n except NotScalarConstantError:\r\n return False\r\n\r\n new = CAReduce(scal.maximum, axis)(node.inputs[0])\r\n return [new, None]", "def MaxMarginalize(self,V):\r\n var = scipy.setdiff1d(self.var,V)\r\n map1 = [scipy.where(self.var==i)[0][0] for i in var]\r\n card = self.card[map1]\r\n \r\n assignments = I2A(range(len(self.val)), self.card)\r\n indx = A2I(assignments[:, map1], card)\r\n val = scipy.ndimage.maximum(self.val,indx,index = range( card.prod() ))\r\n \r\n return factor(var,card,val)", "def compute_Vm(self):\n return self.df[['V_m']].max()[0]", "def v_cmax(self, tl, ared):\n\t return ared*self.VCMAX0*exp(self.HAV/(R*self.TO)*(1. - self.TO/tl))/(1. + exp((self.SVC*tl - self.HDV)/(R*tl)))", "def compute_vmax(particle, fieldset, time):\n if particle.active == 1:\n particle.vmax = fieldset.vscale*(particle.SCL**fieldset.d)", "def test_compute_c_max_output():\n # build\n T = np.array([600, 500])\n E_ion = np.array([20, 10])\n E_atom = np.array([30, 40])\n angles_ion = np.array([60, 60])\n angles_atom = np.array([60, 60])\n ion_flux = np.array([1e21, 1e20])\n atom_flux = np.array([2e21, 2e20])\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=True)\n\n # test\n assert len(output) == 3\n\n # run\n output = divHretention.compute_c_max(\n T, E_ion, E_atom, angles_ion, angles_atom,\n ion_flux, atom_flux, full_export=False)\n\n # test\n assert len(output) == 2", "def _compute_q_argmax(self):\n self.cur_head = self._sess.run(self.ucb_net._P_argmax,\n {self.state_ph: self.state,\n self.ucb_A_ph: self.ucb_A,\n self.ucb_b_ph: self.ucb_b})[0]\n x = self._sess.run(self._net_outputs.q_heads,\n {self.state_ph: self.state})\n return np.argmax(x[:,:,self.cur_head], axis=1)[0]", "def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val", "def auxmaxrho1(x,m_ind):\n \n cc_sum = auxmaxrho2(x,m_ind) \n f = cc_sum + auxmax_cc_piece(x,0,m_ind) \n cfg.max_piece[m_ind] = 0 # max_piece should be ok here. 
We do not solve aux and real problem at the same time.\n \n for k_ind in range(1,cfg.nomax):\n \n f_tmp = cc_sum + auxmax_cc_piece(x,k_ind,m_ind) \n if f_tmp > f: \n f = f_tmp\n cfg.max_piece[m_ind] = k_ind\n \n return f", "def argmax2(self, cvars=None, ctuple=None):\n if (cvars is None):\n return self.v.ind2sub(self.t.argmax())\n ax = tuple(map(lambda x:ctuple[cvars.index(x)] if x in cvars else slice(None) ,self.v))\n return self.v.ind2sub(self.t[ax].argmax())", "def auxmaxrho2(x,m_ind):\n \n f = 0.0\n for k_ind in range(cfg.nomax):\n f -= auxmax_cc_piece(x,k_ind,m_ind) \n\n return f", "def milp(mdp, maxV, zeroConstraints=()):\n m = Model()\n m.setParam('OutputFlag', False)\n\n # convert notation to previous implementation\n S = mdp.S\n A = mdp.A\n R = mdp.rFuncs\n psi = mdp.psi\n T = mdp.T\n alpha = mdp.alpha\n gamma = mdp.gamma\n\n # useful constants\n rLen = len(R)\n M = 10000 # a large number\n Sr = range(len(S))\n Ar = range(len(A))\n\n # decision variables\n x = m.addVars(len(S), len(A), lb=0, name='x')\n z = m.addVars(rLen, vtype=GRB.BINARY, name='z')\n y = m.addVars(rLen, name='y')\n\n # constraints on y\n for i in range(rLen):\n m.addConstr(y[i] <= sum([x[s, a] * R[i](S[s], A[a]) for s in Sr for a in Ar]) - maxV[i] + (1 - z[i]) * M)\n m.addConstr(y[i] <= z[i] * M)\n\n # constraints on x (valid occupancy)\n for sp in Sr:\n m.addConstr(sum(x[s, a] * ((s == sp) - gamma * T(S[s], A[a], S[sp])) for s in Sr for a in Ar) == alpha(S[sp]))\n\n # == constraints\n for consIdx in range(len(zeroConstraints)):\n m.addConstr(sum(x[S.index(s), A.index(a)] for s, a in zeroConstraints[consIdx]) == 0)\n # obj\n m.setObjective(sum([psi[i] * y[i] for i in xrange(rLen)]), GRB.MAXIMIZE)\n\n m.optimize()\n\n pi = {(S[s], A[a]): x[s, a].X for s in Sr for a in Ar}\n\n if m.status == GRB.Status.OPTIMAL:\n # return feasible being true and the obj value, opt pi\n # .X attribute is to retrieve the value of the variable\n return pi\n else:\n # simply return infeasible\n raise Exception('milp problem optimal solution not found' + m.status)", "def _call_max(vecObj):\n res = vecObj.max()\n return res", "def get_vmax(self, ch_id: int) -> float:\n return float(self.query(':measure:vmax? 
channel{}'.format(ch_id)))", "def produce_max(self, meta, raven_variables, dispatch, t):\n #balance = defaultdict(float)\n interaction = self.get_interaction()\n balance, meta = interaction.produce_max(meta, raven_variables, dispatch, t)\n #for resource, quantity in int_balance.items():\n # balance[resource] += quantity\n return balance, meta", "def vmnmx ( self , var , vmin , vmax ) :\n if var.xminmax() :\n vmn , vmx = var.xminmax ()\n if is_good_number ( vmin ) : vmin = max ( vmin , vmn )\n else : vmin = vmn\n if is_good_number ( vmax ) : vmax = min ( vmax , vmx )\n else : vmax = vmx\n\n assert is_good_number ( vmin ), 'Invalid type of ``min'' %s/%s' % ( vmin , type ( vmin ) )\n assert is_good_number ( vmax ), 'Invalid type of ``max'' %s/%s' % ( vmin , type ( vmin ) )\n assert vmin < vmax, 'Invalid min/max range: %s/%s' % ( vmin , vmax )\n \n return vmin , vmax", "def V_mpp(eta,Absorbed):\n return fmax(lambda voltage : voltage * current_density(voltage, eta,Absorbed))", "def maxmarginal(self, target, out=None):\n return self.__opReduce2(self.v - target,np.max, out=out)", "def calcParallel(self, I, V, Vmax, Vmin, Voc=None):\n if Voc is None:\n Voc = Vmax\n I, V = np.asarray(I), np.asarray(V)\n Vmax = np.asarray(Vmax)\n Vmin = np.asarray(Vmin)\n Voc = np.asarray(Voc)\n Vff = Voc\n delta_Voc = Vmax - Voc\n if np.isclose(delta_Voc, 0):\n Vff = 0.8 * Voc\n delta_Voc = 0.2 * Voc\n elif delta_Voc < 0:\n Vff = Vmax\n delta_Voc = -delta_Voc\n Vquad4 = Vff + delta_Voc * self.Vmod_q4pts\n Vreverse = Vmin * self.negpts\n Vforward = Vff * self.pts\n Vtot = np.concatenate((Vreverse, Vforward, Vquad4), axis=0).flatten()\n Itot = np.zeros((3 * self.npts,))\n for i, v in zip(I, V):\n Itot += npinterpx(Vtot, v, i)\n return Itot, Vtot", "def getmaxnumvar(self): # 3\n res,resargs = self.__obj.getmaxnumvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _maxnumvar_return_value = resargs\n return _maxnumvar_return_value", "def _find_mp(voltage, current):\n p = voltage * current\n idx = np.argmax(p)\n return voltage[idx], current[idx]", "def produce_max(self, meta, raven_vars, dispatch, t):\n request, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n return request, meta", "def getmaxnumvar(self):\n maxnumvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getmaxnumvar(self.__nativep,ctypes.byref(maxnumvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n maxnumvar_ = maxnumvar_.value\n _maxnumvar_return_value = maxnumvar_\n return (_maxnumvar_return_value)", "def max_varEff(df):\n if isinstance(df, str):\n df = pd.read_csv(df, index_col=0)\n\n ref_list = ['mmsplice_ref_acceptorIntron',\n 'mmsplice_ref_acceptor',\n 'mmsplice_ref_exon',\n 'mmsplice_ref_donor',\n 'mmsplice_ref_donorIntron']\n alt_list = ['mmsplice_alt_acceptorIntron',\n 'mmsplice_alt_acceptor',\n 'mmsplice_alt_exon',\n 'mmsplice_alt_donor',\n 'mmsplice_alt_donorIntron']\n\n if 'mmsplice_dlogitPsi' not in df.columns:\n X = df[alt_list].values - df[ref_list].values\n X = transform(X)\n df['mmsplice_dlogitPsi'] = LINEAR_MODEL.predict(X)\n\n dfMax = df.groupby(['ID'], as_index=False).agg(\n {'mmsplice_dlogitPsi': lambda x: max(x, key=abs)})\n\n dfMax = dfMax.merge(df, how='left', on=['ID', 'mmsplice_dlogitPsi'])\n dfMax = dfMax.drop_duplicates(subset=['ID', 'mmsplice_dlogitPsi'])\n # dfMax = dfMax.drop(\"mmsplice_dlogitPsi\", axis=1)\n return dfMax", "def Vmaxpu(*args):\n # Getter\n if len(args) == 0:\n return CheckForError(lib.Generators_Get_Vmaxpu())\n\n # 
Setter\n Value, = args\n CheckForError(lib.Generators_Set_Vmaxpu(Value))", "def CvM(self, using, dx=0.0001):\n pits = np.array(self.PIT(using=using,dx=dx))\n cvm_result = skgof.cvm_test(pits, stats.uniform())\n return cvm_result.statistic, cvm_result.pvalue", "def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]", "def argmax(self, evidence={}):\n if len(evidence)==0:\n return self.v.ind2sub(self.t.argmax())\n ax = tuple([ evidence[v] if v in evidence else slice(None) for v in self.v ])\n return self.v.ind2sub( self.t[ax].argmax() )", "def test_MPI_Parallel_Interface(comm):\n\n def printMPI(msg):\n for i in range(comm.Get_size()):\n comm.barrier()\n if comm.Get_rank() == i:\n print(\"Proc {}: {}\".format(i, msg))\n\n n = 10\n\n par = MPI_Objective_Interface(mp.Extended_Rosenbrock, nb_domain_grid_pts=n,\n comm=comm)\n\n printMPI(par.counts)\n\n # ref = mp.Extended_Rosenbrock\n\n np.testing.assert_array_equal(\n mp.Extended_Rosenbrock.startpoint(n)[par.subdomain_slices],\n par.startpoint())\n np.testing.assert_almost_equal(\n mp.Extended_Rosenbrock.f(mp.Extended_Rosenbrock.startpoint(n)),\n par.f(par.startpoint()),\n err_msg=\"Different Function Value at startpoint\")\n np.testing.assert_allclose(\n mp.Extended_Rosenbrock.grad(mp.Extended_Rosenbrock.startpoint(n))[\n par.subdomain_slices],\n par.grad(par.startpoint()),\n err_msg=\"Different Gradient Value at startpoint\")", "def auxmaxf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmax_f1_part_i(x,m_ind) \n \n return f" ]
[ "0.6341681", "0.61969894", "0.6065083", "0.60228604", "0.60162735", "0.596405", "0.5831983", "0.58185774", "0.57805014", "0.57521695", "0.57240343", "0.57173043", "0.56774616", "0.5617856", "0.5602376", "0.5560389", "0.55562276", "0.5540691", "0.5516906", "0.549255", "0.54219013", "0.5418369", "0.5373195", "0.53677535", "0.53400254", "0.5324612", "0.5319652", "0.5316081", "0.52880955", "0.52860427" ]
0.80074286
0
MPI wrapper for time series calculation.
def _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs): if params[cf.TIME_SERIES_CAL] == 0: log.info('Time Series Calculation not required') return if params[cf.TIME_SERIES_METHOD] == 1: log.info('Calculating time series using Laplacian Smoothing method') elif params[cf.TIME_SERIES_METHOD] == 2: log.info('Calculating time series using SVD method') output_dir = params[cf.TMPDIR] total_tiles = len(tiles) process_tiles = mpiops.array_split(tiles) for t in process_tiles: log.debug("Calculating time series for tile "+str(t.index)+" out of "+str(total_tiles)) ifg_parts = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths] mst_tile = np.load(os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index))) res = timeseries.time_series(ifg_parts, params, vcmt, mst_tile) tsincr, tscum, _ = res np.save(file=os.path.join(output_dir, 'tsincr_{}.npy'.format(t.index)), arr=tsincr) np.save(file=os.path.join(output_dir, 'tscuml_{}.npy'.format(t.index)), arr=tscum) mpiops.comm.barrier() log.debug("Finished timeseries calc!")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_trajectory():\n pass", "def mpi_schedule_job_array(csvstore, job_array, mpi_service=MPIService()):\n param_array = job_array.param_array\n job = job_array.job\n try:\n if mpi_service.rank == 0:\n # master\n results = []\n nb_completed_tasks = 0\n nb_tasks = len(param_array)\n for i in range(1, mpi_service.size):\n if len(param_array) > 0:\n task_param = param_array.pop(0)\n mpi_service.comm.send([job, task_param], dest=i, tag=0)\n while nb_completed_tasks < nb_tasks:\n [slave_rank, [start, end, result]] = mpi_service.comm.recv(source=MPI.ANY_SOURCE, tag=0)\n results += result\n nb_completed_tasks += 1\n if len(param_array) > 0:\n task_param = param_array.pop(0)\n mpi_service.comm.send([job, task_param], dest=slave_rank, tag=0)\n print \"All tasks sent\"\n try:\n kill_slaves(mpi_service)\n except Exception as inst:\n print inst\n print \"All tasks completed\"\n return results\n else:\n # slave\n mpi_status = MPI.Status()\n while 1:\n # waiting sending works by master\n print 'Slave ' + str(mpi_service.rank) + ' is ready...'\n [task_job, task_param] = mpi_service.comm.recv(source=0, tag=MPI.ANY_TAG, status=mpi_status)\n if mpi_status.Get_tag() == 1:\n print 'Closed rank ' + str(mpi_service.rank)\n break\n start_date = datetime.datetime.now()\n result = task_job(task_param)\n end_date = datetime.datetime.now()\n print mpi_service.rank, task_param, \"ended\"\n mpi_service.comm.send([mpi_service.rank, [start_date, end_date, result]], dest=0, tag=0)\n\n except:\n if mpi_service.rank == 0:\n print \"Something went wrong, we should log errors.\"\n traceback.print_exc()\n kill_slaves(mpi_service)\n sys.exit(1)", "def mpirun(self):\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n size = comm.Get_size()\n print rank \n print size\n data = []\n dcds = self.getdcds()\n for i in range(0, len(dcds)):\n pid = i % size \n if pid == rank:\n dcd = dcds[i]\n dcdpath = self.d + \"/\" + dcd\n data.extend(self.metric(self.dcdtopsf(dcd), dcdpath))\n self.write(data)", "def _postprocess_timeseries(rows, cols, params):\n # pylint: disable=too-many-locals\n xlks, _, crop = cf.transform_params(params)\n base_unw_paths = cf.original_ifg_paths(params[cf.IFG_FILE_LIST])\n dest_tifs = cf.get_dest_paths(base_unw_paths, crop, params, xlks)\n output_dir = params[cf.TMPDIR]\n\n # load previously saved prepread_ifgs dict\n preread_ifgs_file = join(output_dir, 'preread_ifgs.pk')\n ifgs = cp.load(open(preread_ifgs_file, 'rb'))\n\n # metadata and projections\n gt, md, wkt = ifgs['gt'], ifgs['md'], ifgs['wkt']\n epochlist = ifgs['epochlist']\n ifgs = [v for v in ifgs.values() if isinstance(v, PrereadIfg)]\n\n tiles = shared.get_tiles(dest_tifs[0], rows, cols)\n\n # load the first tsincr file to determine the number of time series tifs\n tsincr_file = os.path.join(output_dir, 'tsincr_0.npy')\n tsincr = np.load(file=tsincr_file)\n\n # pylint: disable=no-member\n no_ts_tifs = tsincr.shape[2]\n # we create 2 x no_ts_tifs as we are splitting tsincr and tscuml\n # to all processes.\n process_tifs = mpiops.array_split(range(2 * no_ts_tifs))\n\n # depending on nvelpar, this will not fit in memory\n # e.g. 
nvelpar=100, nrows=10000, ncols=10000, 32bit floats need 40GB memory\n # 32 * 100 * 10000 * 10000 / 8 bytes = 4e10 bytes = 40 GB\n # the double for loop helps us overcome the memory limit\n log.info('process {} will write {} ts (incr/cuml) tifs of '\n 'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))\n for i in process_tifs:\n tscum_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)\n if i < no_ts_tifs:\n for n, t in enumerate(tiles):\n _assemble_tiles(i, n, t, tscum_g, output_dir, 'tscuml')\n md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]\n # sequence position; first time slice is #0\n md['SEQUENCE_POSITION'] = i+1\n dest = os.path.join(params[cf.OUT_DIR],\n 'tscuml' + \"_\" +\n str(epochlist.dates[i + 1]) + \".tif\")\n md[ifc.DATA_TYPE] = ifc.CUML\n shared.write_output_geotiff(md, gt, wkt, tscum_g, dest, np.nan)\n else:\n tsincr_g = np.empty(shape=ifgs[0].shape, dtype=np.float32)\n i %= no_ts_tifs\n for n, t in enumerate(tiles):\n _assemble_tiles(i, n, t, tsincr_g, output_dir, 'tsincr')\n md[ifc.EPOCH_DATE] = epochlist.dates[i + 1]\n # sequence position; first time slice is #0\n md['SEQUENCE_POSITION'] = i+1\n dest = os.path.join(params[cf.OUT_DIR],\n 'tsincr' + \"_\" + str(\n epochlist.dates[i + 1]) + \".tif\")\n md[ifc.DATA_TYPE] = ifc.INCR\n shared.write_output_geotiff(md, gt, wkt, tsincr_g, dest, np.nan)\n log.info('process {} finished writing {} ts (incr/cuml) tifs of '\n 'total {}'.format(mpiops.rank, len(process_tifs), no_ts_tifs * 2))", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def run(self, input_time_series=None, num_iter=None, record=False,\n output=False):\n pass", "def dpsi_dt(self, psi, t):\n#\t#To avoid doing anything twice. (odeint tends to do that.)\n#\t#---------------------------------------------------------\n#\tnovel, result = self.check_novelty(t,psi)\n#\tif not novel:\n#\t if self.my_id == 0:\n#\t\tprint \"Time: %2.2f / %2.2f au. Runtime: %2.2f---\"%(\n#\t\t t, self.total_duration, (time.time() - self.t_0)/60.)\n#\t\tself.debug_norm(t, psi, result)\t\n#\t\t\n#\t return result\n#\t##########################################################\n\n\t#Making a complex array. \n\tpsi_complex = psi[:len(psi)/2] + 1j * psi[len(psi)/2:] \n\t\n\tdp_dt_complex = zeros(psi_complex.shape, dtype = complex)\n\tdp_dt_buffer= zeros(psi_complex.shape, dtype = complex)\n\t\n\n\t#Do operations.\n\tmat_vec = self.mat_vec_product(psi_complex, t)\n\n\tdp_dt_complex[self.my_slice] = self.solve_overlap(-1j * mat_vec)\n\t\n\n\n\t#Add and redistribute.\n\tdp_dt_complex = pypar.reduce(dp_dt_complex, pypar.SUM, 0, buffer = dp_dt_buffer)\n\tdp_dt_buffer = dp_dt_complex.copy()\n\tdp_dt_complex = pypar.broadcast(dp_dt_buffer, 0)\n\t\n\n\n\t#Making a float array.\n\tdp_dt = r_[real(dp_dt_buffer), imag(dp_dt_buffer)] \n\t\n\tif self.my_id == 0:\n\t print \"Time: %2.2f / %2.2f au. Runtime: %2.2f\"%(\n\t\tt, self.total_duration, (time.time() - self.t_0)/60.)\n\t self.debug_norm(t, psi, dp_dt)\t\n\t\n\t#Store latest result. 
----------------------------------\n\tself.prev_out = dp_dt\n\t############################3###########################3\n\treturn dp_dt", "def ec_data_processor_precip(path, x='TIMESTAMP_END', y='LE', daily=True):\n\n\n # Get the data from the path and turn the path into a data frame\n # ec_dataset = pd.read_csv(path, header=2)\n\n ec_dataset = pd.read_csv(path, header=2, engine='python')\n\n # print ec_dataset.head()\n print ec_dataset['LE'].head()\n print ec_dataset[ec_dataset[y] != -9999].head()\n # === get rid of no data values in any category of the energy balance ===\n precip_dataset = ec_dataset[ec_dataset['P'] != -9999]\n ec_dataset = ec_dataset[ec_dataset[y] != -9999]\n ec_dataset = ec_dataset[ec_dataset['NETRAD'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['H'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['LE'] != -9999]\n # # You probably won't need these because Marcy Doesn't think they are valid for her towers\n # ec_dataset = ec_dataset[ec_dataset['SH'] != -9999]\n # ec_dataset = ec_dataset[ec_dataset['SLE'] != -9999]\n\n if x.startswith(\"TIMESTAMP\"):\n a = ec_dataset[x].apply(lambda b: dt.strptime(str(b), '%Y%m%d%H%M'))\n aa = precip_dataset[x].apply(lambda d: dt.strptime(str(d), '%Y%m%d%H%M'))\n\n # # TODO - if converting PRISM to MTN time.\n # # Convert to PRISM time (Mtn Standard + 5 hours) PRISM midnight is 12:00 UTC - 7 hours for mountain. Net +5 hrs\n # a = [i + timedelta(hours=19) for i in a]\n # aa = [i + timedelta(hours=19) for i in aa]\n\n\n else:\n a = ec_dataset[x]\n\n # ===== Time Series Processing =====\n\n timeseries = a\n p_timeseries = aa\n # print 'timeseries\\n', timeseries\n Rn = ec_dataset['NETRAD'].values\n H = ec_dataset['H'].values\n LE = ec_dataset['LE'].values\n P = precip_dataset['P']\n print 'P \\n', P\n # indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))\n\n # # testing\n # plt.plot(timeseries, P, color='black')\n # plt.show()\n\n # recreate a dataframe of the variables you want to time average on a monthly timestep\n halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H}) # took out precip. no good vals? 'P': P\n\n halfhour_precip = pd.DataFrame({'timeseries': p_timeseries, 'P': P})\n # set the timeseries column to the index so groupby function can group by year and month of the index.\n halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))\n halfhour_precip = halfhour_precip.set_index(pd.DatetimeIndex(halfhour_precip['timeseries']))\n # convert latent heat to mmH2O by dividing by latent heat of vaporization.\n halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4\n\n if daily:\n\n daily_cum_data = halfhour_data.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n daily_cum_precip = halfhour_precip.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n\n # get each day in the timeseries. 
there are duplicates from the groupby function, so use set() to get rid of\n # duplicates\n daily_cum_time = daily_time_parse(timeseries)\n daily_cum_precip_time = daily_time_parse(p_timeseries)\n\n # # testing\n # daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')\n\n # format daily_cum_data to have datetimes\n daily_cum_data['date'] = daily_cum_time\n daily_cum_precip['date'] = daily_cum_precip_time\n\n return daily_cum_data, daily_cum_precip", "def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n #would be nice to have dt^{n+1} alone\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#figure this out\n #mwf debug\n logEvent(\"HaukeSangalliTrackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n #mwf should be\n q[('mt',ci)] -= self.subgridTmp[ci]\n #don't think this matters right now because called after calculateSubgridError\n self.subgridTmp_ip[ci][:] = self.subgridError_ip_last[ci]\n self.subgridTmp_ip[ci] *= dtInv\n self.subgridTmp_ip[ci] *= self.subgridErrorMassCoef_ip_last[ci]#figure this out\n self.cip[('mt',ci)] -= self.subgridTmp_ip[ci]", "def make_parallel_MPI(function):\n\n def wrapper(*args, **kwargs):\n\n # Checks that the essential paremeters are there\n assert not kwargs['out_allPartTypes'] is None\n assert not kwargs['simulation_name'] is None\n\n # Generate a simulation object and oush it to **kwargs\n sim = Simulation(simulation_name=kwargs['simulation_name'])\n kwargs['simulation'] = sim\n\n # Set-up the MPI allocation schedule\n process = 0\n process_iterator = itertools.product(sim.clusterIDAllowed, sim.redshiftAllowed)\n\n for halo_num, redshift in process_iterator:\n\n if process % size == rank:\n\n cluster_obj = Cluster(clusterID=int(halo_num), redshift=redshift_str2num(redshift))\n file_name = sim.cluster_prefix + sim.halo_Num(halo_num) + redshift\n fileCompletePath = sim.pathSave + '/' + sim.simulation + '_output/collective_output/' + file_name + '.hdf5'\n\n kwargs['cluster'] = cluster_obj\n kwargs['fileCompletePath'] = fileCompletePath\n\n print('CPU ({}/{}) is processing halo {} @ z = {} ------ process ID: {}'.format(rank, size, cluster_obj.clusterID, cluster_obj.redshift, process))\n # Each CPU loops over all apertures - this avoids concurrence in file reading\n # The loop over apertures is defined explicitly in the wrapped function.\n function(*args, **kwargs)\n\n process += 1\n\n return wrapper", "def job_gen(self, time_frame):", "def _local_ts(self, *data):\n arr = self.function(*data)\n if self.var.func_input_dtype == 'numpy':\n arr = xr.DataArray(arr, coords=self.coords)\n arr.name = self.name\n return arr", "def comm_times_group(ns, hosts):\n\n return run_on_hosts(hosts,\n '''python %sape/timings/communication/mpi_run_group.py \"%s\" %s'''%(\n ape_dir, ns, ' '.join(hosts)))", "def time_analysis(self, time_points, plot=False, interval=1800):\n\n first_day = int(min(time_points)/86400)\n\n dyn_cl = dynamic_clusters()\n for t in time_points:\n day = int(t/86400)-first_day+1\n #print day\n time_in_day = t%86400 #in seconds\n dyn_cl.add_element(day,time_in_day) \n\n timestamps_vec = time_wrap(time_points)[0] \n fitting = activity_time(timestamps_vec, interval=interval)\n\n self.methods[\"time_dyn_clst\"] = 
dyn_cl\n self.methods[\"time_fitting\"] = fitting\n if plot: self.temporal_plot(vis=False)\n rospy.loginfo('Done\\n')", "def comm_times_single(ns, send_host, recv_host):\n\n return run_on_hosts((send_host, recv_host),\n '''python %sape/timings/communication/mpi_run_single.py \"%s\" %s %s'''%(\n ape_dir, str(ns), send_host, recv_host))", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def test_MPI_Parallel_Interface(comm):\n\n def printMPI(msg):\n for i in range(comm.Get_size()):\n comm.barrier()\n if comm.Get_rank() == i:\n print(\"Proc {}: {}\".format(i, msg))\n\n n = 10\n\n par = MPI_Objective_Interface(mp.Extended_Rosenbrock, nb_domain_grid_pts=n,\n comm=comm)\n\n printMPI(par.counts)\n\n # ref = mp.Extended_Rosenbrock\n\n np.testing.assert_array_equal(\n mp.Extended_Rosenbrock.startpoint(n)[par.subdomain_slices],\n par.startpoint())\n np.testing.assert_almost_equal(\n mp.Extended_Rosenbrock.f(mp.Extended_Rosenbrock.startpoint(n)),\n par.f(par.startpoint()),\n err_msg=\"Different Function Value at startpoint\")\n np.testing.assert_allclose(\n mp.Extended_Rosenbrock.grad(mp.Extended_Rosenbrock.startpoint(n))[\n par.subdomain_slices],\n par.grad(par.startpoint()),\n err_msg=\"Different Gradient Value at startpoint\")", "def total_fire_power_time_series_par(files, bounding_box):\n \n assert isinstance(bounding_box, BoundingBox)\n bb = bounding_box\n \n results = {}\n with get_context('spawn').Pool() as pool:\n \n vals = pool.map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))\n vals = (val for val in vals if val is not None)\n \n for time, val, fname in vals:\n results[time] = (val, fname)\n \n return results", "def _process(self, X):\n # 周波数毎に実施する\n ones = np.ones(self.L.shape[1])\n\n spire_cost = np.zeros(self.grid.n_points)\n\n # 初期のポジションベクトル\n n_channels = np.shape(X)[0]\n n_freq_bins = np.shape(X)[1]\n n_frames = np.shape(X)[2]\n\n d = None\n n_mic_pair = 0\n # for m1 in range(1):\n\n step = 2\n\n mic_pairs = self.mic_pairs\n # mic_pairs=[[m1,m2] for m1 in range(n_channels-1) for m2 in range(m1+1,np.minimum(m1+step+1,n_channels)) ]\n mic_pairs = np.array(mic_pairs)\n\n n_mic_pair = np.shape(mic_pairs)[0]\n d = np.array(self.mic_positions[mic_pairs[:, 1]]) - np.array(\n self.mic_positions[mic_pairs[:, 0]]\n )\n # d: n_mic_pair,dim\n\n # 時間周波数毎の初期のポジションベクトル\n position_vector = np.zeros(shape=(n_freq_bins, n_frames, self.dim))\n\n X_temp = X[:, self.freq_bins, :]\n\n sigma = np.angle(X_temp[mic_pairs[:, 1], ...] 
/ X_temp[mic_pairs[:, 0], ...])\n sigma = np.transpose(sigma, (1, 2, 0))\n\n sigma = np.where(np.abs(sigma) < 1.0e-18, np.zeros_like(sigma) + 1.0e-18, sigma)\n z = np.zeros(shape=(n_freq_bins, n_frames, n_mic_pair), dtype=np.int)\n x = np.random.normal(size=n_freq_bins * n_frames * n_mic_pair)\n x = np.reshape(x, newshape=(n_freq_bins, n_frames, n_mic_pair))\n # 初期化\n mode_vec = self.rough_mode_vec[self.freq_bins, :, :]\n mode_vec = np.conjugate(mode_vec)\n prod = np.einsum(\"fmi,mft->fti\", mode_vec, X[:, self.freq_bins, :])\n # prod=np.einsum(\"mi,mt->ti\",mode_vec,X[:,k,:])\n amp = np.abs(prod)\n # ft\n index = np.argmax(amp, axis=-1)\n org_shape = np.shape(index)\n index = np.reshape(index, [-1])\n\n # indexに相当する方向を取る\n if self.dim == 2:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n # ダミー\n rough_colatitude_recon = np.zeros_like(rough_azimuth_recon) + np.pi\n elif self.dim == 3:\n rough_azimuth_recon = self.rough_grid.azimuth[index]\n rough_colatitude_recon = self.rough_grid.colatitude[index]\n\n doas = np.concatenate(\n (\n rough_colatitude_recon[:, None], # colatitude [0, pi]\n rough_azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n\n # source_locations: 3, n_frames\n source_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n source_locations = np.reshape(source_locations, (3, org_shape[0], org_shape[1]))\n\n position_vector[self.freq_bins, :, :] = np.transpose(\n source_locations[: self.dim, :, :], (1, 2, 0)\n )\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n est_p = position_vector[self.freq_bins, ...]\n z = z[self.freq_bins, ...]\n x = x[self.freq_bins, ...]\n freqs = self.freq_hz\n cluster_index = cluster_index[self.freq_bins, ...]\n\n silent_mode = True\n freqs_d = np.einsum(\"f,pi->fpi\", freqs, d)\n x_non_const_power_vector = np.zeros(shape=(n_freq_bins, n_frames))\n\n for i in range(self.n_mm_itertaions):\n (\n org_cost_0,\n org_cost_1,\n org_cost_2,\n org_cost_3,\n cost_0,\n cost_1,\n cost_2,\n cost_3,\n est_p,\n z,\n x,\n x_non_const_power,\n ) = coplaner_doa_estimation_one_iteration(\n freqs_d,\n est_p,\n sigma,\n z,\n x,\n use_clustering=use_clustering,\n cluster_index=cluster_index,\n cluster_center=cluster_center,\n iter_num2=self.rooting_n_iter,\n silent_mode=silent_mode,\n zero_feature_index=2,\n )\n\n if silent_mode == False:\n print(cost_0, cost_1, cost_2, cost_3)\n\n # est_pから\n # fti\n position_vector[self.freq_bins, ...] 
= est_p\n\n x_non_const_power_vector[self.freq_bins, :] = x_non_const_power[:, :, 0]\n\n size = np.einsum(\"fti,fti->ft\", np.conjugate(position_vector), position_vector)\n size = np.sqrt(size)[..., np.newaxis]\n position_vector = position_vector / np.maximum(size, 1.0e-18)\n\n # gridを探す\n\n # position_vectorに相当する方向を取る\n if self.dim == 2:\n azimuth_recon = self.grid.azimuth\n # ダミー\n colatitude_recon = np.zeros_like(azimuth_recon) + np.pi\n elif self.dim == 3:\n azimuth_recon = self.grid.azimuth\n colatitude_recon = self.grid.colatitude\n\n doas = np.concatenate(\n (\n colatitude_recon[:, None], # colatitude [0, pi]\n azimuth_recon[:, None], # azimuth [0, 2 pi]\n ),\n axis=-1,\n )\n distance = 3.0\n # source_locations: 3, n_grid_num\n grid_locations = geom.spherical_to_cartesian(doa=doas, distance=distance)\n size = np.einsum(\"in,in->n\", np.conjugate(grid_locations), grid_locations)\n size = np.sqrt(size)[np.newaxis, ...]\n grid_locations = grid_locations / np.maximum(size, 1.0e-18)\n\n grid_index_buf = []\n\n # 制約なし解のパワーが1を大幅に超えて居たらReject\n print(np.average(x_non_const_power_vector))\n valid_index = x_non_const_power_vector < self.reject_th\n for k in self.freq_bins:\n prod = np.einsum(\"in,ti->tn\", grid_locations, position_vector[k, ...])\n grid_index = np.argmax(prod, axis=-1)\n\n grid_index = grid_index[valid_index[k, :]]\n\n grid_index_buf.append(grid_index)\n grid_index_buf = np.array(grid_index_buf)\n\n for n in range(self.grid.n_points):\n spire_cost[n] = spire_cost[n] + np.count_nonzero(grid_index_buf == n)\n\n self.grid.set_values(spire_cost)", "def get_timeseries_on_points(self, varname, points):\n res = np.zeros((len(points), self.ntimestep), dtype=np.float64)\n for record in range(self.ntimestep):\n res[:, record] = self.get_data_on_points(varname, record, points)\n return res", "def compute(self): \n Ex=np.zeros((self.nx,self.ny+1))\n Ey=np.zeros((self.nx+1,self.ny))\n Hz=np.zeros((self.nx,self.ny))\n Hzx=np.zeros((self.nx,self.ny))\n Hzy=np.zeros((self.nx,self.ny))\n \n imx = []\n #eps, mu = self.makeenv()\n mu=np.ones((self.nx,self.ny))*const.mu_0\n eps = self.luneberg(int(self.nx/2), int(self.ny*2/3), self.R)\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n\n c = self.dt/(eps*self.ds)\n d = self.dt/(mu* self.ds)\n \n sigma = self.pml(eps, mu, 20)\n cax = 1 - (sigma[0] * self.dt / eps)\n cay = 1 - (sigma[1] * self.dt / eps)\n dax = 1 - (sigma[2] * self.dt / mu) \n day = 1 - (sigma[3] * self.dt / mu)\n \n bar = progressbar.ProgressBar()\n for n in bar(range(self.nt+1)):\n Ex[:,1:-1] = (cay[:,1:]+cay[:,:-1])/2*Ex[:,1:-1] + (c[:,1:]+c[:,:-1])/2*(Hz[:,1:]-Hz[:,:-1])\n Ey[1:-1,:] = (cax[1:,:]+cax[:-1,:])/2*Ey[1:-1,:] - (c[1:,:]+c[:-1,:])/2*(Hz[1:,:]-Hz[:-1,:])\n \n Hzx = dax*Hzx - d*(Ey[1:,:] - Ey[:-1,:])\n Hzy = day*Hzy + d*(Ex[:,1:] - Ex[:,:-1]) \n Hz = Hzx + Hzy + self.actualsource(self.source, self.f, n, self.dt) \n \n if(n%self.interval == 0): imx.append(Ex[:self.nx,:self.ny]**2 + Ey[:self.nx, :self.ny]**2)\n\n return imx", "def PARALLEL_worker_mc_inv(procnum, num_samples_per_processor, inversion_type, M_amplitude, green_func_array, real_data_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, return_dict_MTs, return_dict_similarity_values_all_samples, return_dict_shift_idxs, 
return_dict_MT_single_force_rel_amps, return_dict_medium_1_medium_2_rel_amp_ratios, invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels, num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n print(\"Processing for process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")\n \n # Define temp data stores for current process:\n tmp_MTs = np.zeros((len(green_func_array[0,:,0]), num_samples_per_processor), dtype=float)\n tmp_similarity_values_all_samples = np.zeros(num_samples_per_processor, dtype=float)\n tmp_shift_idxs_all_samples = []\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_MT_single_force_rel_amps = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n tmp_medium_1_medium_2_rel_amp_ratios = np.zeros(num_samples_per_processor, dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios = []\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_frac_medium_2_diff_phases_dict = {} # Dictionary for temp storing of phase fractions of medium 1\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = np.zeros((num_samples_per_processor, 3), dtype=float)\n else:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases = []\n \n # Sort greens function storage if processing for multiple media:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n green_func_array_total_both_media = green_func_array.copy()\n \n # 3. Loop over samples, checking how well a given MT sample synthetic wavefrom from the forward model compares to the real data:\n for i in range(num_samples_per_processor):\n # Generate random medium amplitude ratio and associated greens functions (if required):\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n # If want to invert for ratio of meduim 1 to medium 2 separately for different phases:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_2_diff_phases_dict[\"P\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"S\"] = np.random.uniform(0.0, 1.0)\n tmp_frac_medium_2_diff_phases_dict[\"surface\"] = np.random.uniform(0.0, 1.0)\n # Generate associated greens functions:\n green_func_array = np.zeros(np.shape(green_func_array_total_both_media[:,:,:,0]), dtype=float)\n # Loop over greens function for each station-phase:\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_2 = tmp_frac_medium_2_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array[j, :, :] = (1. - tmp_frac_medium_2)*green_func_array_total_both_media[j,:,:,0] + tmp_frac_medium_2*green_func_array_total_both_media[j,:,:,1] \n # Otherwise generate single fraction value and associated greens functions:\n else:\n frac_medium_2 = np.random.uniform(0.0, 1.0)\n green_func_array = (1. - frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # 4. 
Generate synthetic waveform for current sample:\n # Vary moment amplitude randomly if specified:\n if invert_for_relative_magnitudes_switch:\n M_amplitude_exp_factor = np.random.uniform(low=rel_exp_mag_range[0], high=rel_exp_mag_range[1])\n M_amplitude = 10.**M_amplitude_exp_factor\n # And generate waveform from source mechanism tensor:\n if inversion_type==\"full_mt\":\n MT_curr_sample = generate_random_MT()*M_amplitude # Generate a random MT sample\n elif inversion_type==\"full_mt_Lune_samp\":\n MT_curr_sample = generate_random_MT_Lune_samp()*M_amplitude # Generate a random MT sample, sampled uniformly in Lune space\n elif inversion_type==\"DC\":\n MT_curr_sample = generate_random_DC_MT()*M_amplitude # Generate a random DC sample\n elif inversion_type==\"single_force\":\n MT_curr_sample = generate_random_single_force_vector()*M_amplitude # Generate a random single force sample\n elif inversion_type == \"DC_single_force_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_coupled_tensor() # Generate a random DC-single-force coupled sample, with associated relative amplitude of DC to single force\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_single_force_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_single_force_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"DC_crack_couple\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_DC_crack_coupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n elif inversion_type == \"single_force_crack_no_coupling\":\n MT_curr_sample, random_DC_to_single_force_amp_frac = generate_random_single_force_crack_uncoupled_tensor()\n MT_curr_sample = MT_curr_sample*M_amplitude\n synth_waveform_curr_sample = forward_model(green_func_array, MT_curr_sample) # Note: Greens functions must be of similar amplitude units going into here...\n \n # 5. Compare real data to synthetic waveform (using variance reduction or other comparison metric), to assign probability that data matches current model:\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_waveform_curr_sample, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n \n # 6. Append results to data store:\n tmp_MTs[:,i] = MT_curr_sample[:,0]\n tmp_similarity_values_all_samples[i] = similarity_curr_sample\n tmp_shift_idxs_all_samples.append(list(shift_idxs))\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n tmp_MT_single_force_rel_amps[i] = random_DC_to_single_force_amp_frac\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,0] = tmp_frac_medium_2_diff_phases_dict[\"P\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,1] = tmp_frac_medium_2_diff_phases_dict[\"S\"]\n tmp_medium_1_medium_2_rel_amp_ratios_multi_phases[i,2] = tmp_frac_medium_2_diff_phases_dict[\"surface\"]\n else:\n tmp_medium_1_medium_2_rel_amp_ratios[i] = frac_medium_2\n \n if i % 10000 == 0:\n print(\"Processor number:\", procnum, \"- Processed for\",i,\"samples out of\",num_samples_per_processor,\"samples\")\n \n # 7. 
And convert misfit measure to likelihood function probability:\n tmp_similarity_values_all_samples = np.exp(-(1.-tmp_similarity_values_all_samples)/2.)\n \n # And return values back to script:\n return_dict_MTs[procnum] = tmp_MTs\n return_dict_similarity_values_all_samples[procnum] = tmp_similarity_values_all_samples\n return_dict_shift_idxs[procnum] = tmp_shift_idxs_all_samples\n return_dict_MT_single_force_rel_amps[procnum] = tmp_MT_single_force_rel_amps\n if num_phase_types_for_media_ratios>0:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios_multi_phases\n else:\n return_dict_medium_1_medium_2_rel_amp_ratios[procnum] = tmp_medium_1_medium_2_rel_amp_ratios\n print(\"Finished processing process:\", procnum, \"for \", num_samples_per_processor, \"samples.\")", "def custom_processing(self, funct: callable, data_tmp: np.ndarray, **kwargs) -> np.ndarray:\n tic = time.time()\n data_tmp = funct(data_tmp, **kwargs)\n self.process_time.append(time.time() - tic)\n return data_tmp", "def gather_qpt_function(self, func_name, *args, **kwargs):\n partial = self.gather_qpt_function_me(func_name, *args, **kwargs)\n\n if i_am_master:\n\n # Contruct an array with the shape of partial,\n # adding a dimension of length nqpt.\n total = np.zeros([self.nqpt] + list(partial.shape[1:]),\n dtype=partial.dtype)\n\n for i, arr in enumerate(partial):\n total[i,...] = arr[...]\n\n active_ranks = self.get_active_ranks()\n if len(active_ranks) > 1:\n for irank in active_ranks[1:]:\n partial = comm.recv(source=irank, tag=irank)\n for arr in partial:\n i += 1\n total[i,...] = arr[...]\n\n elif self.active_worker:\n comm.send(partial, dest=0, tag=rank)\n return\n\n else:\n return\n\n # Now I could broadcast the total result to all workers\n # but right now there is no need to.\n\n return total", "def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... 
=1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)", "def pc_work_time_total(self) -> \"float\":\n return _beamforming_swig.phasedarray_sptr_pc_work_time_total(self)", "def get_timeseries_on_nodes(self, varname, nodes):\n res = np.zeros((len(nodes), self.ntimestep), dtype=np.float64)\n for record in range(self.ntimestep):\n values = self.get_data_value(varname, record)\n res[range(len(nodes)), record] = values[nodes]\n\n return res", "def update_data():\n values = temp_serial_placeholder()\n time = current_time_milli() - __start\n points = [ [time, values[0]], [time, values[1]] ]\n __data.append(points)\n return points", "def time_stats(df):", "def do_ts(self, arg):\n self.do_timesheet(arg)" ]
[ "0.56803995", "0.56590575", "0.53260535", "0.5320121", "0.52309686", "0.5216873", "0.52014816", "0.51989406", "0.5178184", "0.51628107", "0.5117995", "0.5109982", "0.50846106", "0.50473", "0.5044743", "0.50315577", "0.49988696", "0.4984488", "0.49706215", "0.49481472", "0.49353054", "0.49112293", "0.487157", "0.48668605", "0.4854386", "0.48514286", "0.48314306", "0.48184702", "0.48124355", "0.48079145" ]
0.622448
0
Sets the number of simulation threads to use in Calculix
def setNumThreads(cls, numThreads: int):
    cls.NUMTHREADS = numThreads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setNumThreads(self, num):\r\n self.threads = num", "def setNumThreads(self, num):\r\n # implement ThreadPool interface\r\n assert not self.prepared, \"You can't change number of threads for working server\"\r\n self.threads = num", "def setNThreads(self,n):\n assert(n>0)\n self._c_param.n_threads = n", "def setNumThreads(self, num):\n # implement ThreadPool interface\n assert not self.prepared, \"You can't change number of threads for working server\"\n self.threads = num", "def setNumWorkers(self, num):\r\n self.numWorkers = num", "def setNthreads(self, nthreads=None):\n if nthreads is None:\n nthreads = 4\n lib._omp_set_num_threads(nthreads)", "def _nthreads_update(self):\n self.inputs.environ[\"OMP_NUM_THREADS\"] = \"%d\" % self.inputs.num_threads", "def __init__(__self__, *,\n threads_per_core: int):\n pulumi.set(__self__, \"threads_per_core\", threads_per_core)", "def set_num_parallel_workers(num):\n if num <= 0 or num > INT32_MAX:\n raise ValueError(\"Number of parallel workers given is not within the required range.\")\n _config.set_num_parallel_workers(num)", "def set_num_jobs(self, num):\n self.num_jobs = num", "def set_threadpool_size(nthreads):\n os.environ[\"OMP_THREAD_LIMIT\"] = \"0\" if nthreads is None else str(nthreads)", "def set_numpins(self, n):\n self.numpins = n", "def set_workers(self, nworkers):\n\n self.max_workers = nworkers", "def setupthreads(self,numthreads_):\n res = __library__.MSK_XX_setupthreads(self.__nativep,numthreads_)\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])", "def set_cpus(self, num_cpus: int) -> None:\n if self.batch:\n if self.launcher in [\"pbs\", \"cobalt\"]:\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n if hasattr(self.batch_settings, \"set_ncpus\"):\n self.batch_settings.set_ncpus(num_cpus)\n if self.launcher == \"slurm\":\n if hasattr(self, \"batch_settings\") and self.batch_settings:\n if hasattr(self.batch_settings, \"set_cpus_per_task\"):\n self.batch_settings.set_cpus_per_task(num_cpus)\n\n for db in self.dbnodes:\n db.run_settings.set_cpus_per_task(num_cpus)\n if db.is_mpmd and hasattr(db.run_settings, \"mpmd\"):\n for mpmd in db.run_settings.mpmd:\n mpmd.set_cpus_per_task(num_cpus)", "def set_number_executors(self, number_executors):\n with self.__threads_lock:\n self._assert_not_running()\n self.__number_executors = number_executors", "def setNumberOfIterations(self, value):\n return self._set(numberOfIterations=value)", "def setNumberOfIterations(self, value):\n return self._set(numberOfIterations=value)", "def getNumThreads(cls) -> int:\n return cls.NUMTHREADS", "def set_runs_per_restart(self, num):\n raise NotImplementedError()", "def setNumIterations(*argv):", "def setNumberOfIntervals(self, n=500):\n self._simulator_.update(numberOfIntervals=n)\n return", "def manage_threads(_) -> int:\n return 1 << 33", "def manage_threads(_) -> int:\n return 1 << 33", "def set_option_thread_count(self, integer, apikey=''):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/action/setOptionThreadCount/', {'Integer': integer, 'apikey': apikey})))", "def set_omp_threads(self, omp_threads):\n self.omp_env[\"OMP_NUM_THREADS\"] = omp_threads", "def nThreads(self):\n return self._c_param.n_threads", "def setNIterations(self, value):\n return self._set(nIterations=value)", "def num_of_threads(self, num_of_threads):\n if (self.local_vars_configuration.client_side_validation and\n num_of_threads is not None and num_of_threads > 64): # noqa: E501\n raise ValueError(\"Invalid 
value for `num_of_threads`, must be a value less than or equal to `64`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n num_of_threads is not None and num_of_threads < 1): # noqa: E501\n raise ValueError(\"Invalid value for `num_of_threads`, must be a value greater than or equal to `1`\") # noqa: E501\n\n self._num_of_threads = num_of_threads", "def setIterationCount(self, newIterationCount):\n \n pass" ]
[ "0.8256052", "0.7567058", "0.75560105", "0.7523079", "0.71850544", "0.71713644", "0.7045087", "0.6716871", "0.6572469", "0.6557934", "0.6522405", "0.64511395", "0.63847023", "0.63694143", "0.6355767", "0.62587845", "0.625188", "0.625188", "0.62317884", "0.6220977", "0.6196904", "0.61768264", "0.61663425", "0.61663425", "0.6107344", "0.6087822", "0.60851514", "0.6072813", "0.606708", "0.6021385" ]
0.7770937
1
Returns the number of threads used
def getNumThreads(cls) -> int:
    return cls.NUMTHREADS
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getThreads():\r\n return multiprocessing.cpu_count()", "def nThreads(self):\n return self._c_param.n_threads", "def getThreads():\n if sys.platform == 'win32':\n return int(os.environ['NUMBER_OF_PROCESSORS'])\n else:\n return int(os.popen('grep -c cores /proc/cpuinfo').read())", "def num_workers(self) -> int:\n return sum(self.client.nthreads().values())", "def get_ncpu():\n from multiprocessing import cpu_count\n return cpu_count()", "def get_thread_count(self):\n return self.THREAD_COUNT", "def numcpu () :\n import multiprocessing\n return multiprocessing.cpu_count()", "def cpu_count():\n num_available_cores = multiprocessing.cpu_count()\n return num_available_cores", "def num_threads(self):\n return self.inputs.num_threads", "def number_of_workers():\n return (multiprocessing.cpu_count() * 2) + 1", "def number_of_workers():\n return (cpu_count() * 2) + 1", "def _get_threads():\n if sys.platform == 'win32':\n # return (int)(os.environ['NUMBER_OF_PROCESSORS'])\n return 0 # save trouble, do not use multiprocessing on windows\n else:\n return (int)(os.popen('grep -c cores /proc/cpuinfo').read())", "def threads_per_core(self) -> int:\n return pulumi.get(self, \"threads_per_core\")", "def cpu_count_cores():\n return cext.cpu_count_cores()", "def get_num_threads():\n\n num_cores = os.cpu_count()\n\n # the specific environment variable takes prescedence\n if \"PRA_NUM_THREADS\" in os.environ:\n return int(os.environ[\"PRA_NUM_THREADS\"])\n\n # we also respect OMP and MKL variables\n env_var = [\n \"OMP_NUM_THREADS\",\n \"MKL_NUM_THREADS\",\n ]\n\n all_limits = [int(getattr(os.environ, var, num_cores)) for var in env_var]\n\n return min(all_limits)", "def nworkers(self):\n return len(self._workers)", "def _n_workers(self, processes: int = 2) -> int:\n if 2 <= processes <= cpu_count():\n n_workers = processes\n else:\n n_workers = cpu_count()\n return n_workers", "def concurrency(self):\n return multiprocessing.cpu_count()", "def get_num_parallel_workers():\n return _config.get_num_parallel_workers()", "def pool_size():\r\n if DESIRED_THREADS > 1:\r\n return min(DESIRED_THREADS, multiprocessing.cpu_count())\r\n else:\r\n raise Exception(\"ARG ERROR: DESIRED_THREADS is not valid\")", "def _get_num_processors():\n cores = 0\n try:\n cores = len(os.sched_getaffinity(0))\n except AttributeError:\n cores = cpu_count()\n return cores", "def get_used(self):\n return int(self.used_cores)", "def num_workers(self):\n return self._num_workers", "def cpu_count():\r\n if mp is None:\r\n return 1\r\n return mp.cpu_count()", "def threads_per_core(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"threads_per_core\")", "def get_number_executors(self):\n with self.__threads_lock:\n return self.__number_executors", "def get_threads(self):\r\n threads = self._config.get_int('threads', DEFAULT_THREADS)\r\n if threads < THREADS_MIN_TOTAL:\r\n threads = THREADS_MIN_TOTAL\r\n if threads > THREADS_MAX_TOTAL:\r\n threads = THREADS_MAX_TOTAL\r\n return threads", "def count_cpus():\r\n try:\r\n return multiprocessing.cpu_count()\r\n except Exception:\r\n logging.exception('can not get cpu count from'\r\n ' multiprocessing.cpu_count()')\r\n cpuinfo = get_cpuinfo()\r\n # Returns at least one cpu. Check comment #1 in crosbug.com/p/9582.\r\n return len(cpuinfo) or 1", "def get_total_n_cpu(self) -> int:", "def num_cores(self):\n return self.cores_per_socket * self.sockets_per_node * self.num_nodes" ]
[ "0.87160325", "0.8365115", "0.80756944", "0.8064238", "0.80488205", "0.80468506", "0.80122787", "0.796875", "0.79343176", "0.79194176", "0.7910496", "0.78782344", "0.78163666", "0.76584685", "0.76553136", "0.76175404", "0.76108193", "0.7597156", "0.7580023", "0.7564992", "0.75505", "0.7478689", "0.74708277", "0.7449172", "0.74165344", "0.7397605", "0.7382241", "0.73653007", "0.73578703", "0.732553" ]
0.86468875
1
Sets the path for the Calculix executable. Necessary when using Windows, where there is no default installation procedure for Calculix
def setCalculixPath(cls, calculixPath: str) -> None:
    if os.path.isdir(calculixPath):
        cls.CALCULIX_PATH = calculixPath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_vernissagecmd_path(path):\n\n global vernissagecmd_path # Allows us to change the global value of the path.\n if path == 'default': # Change the file path back to the default value.\n vernissagecmd_path = default_vernissagecmd_path\n print('VernissageCmd.exe path changed to {path}'.format(path=default_vernissagecmd_path))\n else: # Change the file path to the new str.\n vernissagecmd_path = path\n print('VernissageCmd.exe path changed to {path}'.format(path=path))", "def setPath(*args):", "def set_script_dir(self, path):\n self.script_dir = path", "def setup(path):\n if platform.system != \"Windows\":\n cwd = os.getcwd()\n\n os.chdir(path)\n\n os.system(\"sh setup.sh\")\n\n os.chdir(cwd)", "def main():\n\n if os.path.isfile(os.path.join(os.getcwd(), 'fose_loader.exe')):\n util.replace_command('FalloutLauncher.exe', 'fose_loader.exe')", "def set_path():\n import os\n import sys\n\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))", "def addCopasiPath(copasiPath):\n if not re.search(copasiPath, os.environ[\"PATH\"]):\n os.environ[\"PATH\"] += os.pathsep + copasiPath", "def setFDKToolsPath(toolName):\n\ttoolPath = 0\n\tif sys.platform == \"darwin\":\n\t\tpaths = os.environ[\"PATH\"]\n\t\tif \"FDK/Tools/osx\" not in paths:\n\t\t\thome = os.environ[\"HOME\"]\n\t\t\tfdkPath = \":%s/bin/FDK/Tools/osx\" % (home)\n\t\t\tos.environ[\"PATH\"] = paths + fdkPath\n\t\n\tif os.name == \"nt\":\n\t\tp = os.popen(\"for %%i in (%s) do @echo. %%~$PATH:i\" % (toolName))\n\t\tlog = p.read()\n\t\tp.close()\n\t\tlog = log.strip()\n\t\tif log:\n\t\t\ttoolPath = log\t\n\telse:\n\t\tp = os.popen(\"which %s\" % (toolName))\n\t\tlog = p.read()\n\t\tp.close()\n\t\tlog = log.strip()\n\t\tif log:\n\t\t\ttoolPath = log\t\n\t\n\tif not toolPath:\n\t\tprint \"\"\"\nThe script cannot run the command-line program '%s'. 
Please make sure the AFDKO is installed, and the system environment variable PATH\ncontains the path the to FDK sub-directory containing '%s'.\"\"\" % (toolName, toolName)\n\n\treturn toolPath # get reid of new-line", "def set_working_folder():\n username = getpass.getuser()\n osType = sys.platform\n if username.lower() == 'youval':\n if osType.startswith('win'):\n dr = r'C:\\Phenix\\Dev\\Work\\work\\Clashes\\wtest'\n else:\n dr = '/net/cci/youval/work/work/Clashes/wtest'\n os.chdir(dr)", "def setUtilPath(self):\r\n utilpath.COMMANDER = _search_file(BASE_DIR,'Commander',True)[0]\r\n utilpath.STATBLOCKFIELDREADER = _search_file(BASE_DIR,'StatBlockFieldReader',True)[0]\r\n utilpath.HOSTMANAGER = _search_file(BASE_DIR,'HostManager',True)[0]\r\n utilpath.DATAVIEW = _search_file(TOOLS_DIR,'DataView',True)[0]", "def set_output_path(path):\n\n if not os.path.exists(path):\n cmdline_main.message(\"Creating %s\",path)\n try:\n os.makedirs(path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n cmdline_main.warning(\"Unable to set output path %s\",path)\n\n param.normalize_path.prefix=path\n\n if not path in param.resolve_path.search_paths:\n param.resolve_path.search_paths+=[path]", "def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)", "def cd(self,path):\n self.cwd = path", "def _set_executables(self):\n\n # add path from argument to env\n if self.home_path:\n if self.env:\n self.env += f\":{self.home_path}\"\n else:\n self.env = self.home_path\n\n # set fuzzer_exe \n self.fuzzer_exe = self._search_for_executable(self.fuzzer_exe)\n L.debug(\"Will use %s as fuzzer executable.\", self.fuzzer_exe)\n\n # set compiler_exe\n if self.compiler_exe:\n self.compiler_exe = self._search_for_executable(self.compiler_exe)\n L.debug(\"Will use %s as fuzzer compiler.\", self.compiler_exe)\n\n # set additional executables\n for exe_name, exe_file in self.EXECUTABLES.items():\n self.EXECUTABLES[exe_name] = self._search_for_executable(exe_file)", "def SetToolPaths(toolpaths):\n global tool_search_paths\n\n tool_search_paths = toolpaths", "def _set_runtime_infos(args):\n import os\n runtime = cc.view('_runtime')\n runtime.set('command', args.launcherid)\n runtime.set('reloader', args.use_reloader)\n cc.set('absoluteDir', os.path.abspath(cc.get('dir')) + '/')", "def write_inno_script (self, fd):\n print(\"; WARNING: This script has been created by py2exe. 
Changes to this script\", file=fd)\n print(\"; will be overwritten the next time py2exe is run!\", file=fd)\n print(\"[Setup]\", file=fd)\n print(\"AppName=%s\" % self.name, file=fd)\n print(\"AppVerName=%s %s\" % (self.name, self.version), file=fd)\n print(\"ChangesEnvironment=true\", file=fd)\n print(r\"DefaultDirName={pf}\\%s\" % self.name, file=fd)\n print(\"DefaultGroupName=%s\" % self.name, file=fd)\n print(\"OutputBaseFilename=%s\" % self.distfilebase, file=fd)\n print(\"OutputDir=..\", file=fd)\n print(\"SetupIconFile=%s\" % self.icon, file=fd)\n print(file=fd)\n print(\"[Tasks]\", file=fd)\n print(\"Name: modifypath; Description: Add application directory to %PATH%\", file=fd)\n print(file=fd)\n # List of source files\n files = self.windows_exe_files + \\\n self.console_exe_files + \\\n self.service_exe_files + \\\n self.comserver_files + \\\n self.lib_files\n print('[Files]', file=fd)\n for path in files:\n print(r'Source: \"%s\"; DestDir: \"{app}\\%s\"; Flags: ignoreversion' % (path, os.path.dirname(path)), file=fd)\n # Set icon filename\n print('[Icons]', file=fd)\n for path in self.windows_exe_files:\n print(r'Name: \"{group}\\%s\"; Filename: \"{app}\\%s\"' %\n (self.name, path), file=fd)\n for path in self.console_exe_files:\n name = os.path.basename(path).capitalize()\n print(r'Name: \"{group}\\%s help\"; Filename: \"cmd.exe\"; Parameters: \"/K %s --help\"' % (name, path), file=fd)\n print(r'Name: \"{group}\\Uninstall %s\"; Filename: \"{uninstallexe}\"' % self.name, file=fd)\n print(file=fd)\n # Uninstall optional log files\n print('[UninstallDelete]', file=fd)\n for path in (self.console_exe_files + self.windows_exe_files):\n exename = os.path.basename(path)\n print(r'Type: files; Name: \"{pf}\\%s\\%s.log\"' % (self.lname, exename), file=fd)\n print(file=fd)\n # Add app dir to PATH\n print(\"[Code]\", file=fd)\n print(\"\"\"\\\nconst\n ModPathName = 'modifypath';\n ModPathType = 'user';\n\nfunction ModPathDir(): TArrayOfString;\nbegin\n setArrayLength(Result, 1)\n Result[0] := ExpandConstant('{app}');\nend;\n#include \"modpath.iss\"\n\"\"\", file=fd)\n shutil.copy(r\"scripts\\modpath.iss\", \"dist\")", "def add_to_path(bin_dir: Path) -> str:\n path_elements = [str(bin_dir)]\n\n existing_path = getenv(\"PATH\")\n if existing_path:\n path_elements.append(existing_path)\n\n return \":\".join(path_elements)", "def set_output_path(self, path, timestamp=True):\n self.ui.lineEdit_output_path.setText(path)\n self.ui.checkBox_timestamp.setChecked(timestamp)", "def _SetProgramDir(self):\n p = self.params\n\n # Program dirs are where the summaries are written to.\n if p.task_name:\n program_dir_name = f'{p.task_name}_{p.name}_{p.dataset_name.lower()}'\n else:\n program_dir_name = f'{p.name}_{p.dataset_name.lower()}'\n self._program_dir = os.path.join(self._logdir, program_dir_name)\n\n pdir = epath.Path(self._program_dir)\n pdir.mkdir(parents=True, exist_ok=True)\n (pdir / 'params.txt').write_text(p.ToText())", "def thepath = getProgramPath(theprog):\r\n\r\n theprog = lower(theprog);\r\n\r\n if strcmp(theprog,'POV-Ray')\r\n # install location for POV-Ray\r\n thepath = '/usr/local/bin';\r\n\r\n else if strcmp(theprog,'quietpov')\r\n # install location for the QuietPOV add-on\r\n thepath = 'C:\\Program Files\\POV-Ray for Windows v3.6\\guiext\\QuietPOV';\r\n\r\n else if strcmp(theprog,'imagemagick')\r\n # install location for ImageMagick\r\n thepath = '/home/kieran/Downloads/ImageMagick-6.8.5-8';\r\n\r\n else if strcmp(theprog,'ffmpeg')\r\n # install location for the ffmpeg library\r\n 
thepath = '/usr/bin/ffmpeg';\r\n\r\n else\r\n thepath = '';", "def cfgInstallPath( *args ):\n return cfgPath( cfgInstallSection, *args )", "def main():\n if getattr(sys, 'frozen', False):\n folderCurrent = os.path.dirname(sys.executable)\n else:\n folderCurrent = os.path.abspath(os.path.dirname(__file__))\n\n replaceAll(folderCurrent)", "def _install_ff_locally(self, path, ff_exe):\n\n if sys.platform.startswith('win'):\n # Windows: copy the whole tuntime\n copy_xul_runtime(op.dirname(ff_exe), path)\n else:\n # OSX / Linux: create a symlink to xul runtime exe\n os.mkdir(path)\n stub_exe = op.join(path, 'xulrunner')\n os.symlink(ff_exe, stub_exe)\n return stub_exe", "def path(cls):\n from os.path import sep, join, exists\n from os import environ\n return join(environ.get(\"SystemRoot\", join(\"C:\", sep, \"Windows\")), \"System32\", \"mpclaim.exe\")", "def launch (self):\n path = \"\"\n os.system(path + 'kidlogger_user.exe')", "def set_ranlib(self):\n # Some systems don't have the ranlib command (e.g. SGIs).\n # In the case where ranlib is not present in the PATH,\n # echo is used instead of ranlib\n print \"Setting ranlib command...\",\n\n path=str(os.getenv('PATH')).split(os.pathsep)\n for i in path:\n if os.path.isfile(os.path.join(i,'ranlib')):\n self.config.ranlib=os.path.join(i,'ranlib')\n print self.config.ranlib\n return\n\n for i in path:\n if os.path.isfile(os.path.join(i,'echo')):\n self.config.ranlib=os.path.join(i,'echo')\n print self.config.ranlib\n return", "def set_plugin_path(self, path):\n ckresult(_dll.FMOD_System_SetPluginPath(self._ptr, path))", "def setServerPath(value):\n global serverPath\n if os.path.isfile(value):\n serverPath = os.path.normpath(value)\n else:\n serverPath = 'tsserver'", "def add_to_path(path):\n from fabric.contrib.files import append\n import vars\n vars = vars.Vars()\n for file in [ vars.os.default_shell_config, vars.os.default_loginshell_config ]:\n append(file, \"export PATH=$PATH:\"+path, use_sudo=True)" ]
[ "0.59972835", "0.57588685", "0.57472515", "0.56947476", "0.55666745", "0.54746413", "0.5436197", "0.54127634", "0.5398966", "0.5395865", "0.5377132", "0.5316808", "0.52898544", "0.5277046", "0.5276305", "0.5267065", "0.52460194", "0.5244962", "0.5238546", "0.52273226", "0.5226884", "0.5205834", "0.51957625", "0.51811695", "0.5173241", "0.5171998", "0.5162023", "0.5143798", "0.51365256", "0.5127873" ]
0.7140539
0
Creates node sets for any RBE connectors used in the simulation
def prepareConnectors(self):
    # Kinematic Connectors require creating node sets
    # These are created and added to the node set collection prior to writing
    numConnectors = 1

    for connector in self.connectors:
        # Nodes are created and are an attribute of a Connector
        self._nodeSets.append(connector.nodeset)
        numConnectors += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_nodes(self):", "def gen_nodes(self):\n self.nodes = []\n for i in range(self.num_nodes):\n self.nodes.append(Node(self.fk))", "def create_exporters(self):\n for node_cfg in self.node_cfg_list:\n self.create_node(node_cfg)", "def create_nodes(self):\n # Create a 'cylinder' of nodes for each layer\n for layer in range(1, self.layers+1):\n diameter = self.dia_dict[layer-1] # Calculate the diameter of the current ring\n n = self.ring_n\n # Calculate the angle by dividing a full circle by the number of nodes\n steps = (2*np.pi) / n\n\n # Create a list of node names for each layer apart for easy access later\n node_layer = []\n\n # Now for the depth of the cylinder we loop over the height\n for h in range(self.height):\n # For the first layer we also create a center\n if layer == 1:\n c_name = f'N.{h}.c'\n self.fem.AddNode(c_name, 0, 0, h)\n self.center_layer.append(c_name)\n\n # Then for each step in the 'ring' we create a node based on it's geometry\n for c in range(n):\n x = np.cos(c*steps) * (diameter/2)\n y = np.sin(c*steps) * (diameter/2)\n name = f'R.{layer}.{h}.{c}'\n self.fem.AddNode(name, x, y, h)\n node_layer.append(name)\n\n # Then the new layer of nodes is saved\n if layer == 1:\n self.node_layers.append(self.center_layer)\n self.node_layers.append(node_layer)", "def __build_nodes(self):\n self.components = {}\n\n for node in self.get_nodes():\n # Create the node\n assert node not in self.components, \"Node %s already exists\" % node.name\n self.components[node] = Node(name=node,\n node=self.graph.nodes[node],\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n # Add the new components\n self.components.update(self.components[node].get_components())", "def buildConnectedSets(self, cars):", "def __initilization(self,node_set):\n \n print \"*********************************\"\n \n for x in node_set:\n x.node_vol=np.transpose(np.matrix([cmath.exp(0), cmath.exp(complex(0,math.pi*2/3)), cmath.exp(complex(0,-math.pi*2/3))]))\n \n print \"Forward/Backward Algorithm Initialization Done!\"", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def __init__(self):\n\n self.nodes = set()", "def create_nodes(self):\n # Create a special dictionary that will raise an error if a key is\n # updated. 
This avoids the\n nodes = NodeDict()\n\n return create_solph_nodes_from_data(self.input_data, nodes)", "def initnodes(self):\n newnodes = self.config[\"nodes\"]\n newpynodes = self.config[\"pynodes\"]\n logging.info('Loading initial nodes: {}'.format(newnodes))\n logging.info('Loading initial python nodes: {}'.format(newpynodes))\n for node in newnodes:\n self.runnode(node)\n for node in newpynodes:\n self.runnode(node, True)", "def nodes_from_seeds(seeds):\n base_nodes = [Node(frozenset([attr])) for attr in seeds]\n make_lattice(base_nodes, seeds)\n return base_nodes", "def generate_networks(self):\n\n # Defines dictionary of residue interaction types to include as network\n # edges.\n #**N.B.** Might want to provide these interactions as a program input?\n # **N.B.** 'intra' in the interaction names dict refers to interactions\n # between residues in the same chain\n interactions = [['hb', 'hb_pairs', 'hb_pairs_fasta_intra'],\n ['nhb', 'nhb_pairs', 'nhb_pairs_fasta_intra'],\n ['plusminus2', 'minus_2', 'minus_2_fasta'],\n ['plusminus2', 'plus_2', 'plus_2_fasta'],\n ['plusminus1', 'minus_1', 'minus_1_fasta'],\n ['plusminus1', 'plus_1', 'plus_1_fasta'],\n ['vdw', 'van_der_waals', 'van_der_waals_fasta_intra']]\n\n # Initialises MultiGraph (= undirected graph with self loops and\n # parallel edges) network of interacting residues\n G = nx.MultiGraph()\n\n # Adds nodes (= residues) to MultiGraph, labelled with their side-chain\n # identity (initially set to unknown), z-coordinate, buried surface area\n # (sandwiches only) and whether they are edge or central strands\n # (sandwiches only).\n if self.barrel_or_sandwich == '2.40':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_coord = self.input_df['z_coords'][num]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n eoc='-', z=z_coord, phipsi=phi_psi_class)\n elif self.barrel_or_sandwich == '2.60':\n for num in range(self.input_df.shape[0]):\n node = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n aa_id = self.input_df['fasta_seq'][num]\n int_or_ext = self.input_df['int_ext'][num][0:3]\n z_sandwich_coord = self.input_df['sandwich_z_coords'][num]\n #z_strand_coord = self.input_df['strand_z_coords'][num]\n #buried_surface_area = self.input_df['buried_surface_area'][num]\n edge_or_central = self.input_df['edge_or_central'][num][0:3]\n try:\n phi_psi_class = self.input_df['phi_psi_class'][num]\n except KeyError:\n phi_psi_class = '-'\n if not int_or_ext in ['int', 'ext']:\n raise ValueError('Residue {} has not been assigned to the '\n 'interior or exterior surface of the input'\n ' beta-barrel structure'.format(node))\n G.add_node(node, type='strand', aa_id=aa_id, int_ext=int_or_ext,\n z=z_sandwich_coord,\n #zstrand=z_strand_coord, bsa=buried_surface_area,\n eoc=edge_or_central,\n phipsi=phi_psi_class)\n\n domain_res_ids = list(G.nodes())\n\n # Adds edges (= residue interactions) to MultiGraph, labelled by\n # interaction type. 
The interactions considered are defined in\n # interactions_dict.\n for int_list in interactions:\n edge_label = int_list[0]\n int_name = int_list[1]\n int_fasta = int_list[2]\n\n for num in range(self.input_df.shape[0]):\n res_1 = self.input_df['domain_ids'][num] + self.input_df['res_ids'][num]\n res_list = self.input_df[int_name][num]\n if type(res_list) != list:\n res_list = [res_list]\n\n for res_index, res_2 in enumerate(res_list):\n res_2 = self.input_df['domain_ids'][num] + res_2\n # Accounts for interactions between residue pairs where one\n # residue is in the beta-barrel/sandwich domain and the\n # other is within a loop region\n aa_id = self.input_df[int_fasta][num][res_index]\n if not res_2 in list(G.nodes()):\n G.add_node(res_2, type='loop', aa_id=aa_id)\n if aa_id != G.nodes()[res_2]['aa_id']:\n print(aa_id, G.nodes()[res_2]['aa_id'])\n raise ValueError(\n 'Identity of node {} is inconsistent according to '\n 'the pairwise interactions listed in {} '\n '{}'.format(res_2, self.input_df_path, edge_label)\n )\n\n # Ensures interactions are only added to the network once\n if G.has_edge(res_1, res_2) is False:\n G.add_edge(res_1, res_2, interaction=edge_label)\n elif G.has_edge(res_1, res_2) is True:\n attributes = [val for label, sub_dict in\n dict(G[res_1][res_2]).items() for key,\n val in sub_dict.items()]\n if not edge_label in attributes:\n G.add_edge(res_1, res_2, interaction=edge_label)\n\n return G", "def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)", "def initial_nodes_setup(config):\n # Nodes setup\n nodes = []\n path = config['urdf_data_path'] + 'nodes.csv'\n position_data = genfromtxt(path, delimiter=',')\n for i in range(config['simulation']['n_nodes']):\n info = {}\n info['position'] = [\n position_data[i][1] * 1.125, position_data[i][0] / 1.125\n ]\n info['importance'] = 0\n nodes.append(info)\n return nodes", "def fuel_create_repositories(self, nodes):\n nodes_ids = [str(node['id']) for node in nodes]\n cmd = (\n \"fuel --env {env_id} \"\n \"node --node-id {nodes_ids} \"\n \"--tasks setup_repositories\".format(\n env_id=self.cluster_id,\n nodes_ids=' '.join(nodes_ids))\n )\n logger.info(\n \"Executing {cmd} command.\".format(cmd=cmd))\n with self.env.d_env.get_admin_remote() as remote:\n remote.check_call(cmd)", "def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random 
edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? \n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)", "def create_nodes(nd=None):\n\n if not nd:\n raise ValueError(\"No nodes data provided.\")\n\n nodes = []\n\n # Create Bus objects from buses table\n busd = {}\n\n for i, b in nd[\"buses\"].iterrows():\n if b[\"active\"]:\n bus = solph.Bus(label=b[\"label\"])\n nodes.append(bus)\n\n busd[b[\"label\"]] = bus\n if b[\"excess\"]:\n nodes.append(\n solph.Sink(\n label=b[\"label\"] + \"_excess\",\n inputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"excess costs\"]\n )\n },\n )\n )\n if b[\"shortage\"]:\n nodes.append(\n solph.Source(\n label=b[\"label\"] + \"_shortage\",\n outputs={\n busd[b[\"label\"]]: solph.Flow(\n variable_costs=b[\"shortage costs\"]\n )\n },\n )\n )\n\n # Create Source objects from table 'commodity sources'\n for i, cs in nd[\"commodity_sources\"].iterrows():\n if cs[\"active\"]:\n nodes.append(\n solph.Source(\n label=cs[\"label\"],\n outputs={\n busd[cs[\"to\"]]: solph.Flow(\n variable_costs=cs[\"variable costs\"]\n )\n },\n )\n )\n\n # Create Source objects with fixed time series from 'renewables' table\n for i, re in nd[\"renewables\"].iterrows():\n if re[\"active\"]:\n # 
set static outflow values\n outflow_args = {\n \"nominal_value\": re[\"capacity\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == re[\"label\"]:\n outflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Source(\n label=re[\"label\"],\n outputs={\n busd[re[\"to\"]]: solph.Flow(**outflow_args)\n },\n )\n )\n\n # Create Sink objects with fixed time series from 'demand' table\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de['active']):\n # set static inflow values\n inflow_args = {\n \"nominal_value\": de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == de[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n\n # create\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]: solph.Flow(**inflow_args)\n },\n )\n )\n\n # Create Transformer objects from 'transformers' table\n for i, t in nd[\"transformers\"].iterrows():\n if t[\"active\"]:\n # set static inflow values\n inflow_args = {\"variable_costs\": t[\"variable input costs\"]}\n # get time series for inflow of transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == t[\"label\"]:\n inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n # create\n nodes.append(\n solph.Transformer(\n label=t[\"label\"],\n inputs={busd[t[\"from\"]]: solph.Flow(**inflow_args)},\n outputs={\n busd[t[\"to\"]]: solph.Flow(nominal_value=t[\"capacity\"])\n },\n conversion_factors={busd[t[\"to\"]]: t[\"efficiency\"]},\n )\n )\n\n for i, s in nd[\"storages\"].iterrows():\n if s[\"active\"]:\n nodes.append(\n solph.components.GenericStorage(\n label=s[\"label\"],\n inputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity inflow\"],\n variable_costs=s[\"variable input costs\"],\n )\n },\n outputs={\n busd[s[\"bus\"]]: solph.Flow(\n nominal_value=s[\"capacity outflow\"],\n variable_costs=s[\"variable output costs\"],\n )\n },\n nominal_storage_capacity=s[\"nominal capacity\"],\n loss_rate=s[\"capacity loss\"],\n initial_storage_level=s[\"initial capacity\"],\n max_storage_level=s[\"capacity max\"],\n min_storage_level=s[\"capacity min\"],\n inflow_conversion_factor=s[\"efficiency inflow\"],\n outflow_conversion_factor=s[\"efficiency outflow\"],\n )\n )\n\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"]:\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label=\"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs={bus1: solph.Flow(), bus2: solph.Flow()},\n outputs={\n bus1: solph.Flow(nominal_value=p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1, bus2): p[\"efficiency\"],\n (bus2, bus1): p[\"efficiency\"],\n },\n )\n )\n\n return nodes", "def _create_connections(self):\n self.predecessors = {}\n self.successors = {}\n for nd in self.nodes:\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n\n for (nd_out, nd_in) in self.edges:\n self.predecessors[nd_in.name].append(nd_out)\n self.successors[nd_out.name].append(nd_in)", "def generate_nodes(self):\n \n # For all state nodes\n node = 0\n \n for i in range(self.x0_n):\n for j in range(self.x1_n):\n for k in range(self.x2_n):\n \n # State\n x = np.array([ self.xd[0][i] , self.xd[1][j] , self.xd[2][k] ])\n \n # State and grid index based on 
node #\n self.nodes_state[node,:] = x\n self.nodes_index[node,:] = np.array([i,j,k])\n \n # Node # based on index ijk\n self.x_grid2node[i,j,k] = node\n \n # Increment node number\n node = node + 1", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def _create_graph(self, pools: List[Pool]):\n for pool in pools:\n self._add_nodes(pool.tokens)\n\n for pool in pools: # noqa: WPS440,WPS441\n self._add_edges(pool) # noqa: WPS441", "def add_nodes(self):\n for node_id in self.nodes:\n x = self.nodes[node_id][0]\n y = self.nodes[node_id][1]\n if node_id == 0:\n self.G.add_node(\"Source\", x=x, y=y, demand=0)\n self.G.add_node(\"Sink\", x=x, y=y, demand=0)\n else:\n self.G.add_node(node_id, x=x, y=y, demand=0)", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def create_techanim_connections(self):\n self.import_setup()\n input_info = self.techanim_info[techanim_creator_utils.RENDER_INPUT_KEY]\n self._create_input_layer_connections(input_info)\n rigid_info = self.techanim_info[techanim_creator_utils.RIGID_KEY]\n self._create_input_layer_connections(rigid_info)\n\n # output connections to the rig/alembic\n layers = [self._wrap_ns(self.setup_config[\"render_output\"])]\n render_output_nodes = self.get_layer_nodes_info(layers)\n for layer, output_nodes in render_output_nodes.iteritems():\n for oNode in output_nodes:\n src_plug = \"{}.outMesh\".format(oNode)\n render_node = oNode.rpartition(self.setup_config[\"output_suffix\"])[0]\n render_node = techanim_creator_utils.removeNS(render_node)\n render_node = \"{}:{}\".format(self.target_namespace,\n render_node)\n dest_plug = \"{}.inMesh\".format(render_node)\n # test if already connected so we do not get the warnings\n if not cmds.isConnected(src_plug, dest_plug):\n try:\n cmds.connectAttr(src_plug, dest_plug, f=True)\n except Exception as e:\n plug_str = \"{} >> {}\".format(src_plug, dest_plug)\n msg = str(e)\n self.potentionally_faulty_connections[plug_str] = msg\n if self.potentionally_faulty_connections:\n self.print_faulty_connections()", "def __init__(self, nodes=None, edges=None):\n self._nodes = []\n self.nodes = nodes\n self._edges = []\n self.edges = edges\n self._create_connections()\n self._sorted_nodes = None\n self._node_wip = []", "def create_connection_stations(city: str, active_nodes: set[Node]) -> \\\n set[tuple[str, str, str, str]]:\n row_set = set()\n for node in active_nodes:\n for neighbor in node.get_neighbours():\n row_set.add((city, node.name, neighbor.name, node.get_color(neighbor)))\n\n return row_set", "def build_graph(self):\n for node in self.nodes:\n self.graph.add_node(node.id, node_obj=node)\n edges = []\n for i in range(0, len(self.nodes)):\n for j in range(i+1, len(self.nodes)):\n if (self.nodes[i].distance(self.nodes[j]) < self.radio_range):\n 
edges.append((self.nodes[i].id, self.nodes[j].id,1))\n self.graph.add_weighted_edges_from(edges)", "def _generate_vertexes(self):\n # generate list of sets for each vms\n self._graph = [set() for _ in range(self._vm_count)]", "def _create_special_connections(self):\n\t\tfor connection in self._infoSpecialConnections:\n\t\t\t# List of source cells ids\n\t\t\tsourcesId = self.cellsId[connection[0]][connection[1]]\n\t\t\t# gather the sources all together\n\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t# List of taget cells ids\n\t\t\ttargetsId = self.cellsId[connection[2]][connection[3]]\n\t\t\t# Ratio of connection\n\t\t\tconRatio = connection[4]\n\t\t\t# Number of connections\n\t\t\tconNum = int(connection[5])\n\t\t\t# Weight of connections\n\t\t\tconWeight = float(connection[6])\n\t\t\t# Type of synapse\n\t\t\tsynType = connection[7]\n\t\t\t# connect sources to targets\n\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)" ]
[ "0.6607326", "0.6201226", "0.61612624", "0.6156707", "0.6078838", "0.6066718", "0.6010416", "0.598341", "0.58685356", "0.5867943", "0.5858527", "0.5854881", "0.5841058", "0.58386105", "0.5774203", "0.5754194", "0.5719591", "0.568923", "0.5685475", "0.56728554", "0.56583863", "0.56463647", "0.5641651", "0.5641256", "0.56332433", "0.5625757", "0.5622654", "0.561438", "0.5613736", "0.559632" ]
0.70691216
0
Returns whether the analysis was completed successfully.
def isAnalysisCompleted(self) -> bool:
    return self._analysisCompleted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_result(self):\n return len(self.__analysis_items) > 0", "def successful(self) -> bool:\n\n return self._successful", "def has_success(self) -> bool:\n return self._has_success", "def is_successful(self) -> bool:\n return bool(self.result_state and self.result_state.is_successful())", "def is_success(self):\n return self.current_state == self.States.SUCCEEDED", "def did_solve(self) -> bool:\n return self._stats[\"success\"]", "def successful(self) -> bool:\n pass", "def wasSuccessful(self):\n if self.args.minimum_coverage != None:\n if self.coverage_percent < self.args.minimum_coverage:\n self.stream.writeln(\n self.colors.red(\n \"Coverage of {}% is below minimum level of {}%\".format(\n self.coverage_percent, self.args.minimum_coverage\n )\n )\n )\n return False\n\n # fail if no tests are run.\n if (\n sum(\n len(x)\n for x in [\n self.errors,\n self.expectedFailures,\n self.failures,\n self.passing,\n self.skipped,\n self.unexpectedSuccesses,\n ]\n )\n == 0\n ):\n return False\n else:\n return len(self.all_errors) + len(self.unexpectedSuccesses) == 0", "def was_successful(self):\n return self.data.exception_type is None or \\\n self.data.exception_type in TestOutcome.POSITIVE_RESULTS", "def successful(self) -> bool:\n return self._unparsed_response is not None", "def done(self) -> bool:\n return pulumi.get(self, \"done\")", "def success(self):\n return self.ready() and not self._exception", "def succeeded(self):\n output = self.__call__()\n if output.succeeded:\n return output or True\n return False", "def result(self):\n result = True\n if self.state != \"error\":\n if self.tests_run < len(self.tests):\n result = False\n else:\n failed = [test for test in self.tests if test.test_result == False]\n if failed:\n result = False\n else:\n result = False\n\n return result", "def isdone(self):\n return bool(self.total_time)", "def has_finished():", "def finished(self):\n if len(self.progress) > 0:\n return self.progress[-1].status in [TestStatus.completed, TestStatus.canceled]\n return False", "def has_finished(self):\n return hasattr(self, '_result') or hasattr(self, '_result_exc')", "def is_completed(self):\n return self._is_completed()", "def success(self):\n return self._success", "def isOk(self):\n return self._isOk", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def IsCompleted(self) -> bool:", "def done(self):\n self.__validate_engine()\n return pythonengine.isDoneFEval(self._future)", "def success(self):\n self.succeeded = True", "def is_done(self):\n return_val = False\n for name in os.listdir(self.results_dir_path):\n if name.startswith('top_genes_per_phenotype'):\n return_val = True\n return return_val", "def is_success(self):\n return self._tag == 'success'", "def was_successful(self):\n return self._build_proto.status == common.SUCCESS", "def is_success(self):\n return self.type_id == STATE_SUCCESS" ]
[ "0.7395358", "0.7370877", "0.7368734", "0.7324297", "0.7295538", "0.7222038", "0.71889675", "0.71834224", "0.71548474", "0.71323526", "0.7120799", "0.70954716", "0.7075373", "0.7017168", "0.701042", "0.7007025", "0.7000122", "0.698778", "0.6979704", "0.6962313", "0.69311875", "0.69254804", "0.69254804", "0.69254804", "0.6924013", "0.6919508", "0.6906812", "0.68806106", "0.68694484", "0.68643326" ]
0.80789053
0
Clears any files generated from the analysis
def clearAnalysis(self, includeResults: bool = False) -> None:
    filename = 'input'  # Base filename for the analysis

    files = [filename + '.inp', filename + '.cvg', filename + '.sta']

    if includeResults:
        files.append(filename + '.frd')
        files.append(filename + '.dat')

    try:
        for file in files:
            filePath = os.path.join(self._workingDirectory, file)
            os.remove(filePath)
    except:
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_files(self):\n self.filenames.clear()", "def clean():\n clean_files()", "def clear(self):\r\n shutil.rmtree(self._output_dir, ignore_errors=True)", "def clear_local_output_directory():\n output_path = '../output/*'\n files = glob.glob(output_path)\n for single_file in files:\n os.remove(single_file)", "def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()", "def clean_outputs(self) -> None:\n\n def _delete_if_not_none(fn: Optional[str]) -> None:\n if fn is not None:\n Path(fn).unlink()\n\n _delete_if_not_none(self.config[\"LOG_FILE\"])\n\n for file_ in self.exporter.get_all_files():\n file_.unlink()", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def clean(self):\n os.remove(\"temp.py\") # Delete the file \"temp.py\", to free up disk space", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def clearOutputDirectory(self):\n for file in os.listdir(self.config[\"outputPath\"]):\n self.logger.info(\"Deleting old output file: {0}\".format(file))\n os.remove(os.path.join(self.config[\"outputPath\"], file))", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def clean(self):\n if self.options.format != 'svg':\n for svgfile in self.svgouts.itervalues():\n os.remove(svgfile)\n os.rmdir(self.tmpdir)", "def clear_files_paths(self):\n del self.__files_paths[:]", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clean(self):\n\t\tself.archiver.closeFile()", "def clean_cwd():\n\n # Generator of the files generated for each runs\n del_files = (file for file in os.listdir() if file.endswith('.vtk')\n or file.endswith('.dat')\n or file.startswith('eeldata')\n or file.endswith('.log'))\n\n for file in del_files:\n try:\n os.remove(file)\n print(\"\\rRemoved {:s} succesfully!\".format(file), end=' '*15)\n except:\n print(\"\\rFailed to remove {:s}\".format(file))\n raise\n\n print('')", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def cleanUp(self):\n print(\" cleaning up\",self.folderSave)\n for fname in glob.glob(self.folderSave+\"/*.*\"):\n if not fname.endswith(\".npy\") and not fname.endswith(\".csv\"):\n print(\" deleting\",os.path.basename(fname))\n os.remove(fname)", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 
'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def reset(self):\n def remove_auxiliary_dir():\n egg_info_dir = self.project_name_sc + \".egg-info\"\n remove_directories([\n egg_info_dir,\n \".env\",\n \".eggs\",\n \".pytest_cache\",\n \"build\",\n \"dist\",\n \".cache\",\n \".benchmark\",\n \".tox\",\n \".vagrant\",\n \".tox\"])\n remove_files([\n \".coverage\",\n \".doit.db\",\n \".doit.bak\",\n \".doit.dat\",\n \".doit.dir\",\n ])\n\n # TODO(lschneider): Remove unnecessary files without command lines.\n # This code could be run directly from this function. However\n # the pathlib library is not part of the standard python 2.\n prefix = \"python -c \\\"import pathlib; \"\n delete_pyfiles = prefix + \"import pathlib; [p.unlink() for p in pathlib.Path('.').rglob('*.py[co]')]\\\"\"\n delete_dirs = prefix + \"import pathlib; [p.rmdir() for p in pathlib.Path('.').rglob('__pycache__')]\\\"\"\n\n return {\n \"actions\": [\n delete_pyfiles,\n delete_dirs,\n remove_auxiliary_dir,\n ],\n \"verbosity\": 2\n }", "def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')", "def clean(self):\n original_dir = os.getcwd()\n os.chdir(self.output)\n\n # Clear out directory\n file_list = os.listdir(self.output)\n\n for afile in file_list:\n if not afile.endswith('.gitignore'):\n path = os.path.join(self.output, afile)\n if os.path.isdir(path):\n rmtree(path)\n else:\n os.remove(path)\n os.chdir(original_dir)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def _clean_input_dir():\n for existing_file in os.listdir(join(input_dir, 'analysis')):\n if existing_file != '.hold':\n os.remove(join(input_dir, 'analysis', existing_file))", "def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()", "def tearDown(self):\n testing_dir = os.path.split(os.path.realpath(__file__))[0]\n for f in glob.glob(os.path.join(testing_dir, \"*\")):\n if f.split(\".\")[-1] in [\"o\", \"out\", \"pyc\", \"log\"]:\n subprocess.call(['rm', f])", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def _cleanup_files(self):\n\n for root, dirs, files in os.walk(self.build_directory):\n dirs_to_delete = [\n Path(root).joinpath(x) for x in dirs if x == '__pycache__'\n ]\n files_to_delete = [\n Path(root).joinpath(x) for x in files if Path(x).suffix == '.pyc'\n ]\n for d in 
dirs_to_delete:\n logger.info('Deleting: %s', d)\n shutil.rmtree(d)\n for f in files_to_delete:\n logger.info('Deleting: %s', f)\n f.unlink()" ]
[ "0.7968305", "0.77191716", "0.7537249", "0.7515307", "0.7513296", "0.7491484", "0.7484085", "0.7410033", "0.734755", "0.7287088", "0.72671735", "0.72499853", "0.7249866", "0.7248436", "0.72048616", "0.7182686", "0.7161519", "0.71290827", "0.7113421", "0.7099185", "0.70814157", "0.706988", "0.7051044", "0.70200706", "0.7007137", "0.70067734", "0.6991504", "0.69897234", "0.6983243", "0.6974369" ]
0.778449
1
Crawl a targeted Twitter account and save its tweets to CSV
def crawlAccount(target):
    # connect Twitter api
    twitter = connectTwitter()

    # reconstruction: look up the profile so `user[0]['statuses_count']` below is
    # defined (the flattened original referenced `user` without assigning it)
    user = twitter.lookup_user(screen_name=target)

    try:
        user_timeline = twitter.get_user_timeline(screen_name=target, count=200,
                                                  include_rts=False, exclude_replies=False)
    except TwythonError:
        sys.exit('Received 404 for %s. Account does not exist or is banned.' % target)

    # re-fetch including retweets once the account is confirmed to exist
    user_timeline = twitter.get_user_timeline(screen_name=target, count=200,
                                              include_rts=True, exclude_replies=False)

    tweets = []
    ids = []

    # stop this loop once every status has been collected
    while len(ids) < user[0]['statuses_count']:
        if len(user_timeline) == 0:
            print '[!] No more tweets available. Ending scraper.\n'
            break

        for tweet in user_timeline:
            ids.append(tweet['id'])
            tweets.append(tweet)
            with open('../Raw data/tweets/%s.json' % target, 'a') as json_out:
                json.dump(tweet, json_out)
                json_out.write('\n')

        print '\t[i] Found %i tweets so far.' % (len(ids))
        time.sleep(5)
        user_timeline = twitter.get_user_timeline(screen_name=target, count=200,
                                                  max_id=min(ids) - 1,
                                                  include_rts=True, exclude_replies=False)
    else:
        print '[!] All tweets scraped. Ending scraper.\n'

    return tweets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_twitter_sentiment():\r\n # Open/create a file to append data to\r\n csvFile = open(NAME+'_posts.csv', 'a')\r\n # Use csv writer\r\n csvWriter = csv.writer(csvFile)\r\n # Calling the user function with current parameters\r\n results = twitter.user_timeline(id=NAME, count=TWEET_COUNT)\r\n for tweet in results:\r\n print(tweet.created_at, tweet.text)\r\n csvWriter.writerow([tweet.created_at, tweet.text.encode('utf-8')])\r\n return csvFile", "def getTwitterscraperTweets():\n import subprocess\n numOfAuthors = len(authors)\n numOfWords = len(words)\n callVars = ['./recoverTweets.sh',str(numOfWords),str(numOfAuthors)]\n callVars.extend([word for word in words]+[author for author in authors])\n if startingDate:\n callVars.extend(['-sd',startingDate])\n if endingDate:\n callVars.extend(['-ed',endingDate])\n #if maxTweets:\n # callVars.extend(['-max',str(maxTweets)])\n callVars.append(\"data/twitterscrapertmp\")\n print(\"Querying twitterAPI by using TwitterScraper... (it may take a long time)\")\n subprocess.call(callVars)\n with open('data/twitterscrapertmp') as json_data:\n tweets = json.load(json_data)\n if removeRetweets:\n tweets = [tweet for tweet in tweets if not isRetweet(tweet)]\n print(\"Query ended. Retrieved: \",len(tweets),\" tweets\")\n #saveTweets(tweets,outputCollection,onFile=True,onDb=True)\n os.remove('data/twitterscrapertmp')\n return tweets", "def get_user_tweets(api, screen_name, output_path):\n logger = logging.getLogger(__name__)\n logger.info('Pulling tweets')\n\n # Create empty list for tweet objects\n tweets = []\n # Pulls users must recent 200 tweets\n new_tweets = api.user_timeline(screen_name=screen_name, count=200)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n # Continues to pull tweets 200 at a time until limit is hit\n while len(new_tweets) > 0:\n new_tweets = api.user_timeline(screen_name=screen_name,\n count=200, max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n\n logger.info(\"...%s tweets downloaded and cleaned\" % (len(tweets)))\n\n # Write all text of tweets to a file\n filename = screen_name + '.csv'\n file = open(join(output_path, filename), 'w')\n\n # Iterates through all tweets and cleans them before outputting\n for tweet in tweets:\n clean_tweet = clean_string(tweet.text)\n line = screen_name + ', ' + clean_tweet + '\\n'\n file.write(line)\n logger.info(\"Done pulling tweets for %s\" % screen_name)\n file.close()", "def crawl(self):\n retrievedTweets = []\n\n count = 1\n \n today = datetime.datetime.now()\n today = today.replace(hour=23, minute=59, second=59, microsecond=999999)\n gap = 1\n yesterday = today - datetime.timedelta(gap) \n nextDay = yesterday + datetime.timedelta(gap)\n \n while True:\n try:\n lst = tweepy.Cursor(self.api.search, lang='en', q=self.keyword, count=50, until=nextDay.date(), result_type='popular').items(50)\n for tweet in lst:\n self.data = [tweet.created_at, tweet.id, tweet.text,\n tweet.user._json['screen_name'], tweet.user._json['name'], \n tweet.favorite_count, tweet.retweet_count, tweet.user.location]\n self.data = tuple(self.data)\n retrievedTweets.append(self.data)\n break\n except tweepy.TweepError as e:\n print(e.reason)\n continue\n except StopIteration: \n break\n\n return retrievedTweets", "def _get_tweets(self):\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n api = tweepy.API(auth)\n search = api.search(self.term, lang='en', count=100)\n\n print(f\"Getting tweets that mention '{self.term}', \"\n 
f\"this may take a while...\")\n\n save_tweet_text = [tweet._json['text'] for tweet in search]\n while len(save_tweet_text) < 1000:\n try:\n oldest = search[-1].id - 1\n search = api.search(self.term, lang='en', count=100, max_id=oldest)\n new_tweets = [tweet._json['text'] for tweet in search]\n save_tweet_text.extend(new_tweets)\n\n # Turn into a set to remove duplicated tweets, then back to list\n save_tweet_text = list(set(save_tweet_text))\n except IndexError:\n break\n\n print(f\"Done. {len(save_tweet_text)} Tweets received.\")\n return save_tweet_text", "def __save_tweet(self, twitter_result):\n timestamp = twitter_result['timestamp']\n\n # Remove +0000 from timestamp\n timestamp_split = timestamp.split(' ')\n timestamp = ''\n for piece in timestamp_split:\n if piece[0] is not '+':\n timestamp += piece + ' '\n\n # Remove trailing space\n timestamp = timestamp[:-1]\n\n # Cast to iso format\n timestamp = datetime.strptime(timestamp, \"%a %b %d %H:%M:%S %Y\").isoformat()\n\n crawl = self.mongo_controller.add_crawl_twitter(\n twitter_result['keyword_id'],\n twitter_result['tweet_id'],\n twitter_result['text'],\n twitter_result['likes'],\n twitter_result['retweets'],\n timestamp,\n return_object=True,\n cast=True,\n )\n\n app.send_task('process-crawl', kwargs={ 'crawl_dict': crawl.to_json() }, queue=queues['processor'])\n\n return crawl", "def extract_tweets(secret: str, query: str, outfile: str, count: int = 0, wait: int = 300) -> None:\n logger = logging.getLogger(\"extracter\")\n logger.info(\"Authenticating with Tweepy\")\n\n logger.info(\"Reading secrets file %s\", secret)\n token_fp = open(secret, \"r\")\n auth = tweepy.OAuthHandler(token_fp.readline().strip(), token_fp.readline().strip())\n auth.set_access_token(token_fp.readline().strip(), token_fp.readline().strip())\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n token_fp.close()\n\n logger.info(\"Attempting to authenticate\")\n api.verify_credentials()\n\n logger.info(\"Authenticated! Examining outfile.\")\n if not os.path.exists(outfile):\n logger.info(\"%s doesn't exist - it will be created.\", outfile)\n file_p = open(outfile, \"w\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n tweet_writer.writerow(\n [\n \"full_text\",\n \"created_at\",\n \"source\",\n \"id\",\n \"retweet_count\",\n \"favorite_count\",\n \"user_name\",\n \"user_id_str\",\n \"user_handle\",\n \"user_location\",\n \"user_desc\",\n \"user_protected\",\n \"user_followers\",\n \"user_created\",\n \"user_verified\",\n \"user_tweet_count\",\n ]\n )\n else:\n logger.info(\"%s exists - will append.\", outfile)\n file_p = open(outfile, \"a\", encoding=\"utf-8\")\n tweet_writer = csv.writer(file_p)\n\n logger.info(\"Starting Tweet extraction for query '%s'\", query)\n\n if not count:\n logger.info(\"(executing forever)\")\n else:\n logger.info(\"(executing %s times)\", count)\n\n i = 1\n bookmark = \"1\"\n\n while True:\n # Our search query.\n #\n # q - search query. We use the -filter:retweets\n # specifier in order to prune any retweets.\n # Otherwise we'd have to prune Tweets that\n # are prefaced with 'RT'\n #\n # lang - English Tweets only\n #\n # count - 100 is the max as per the Twitter API\n #\n # tweet_mode - we use extended tweet mode in\n # order to access Tweets that are greater\n # than 140 char. 
in length this is to keep\n # legacy Twitter API applications intact\n #\n # result_type - we use recent so as to create\n # a chronological record of Tweets\n #\n # since_id - we keep track of the last Tweet\n # saved and use it as a bookmark in order\n # to only get the Tweets coming after it\n #\n for tweet in api.search(\n q=f\"{query} -filter:retweets\",\n lang=\"en\",\n count=100,\n tweet_mode=\"extended\",\n result_type=\"recent\",\n max_id=bookmark,\n ):\n # These are the features we write\n tweet_writer.writerow(\n [\n tweet.full_text,\n tweet.created_at,\n tweet.source,\n tweet.id_str,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.user.name,\n tweet.user.id_str,\n tweet.user.screen_name,\n tweet.user.location,\n tweet.user.description,\n tweet.user.protected,\n tweet.user.followers_count,\n tweet.user.created_at,\n tweet.user.verified,\n tweet.user.statuses_count,\n ]\n )\n\n # Flush the stream every time just in case\n file_p.flush()\n\n # Set the most recent Tweet as a bookmark\n bookmark = tweet.id_str\n\n # Transparency/monitoring\n limits = api.rate_limit_status()\n rem = limits[\"resources\"][\"application\"][\"/application/rate_limit_status\"][\"remaining\"]\n logger.info(\"Tweets written to %s (%s hourly API accesses left)\", outfile, rem)\n\n # Do not loop if demo\n if i == count:\n break\n i += 1\n\n # Respect API\n time.sleep(wait)", "def export_csv_search(cart, tag = None):\n # Reads all the tweets in folder\n try:\n tweets = tbf.load_stream(cart, tag = tag)\n\n if tag is None:\n ii = cart.index('/#')\n tag = cart[ii+1:-1]\n\n nodes = np.unique(np.array([twe.user_name for twe in tweets]))\n #links_A = [lin.name_A for lin in twe.link_to]\n\n links_A = []\n links_B = []\n for twe in tweets:\n links_A += [lin.name_A for lin in twe.link_to]\n links_B += [lin.name_B for lin in twe.link_to]\n\n #tbf.export_csv(links_A, links_B)\n fileo = open(cart + tag + '_links.csv', 'w')\n filecsv = csv.writer(fileo,delimiter='\\t')\n\n for A, B in zip(links_A, links_B):\n filecsv.writerow([A,B])\n\n fileo.close()\n status = True\n cazzillo = None\n\n except Exception as cazzillo:\n print(cazzillo)\n status = False\n\n return status, cazzillo", "def extract_tweets(consumer_key,consumer_secret,access_token,access_token_secret,search_key):\n # Step 1 - Authenticate\n consumer_key= str(consumer_key)\n consumer_secret= str(consumer_secret)\n\n access_token=str(access_token)\n access_token_secret=str(access_token_secret)\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n\n api = tweepy.API(auth)\n\n #Step 3 - Retrieve Tweets\n public_tweets = api.search(search_key)\n tweets_list=[]\n for tweet in public_tweets:\n tweets_list.append(tweet.text)\n return tweets_list", "def save_user_tweets(user, n, auth):\r\n t = twitter.Twitter(auth=auth)\r\n print(\"Fetching %i tweets from @%s\" % (n, user))\r\n tweets = t.statuses.user_timeline(screen_name=user, count=n)\r\n print(\" (actually fetched %i)\" % len(tweets))\r\n for tweet in tweets:\r\n save_tweet(tweet, outfile)", "def get_tweet_data(session, analytics_account, start_time, end_time, user_agent):\n\n export_url = \"https://analytics.twitter.com/user/\" + analytics_account + \"/tweets/export.json\"\n bundle_url = \"https://analytics.twitter.com/user/\" + analytics_account + \"/tweets/bundle\"\n\n export_data = {\n 'start_time' : end_time,\n 'end_time' : start_time,\n 'lang' : 'en'\n }\n querystring = '?' 
+ urllib.parse.urlencode(export_data)\n print('Querying Twitter...')\n\n\n status = 'Pending'\n counter = 0\n while status == 'Pending':\n attempt = session.post(export_url + querystring, headers=user_agent)\n status_dict = json.loads(attempt.text)\n status = status_dict['status']\n counter += 1\n print('Attempt:', counter, ' Response:',status)\n time.sleep(5)\n\n csv_header = {'Content-Type': 'application/csv',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}\n\n data_req = session.get(bundle_url + querystring, headers=csv_header)\n #print(\"data_req response: \", data_req.status_code)\n print(\"Data retrieved, appending dataset.\")\n return data_req.text", "def write_to_file(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.clean_unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def retrieve_all_tweets(api, id_scr):\n full_tweet_list = []\n new_tweets = api.user_timeline(user_id=id_scr, count=200)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n while len(new_tweets) > 0:\n print \"getting tweets before {}\".format(oldest)\n new_tweets = api.user_timeline(user_id=id_scr, count=200, max_id=oldest)\n full_tweet_list.extend(new_tweets)\n oldest = full_tweet_list[-1].id - 1\n\n out_tweets = [[tweet.id_str, tweet.created_at, tweet.text.encode(\"utf-8\"), tweet.entities] for tweet in\n full_tweet_list]\n\n with open('{}_tweets.csv'.format(id_scr), 'wb') as f:\n writer = csv.writer(f)\n writer.writerow([\"id\", \"created_at\", \"text\", \"entities\"])\n writer.writerows(out_tweets)", "def twitter(self):\n\n q = \" OR \".join(self.search_terms) + \" -filter:retweets\"\n results = self.__api.search(q=q, lang='en', count=100)\n\n tweets = []\n\n for res in results:\n\n publishedAt = datetime.strptime(res._json['created_at'], '%a %b %d %H:%M:%S +0000 %Y').strftime(\"%Y-%m-%d\")\n\n if (res._json['in_reply_to_screen_name'] == None and publishedAt == datetime.now().strftime(\"%Y-%m-%d\")):\n tweets.append([res._json['id'],\n res._json['text'],\n res._json['user']['screen_name'],\n publishedAt,\n res._json['user']['followers_count']])\n\n self.list = pd.DataFrame(tweets, columns=['id', 'title', 'user', 'publishedAt', 'followers_count']).nlargest(10,\n 'followers_count')\n\n return", "def get_tweets(self):\n keyword = 'covid'\n\n # Load tokens from file\n with open('../data/tokens.json', 'r') as f:\n tokens = json.load(f)\n\n # Stream tweets\n auth = tweepy.OAuthHandler(tokens['consumer_key'], tokens['consumer_secret'])\n auth.set_access_token(tokens['access_token_key'], tokens['access_token_secret'])\n api = tweepy.API(auth)\n\n # listen for tweets\n while True:\n\n # TODO: save file in Cloud Storage\n file_name = date.today().strftime('corpus-%d-%m-%Y.json')\n print(f'Updating {file_name} ...')\n\n StreamListener = StreamListener(\n file_name=file_name, \n max_tweets=1000)\n myStream = tweepy.Stream(\n auth=api.auth, \n listener=StreamListener)\n\n myStream.filter(track=[keyword], languages=['en'])\n \n time.sleep(60)", "def write_to_file_ann(self) -> 
None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def get_tweets():\n\n # Read bearer token from secrets file\n with open(\"./secrets.yml\", \"r\") as f:\n bearer_token = yaml.load(f, Loader=yaml.FullLoader)[\"BEARER_TOKEN\"]\n\n # Set start and end times as current time rounded down to nearest minute with supplied offset\n dt_fmt = \"%Y-%m-%dT%H:%M:00Z\"\n dt_now = datetime.datetime.now().replace(second=0, microsecond=0)\n start_time_offset = int(sys.argv[1])\n end_time_offset = int(sys.argv[2])\n dt_end = dt_now - datetime.timedelta(minutes=end_time_offset)\n dt_start = dt_now - datetime.timedelta(minutes=start_time_offset)\n dt_end = dt_end.strftime(dt_fmt)\n dt_start = dt_start.strftime(dt_fmt)\n\n # Make request, checking for mentions in specified time period\n logging.info(\"Getting mentions from Twitter\")\n uri = \"https://api.twitter.com/2/tweets/search/recent\"\n headers = {\"Authorization\": f\"Bearer {bearer_token}\"}\n query = {\"query\": f\"@{ACCOUNT_NAME}\",\n \"expansions\" : \"author_id\",\n \"user.fields\" : \"username\",\n \"start_time\" : dt_start,\n \"end_time\" : dt_end}\n response = requests.get(uri, headers=headers, params=query)\n\n # Make connection to local database\n connection = sqlite3.connect(\"../database/procrystaldb.db\")\n cursor = connection.cursor()\n\n # Get current total number of rows in database\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n initial_rows = cursor.fetchall()[0][0]\n\n # Get usernames and tweet ids from tweets and save to database\n if response.status_code == 200:\n content = response.json()\n num_results = content[\"meta\"][\"result_count\"]\n if num_results > 0:\n # First get dictionary of usernames\n user_id_to_name = {}\n for user in content[\"includes\"][\"users\"]:\n user_id_to_name[user[\"id\"]] = user[\"username\"]\n # Then get tweet id, username and save to database\n for result in content[\"data\"]:\n # if KEYWORD in result[\"text\"].lower():\n tweet_id = result[\"id\"]\n username = user_id_to_name[result[\"author_id\"]]\n sql_insert = f\"\"\"\n INSERT OR IGNORE INTO Twitter (tweet_id, username, reply_sent)\n VALUES ('{tweet_id}', '{username}', false);\n \"\"\"\n cursor.execute(sql_insert)\n logging.info(f\"Mentions fetched: {num_results}\")\n else:\n logging.error(f\"Get mentions errored with: {response.json()}\")\n\n # Get final total number of rows in database and therefore number of rows added\n cursor.execute(\"SELECT COUNT(*) FROM Twitter;\")\n final_rows = cursor.fetchall()[0][0]\n rows_added = final_rows - initial_rows\n logging.info(f\"New mentions added: {rows_added}\")\n\n # Close database connection\n connection.commit()\n connection.close()\n\n return rows_added", "def user_scrape(users: List, outfile: str, limit: int, since: str) -> None:\n assert(len(users)>0)\n\n # put params into configuration object\n c = twint.Config()\n c.Hide_output = True\n c.Limit = limit\n c.Language = \"en\"\n c.Output = os.path.join(data_dir, outfile)\n c.Store_csv = True\n c.Since = since\n\n for u in tqdm(users, total=293):\n # and run the search for each username\n sleep(2.5)\n try:\n #print(\"scanning tweets from user {}\".format(u))\n c.Username = u\n twint.run.Search(c)\n except:\n continue", "def get_tweets(api, username, fh, limit):\n if args.json is 
False:\n for status in tqdm(tweepy.Cursor(api.user_timeline, screen_name=username).items(limit), unit=\"tw\", total=limit):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")\n else:\n for status in (tweepy.Cursor(api.user_timeline, screen_name=username).items(limit)):\n process_tweet(status)\n if args.save:\n fh.write(str(json.dumps(status._json))+\",\")", "def get_tweets():\n\n\tuser ='kaiserkumars'\n\t# api = twitter.Api(consumer_key='iJoZZuV7etVrJfE4K9ir8sIqa',\n\t# consumer_secret='uyJyWoP05z2MUKnggW7vHnIG2sckmM1aHRMgGveZLyrz8401Xs',\n\t# access_token_key='622588040-TYDgG1UlGUvA1hW8PA7mOG5CiMw0WiuPZlkoP8cc',\n\t# access_token_secret='laAmFjeLhWzOK7Y524VevdMdeLeNpnmCUmjee1AQU7osj')\n\tapi = twitter.Api(consumer_key=get_secret('consumer_key'),\n\t consumer_secret=get_secret('consumer_secret'),\n\t access_token_key=get_secret('access_token_key'),\n\t access_token_secret=get_secret('access_token_secret'))\n\n\tstatuses = api.GetUserTimeline(user_id=622588040,count=0)\n\t# print(statuses)\n\t# duplicate='UNIQUE constraint failed: mtwitter_weatherdata.location, core_weatherdata.metric, core_weatherdata.date'\n\tbulk_insert=[]\n\t# print(dir(TwitterData))\n\tfor s in statuses:\n\t\t# print(s)\n\t\tdt = parse(s.created_at)\n\t\t# print(dt)\n\t\tdata = TwitterData(org_name=s.user.name,profile_url=s.user.profile_image_url,tweet_id =s.id,screen_name=s.user.screen_name, tweet = s.text, date= dt, favCount =0)\n\t\tbulk_insert.append(data)\n\ttry:\n\t\tTwitterData.objects.bulk_create(bulk_insert)\n\t\tprint(\"Success.\")\n\texcept Exception as e:\n\t\t# if(str(e)==duplicate):\n\t\t# \tprint('Duplicate Data')\n\t\t# else:\n\t\tprint(str(e))\n\n\treturn statuses", "def scrape_from_user(acc, num, path='data/tweet_ids.txt'):\n print('Collecting tweets from {}'.format(acc[num]))\n\n tweets = []\n new_tweets = []\n\n new_tweets = _api.user_timeline(screen_name=acc[num], count=200)\n tweets.extend(new_tweets)\n\n oldest = tweets[-1].id - 1\n\n while len(new_tweets) > 0:\n new_tweets = _api.user_timeline(screen_name=acc[num], count=200,\n max_id=oldest)\n tweets.extend(new_tweets)\n oldest = tweets[-1].id - 1\n print('{} tweets collected so far'.format(len(tweets)), end='\\r')\n\n with open(path, 'a+') as f:\n for x in range(len(tweets)):\n f.write(str(tweets[x].id_str))\n f.write('\\n')\n\n print('\\nDone.')", "async def tweet():\n with logger.contextualize(request_id=str(uuid.uuid4())):\n tweets = generate()\n upload(tweets)", "def get_tweets(user, num = 200):\n tweets = []\n \n for tweet in user.home_timeline(count = num):\n edited_tweet = tweet.text\n edited_tweet = edited_tweet.encode(encoding='UTF-8', errors='Ignore') \n tweets.append(edited_tweet)\n return tweets", "def collect_tweets(redis_client, twitter_client, search_term):\n search = Search(redis_client, twitter_client, search_term)\n search.get_term_state()\n search.parse_term_state()\n search.set_query_string()\n search.set_execution_time()\n search.execute_query()\n search.incr_query_counters()\n search.set_newest_id()\n search.set_oldest_id()\n search.set_scenario()\n search.set_term_state()\n search.store_results()\n search.set_score()\n search.log_state()", "def twitter(self):\n message = \"\"\n count = self.collection.count()\n\n twitter = Twitter(auth = OAuth(self.access_key, self.access_secret, self.consumer_key, self.consumer_secret))\n for keyword in self.twitter_keywords:\n query = twitter.search.tweets(q = keyword)\n for result in query['statuses']:\n try:\n data = {\"id\": count+1, \"source\": 
\"twitter\", \"timestamp\": datetime.now()}\n data['tweet'] = result['text']\n data['name'] = result[\"user\"][\"screen_name\"]\n data['url'] = \"https://twitter.com/\" + data[\"name\"] + \"/status/\" + str(result['id'])\n data['search_string'] = keyword\n try:\n dataid = self.collection.insert(data)\n except DuplicateKeyError as e:\n continue\n count += 1\n\n # Slack push notification\n length = 82 - len(data['url'])\n message += \"\\nURL: \" + data['url'] + \" search string: \".rjust(length) + keyword\n\n except Exception as e:\n print(e)\n pass\n \n if message:\n print(self.G + \"[+] Twitter\" + self.B + message)\n self.message += \"\\n*Twitter*:\\n```\"\n self.message += message\n self.message += \"\\n```\"\n\n return", "def userTweets(username):\n api = twitter.Api()\n user_tweets = api.GetUserTimeline(username)\n for tweet in user_tweets:\n util.safe_print(tweet.GetText())", "def get_tweets(username, amount):\n tweets = []\n twitter = Twython()\n\n finished = False\n page = 1\n while not finished:\n\n if amount <= 200:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count=str(amount))\n finished = True\n\n else:\n # Make the API call.\n search_results = twitter.getUserTimeline(screen_name=username,\n page=str(page), count='200')\n amount -= 200\n page += 1\n\n if isinstance(search_results, dict) and search_results['error']:\n raise TwitterAPIException(str(search_results['error']))\n elif not search_results:\n raise TwitterAPIException('User has no tweets.')\n\n for result in search_results:\n tweets.append(result['text']) \n\n return tweets", "def load_tweets(self, max_items=10000, user=None):\n for name, info in self.users.items():\n try:\n os.mkdir(self.root + info['party'].lower().replace(' ', '_'))\n except FileExistsError:\n pass\n \n filepath = self.root + info['party'].lower().replace(' ', '_')\n filepath = filepath + '/' + name.lower().replace(' ', '')\n try:\n print(f'Reading tweets from {name}')\n user = info['screen_name']\n curs = tweepy.Cursor(self.api.user_timeline,\n screen_name=user,\n count=200,\n tweet_mode=\"extended\"\n ).items(max_items)\n\n with open(filepath + '.jsonl', 'w') as f:\n for status in curs:\n tweet = status._json\n json_dump_line(tweet, f)\n \n except tweepy.TweepError as exc:\n print(exc)\n os.remove(filepath + '.jsonl')", "def get_tweets(twitter, screen_name, num_tweets):\n\n request = robust_request(twitter, 'search/tweets', {'q': screen_name, 'count': num_tweets})\n tweets = [a['text'] for a in request]\n\n return tweets", "def get_tweets_from_username(api, screen_name):\n\n # initialize a list to hold all the Tweets\n alltweets = []\n output = []\n\n # make initial request for most recent tweets\n # (200 is the maximum allowed count)\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # save the id of the oldest tweet less one to avoid duplication\n oldest = alltweets[-1].id - 1\n\n # keep grabbing tweets until there are no tweets left\n while len(new_tweets) > 0:\n print(\"Getting tweets before %s\" % (oldest))\n\n # all subsequent requests use the max_id param to prevent\n # duplicates\n new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest, tweet_mode=\"extended\")\n\n # save most recent tweets\n alltweets.extend(new_tweets)\n\n # update the id of the oldest tweet less one\n oldest = alltweets[-1].id - 1\n print(\"... 
%s tweets downloaded so far\" % (len(alltweets)))\n\n # transform the tweepy tweets into a 2D array that will\n for tweet in alltweets:\n output.append([tweet.id_str,\n tweet.created_at,\n tweet.full_text,\n tweet.in_reply_to_screen_name,\n tweet.user.name,\n tweet.user.location,\n tweet.user.followers_count,\n tweet.user.friends_count,\n tweet.geo,\n tweet.coordinates,\n tweet.retweet_count,\n tweet.favorite_count,\n tweet.lang,\n tweet.retweeted])\n\n # Convert to dataframe\n df = pd.DataFrame.from_records(output, columns=[\"id_str\",\n \"created_at\",\n \"full_text\",\n \"in_reply_to_screen_name\",\n \"user_name\",\n \"user_location\",\n \"user_followers_count\",\n \"user_friends_count\",\n \"geo\",\n \"coordinates\",\n \"retweet_count\",\n \"favorite_count\",\n \"lang\",\n \"retweeted\"])\n return df" ]
[ "0.70010406", "0.67850053", "0.6734272", "0.66105545", "0.6591761", "0.64855397", "0.64794904", "0.6473131", "0.64262015", "0.64123046", "0.63488454", "0.6339719", "0.6291743", "0.6237709", "0.621407", "0.62131447", "0.61392415", "0.6134089", "0.6120575", "0.60954064", "0.6068717", "0.6000345", "0.5951418", "0.59498817", "0.5948447", "0.5932758", "0.593224", "0.58867174", "0.58795387", "0.5862277" ]
0.7076422
0
Adding an assertion for testing dataframe equality; setting up a database_handler object with the dummy database path and connecting it
def setUp(self):
    self.addTypeEqualityFunc(pandas.DataFrame, self.assertDataframeEqual)
    self.database_connection.connect()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_build_dataframe(self):\n insert_good_data()\n dataframe = get_dataframe()\n # 1 2 3\n self.assertIs(type(dataframe['Total'][0]), numpy.float64)\n self.assertIs(type(dataframe['InvoiceDate'][0]), str)\n self.assertIs(type(dataframe['Count'][0]), numpy.int64)\n # 4\n self.assertEqual(dataframe['Total'][0], 8198.79)\n # 5\n self.assertDataframeEqual(dataframe, get_equal_dataframe())\n alt_dataframe = get_alter_dataframe(self.database_connection)\n # 6\n self.assertNotEqual(alt_dataframe['Count'][0], dataframe['Count'][0])\n # 7\n with self.assertRaises(AssertionError):\n self.assertDataframeEqual(alt_dataframe, dataframe)\n # 8\n self.assertEqual(dataframe['Total'][0], alt_dataframe['Total'][0])", "def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())", "def test_connect_db_to_query(db):\n assert 1", "def assert_frame_equal(*args, **kwargs):\n return pandas.testing.assert_frame_equal(*args, **kwargs)", "def test_inserted_data(client):\n data = pd.read_csv(\"housing.csv\")\n data = format_data_housing(data)\n House.insert_from_pd(data)\n houses: DataFrame = pd.read_sql(\"SELECT * FROM house\", db.engine)\n assert len(houses) == data.shape[0]\n houses = house_results_to_dataframe(houses)\n assert_frame_equal(houses, data, check_dtype=False)", "def test_connectable_postgresql_db(sa, test_backends, test_df):\n\n if \"postgresql\" not in test_backends:\n pytest.skip(\"skipping fixture because postgresql not selected\")\n\n url = get_sqlalchemy_url(\n drivername=\"postgresql\",\n username=\"postgres\",\n password=\"\",\n host=os.getenv(\"GE_TEST_LOCAL_DB_HOSTNAME\", \"localhost\"),\n port=\"5432\",\n database=\"test_ci\",\n )\n engine = sa.create_engine(url)\n with engine.begin() as connection:\n schema_check_results = connection.execute(\n sa.text(\n \"SELECT schema_name FROM information_schema.schemata WHERE schema_name = 'connection_test';\"\n )\n ).fetchall()\n if len(schema_check_results) == 0:\n with engine.begin() as connection:\n connection.execute(sa.text(\"CREATE SCHEMA connection_test;\"))\n\n table_check_results = connection.execute(\n sa.text(\n \"\"\"\nSELECT EXISTS (\n SELECT FROM information_schema.tables\n WHERE table_schema = 'connection_test'\n AND table_name = 'test_df'\n);\n\"\"\"\n )\n ).fetchall()\n if table_check_results != [(True,)]:\n add_dataframe_to_db(\n df=test_df,\n name=\"test_df\",\n con=engine,\n index=True,\n schema=\"connection_test\",\n )\n\n # Return a connection string to this newly-created db\n return engine", "def test_dummydb_basic(self):\n db = DummyDB()", "def df_equal(left: pd.DataFrame, right: pd.DataFrame, **kwargs) -> bool:\n pd.testing.assert_frame_equal(left, right, **kwargs)\n return True", "def testExampleDataFrameGeneration(ref):\n df = generate_dataframe()\n columns = ref.all_fields_except(['random'])\n ref.assertDataFrameCorrect(df, 'dataframe_result.csv',\n check_data=columns, check_types=columns)", "def test_get_df(mocker):\n spy_load_metadata = mocker.spy(MetaData, 'load_document')\n expected_df = pd.read_json('tests/odata/fixtures/records.json', orient='records')\n\n provider = ODataConnector(\n name='test',\n baseroute='http://services.odata.org/V4/Northwind/Northwind.svc/',\n auth={'type': 'basic', 'args': ['u', 'p']},\n )\n\n data_source = ODataDataSource(\n domain='test',\n name='test',\n entity='Orders',\n query={\n '$filter': 
\"ShipCountry eq 'France'\",\n '$orderby': 'Freight desc',\n '$skip': 50,\n '$top': 3,\n },\n )\n\n try:\n df = provider.get_df(data_source)\n sl = ['CustomerID', 'EmployeeID', 'Freight']\n assert df[sl].equals(expected_df[sl])\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')\n\n assert spy_load_metadata.call_count == 1\n args, _ = spy_load_metadata.call_args\n assert args[0].url.endswith('/$metadata')\n\n provider.auth = None\n try:\n provider.get_df(data_source)\n except socket.error:\n pytest.skip('Could not connect to the standard example OData service.')", "def test_upload_to_df(upload_dataframe: pd.DataFrame) -> None:\n validated = UploadCollection.from_dataframe(upload_dataframe)\n assert upload_dataframe.equals(validated.to_dataframe()[upload_dataframe.columns])", "def test_insert_dataset(self):\n\n # Note that query logic is tested separately by integration tests. This\n # test just checks that the function maps inputs to outputs as expected.\n\n mock_connection = MagicMock()\n mock_cursor = mock_connection.cursor()\n database = Database(mock_connection)\n dataset = self.test_utils.load_sample_dataset()\n\n result = database.insert_dataset(sentinel.issue, dataset)\n\n self.assertIsNone(result)\n self.assertEqual(mock_cursor.execute.call_count, 53)\n\n last_query_values = mock_cursor.execute.call_args[0][-1]\n expected_query_values = (\n 0, sentinel.issue, 'WY', 20201209,\n 0.2519685039370078, 29, 127, 32, 0.4233576642335766, 31, 137, 58, 22, 2,\n 7, None, 2, 8, 0, 1, '2', 5, 29, 3, 4, 0.1172985781990521, 29, 1688, 198,\n 1729, 31, 856, 31, 198, 29, 0.4950838635049161, 31, 1729, 856, 5, 6, 7,\n 0.2362768496420047, 29, 838, 198, 26, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n 17, 18, 19, 20, 21, 22, 23, 31, 24, 25, 15, 26, 27, 28, 29, 30, 31, 32,\n 33, 34, 35, 36, 37, 38, 39, 40, 41, 29, 42, 43, 44, 45, 0, 29, 0, 29,\n 46, 47, 48, 49, 50, 51, 52, 58, 31, 32, 29, 32, 31, 196, 29, 189, 31,\n 53, 54, 55, 56, 2, 29, 2, 29, 137, 31, 'D')\n self.assertEqual(len(last_query_values), len(expected_query_values))\n\n for actual, expected in zip(last_query_values, expected_query_values):\n if isinstance(expected, float):\n self.assertAlmostEqual(actual, expected)\n else:\n self.assertEqual(actual, expected)", "def test_pass_history_and_master_db_params_correctly(self,\n mock_download_master_file,\n mock_get_prices):\n class BuyBelow10(Moonshot):\n \"\"\"\n A basic test strategy that buys below 10.\n \"\"\"\n DB = 'test-db'\n DB_FIELDS = [\"Volume\", \"Wap\", \"Close\"]\n DB_TIMES = [\"00:00:00\"]\n DB_DATA_FREQUENCY = \"daily\"\n UNIVERSES = \"us-stk\"\n SIDS = [\"FI12345\",\"FI23456\"]\n EXCLUDE_SIDS = \"FI34567\"\n EXCLUDE_UNIVERSES = [\"usa-stk-pharm\", \"usa-stk-biotech\"]\n CONT_FUT = False\n\n def prices_to_signals(self, prices):\n signals = prices.loc[\"Wap\"] < 10\n return signals.astype(int)\n\n def _mock_get_prices():\n\n dt_idx = pd.DatetimeIndex([\"2018-05-01\",\"2018-05-02\",\"2018-05-03\", \"2018-05-04\"])\n fields = [\"Close\",\"Wap\",\"Volume\"]\n idx = pd.MultiIndex.from_product([fields, dt_idx], names=[\"Field\", \"Date\"])\n\n prices = pd.DataFrame(\n {\n \"FI12345\": [\n #Close\n 9,\n 11,\n 10.50,\n 9.99,\n # Wap\n 9,\n 11,\n 10.50,\n 9.99,\n # Volume\n 5000,\n 16000,\n 8800,\n 9900\n ],\n \"FI23456\": [\n # Close\n 9.89,\n 11,\n 8.50,\n 10.50,\n # Wap\n 9.89,\n 11,\n 8.50,\n 10.50,\n # Volume\n 15000,\n 14000,\n 28800,\n 17000\n\n ],\n },\n index=idx\n )\n return prices\n\n def _mock_download_master_file(f, *args, **kwargs):\n\n 
master_fields = [\"Timezone\", \"Symbol\", \"SecType\", \"Currency\", \"PriceMagnifier\", \"Multiplier\", \"Exchange\"]\n securities = pd.DataFrame(\n {\n \"FI12345\": [\n \"America/New_York\",\n \"ABC\",\n \"STK\",\n \"USD\",\n None,\n None,\n \"NASDAQ\",\n ],\n \"FI23456\": [\n \"America/New_York\",\n \"DEF\",\n \"STK\",\n \"USD\",\n None,\n None,\n \"NASDAQ\",\n ]\n },\n index=master_fields\n )\n securities.columns.name = \"Sid\"\n securities.T.to_csv(f, index=True, header=True)\n f.seek(0)\n\n mock_download_master_file.side_effect = _mock_download_master_file\n mock_get_prices.return_value = _mock_get_prices()\n\n results = BuyBelow10().backtest(start_date=\"2018-05-01\", end_date=\"2018-05-04\")\n\n get_prices_call = mock_get_prices.mock_calls[0]\n _, args, kwargs = get_prices_call\n self.assertListEqual(kwargs[\"codes\"], [\"test-db\"])\n self.assertEqual(kwargs[\"start_date\"], \"2017-03-25\") # default 252+ trading days before requested start_date\n self.assertEqual(kwargs[\"end_date\"], \"2018-05-04\")\n self.assertEqual(kwargs[\"universes\"], \"us-stk\")\n self.assertEqual(kwargs[\"sids\"], [\"FI12345\", \"FI23456\"])\n self.assertEqual(kwargs[\"exclude_universes\"], ['usa-stk-pharm', 'usa-stk-biotech'])\n self.assertEqual(kwargs[\"exclude_sids\"], \"FI34567\")\n self.assertEqual(kwargs[\"fields\"], ['Volume', 'Wap', 'Close'])\n self.assertEqual(kwargs[\"times\"], [\"00:00:00\"])\n self.assertEqual(kwargs[\"data_frequency\"], \"daily\")\n self.assertFalse(kwargs[\"cont_fut\"])\n self.assertIsNone(kwargs[\"timezone\"])\n self.assertTrue(kwargs[\"infer_timezone\"])\n\n download_master_file_call = mock_download_master_file.mock_calls[0]\n _, args, kwargs = download_master_file_call\n self.assertListEqual(kwargs[\"sids\"], [\"FI12345\", \"FI23456\"])\n self.assertListEqual(kwargs[\"fields\"], [\n \"Currency\", \"Multiplier\", \"PriceMagnifier\",\n \"Exchange\", \"SecType\", \"Symbol\", \"Timezone\"])", "def test_insert_dataset(self):\n\n # Note that query logic is tested separately by integration tests. 
This\n # test just checks that the function maps inputs to outputs as expected.\n\n mock_connection = MagicMock()\n mock_cursor = mock_connection.cursor()\n database = Database(mock_connection)\n dataset = self.test_utils.load_sample_dataset()\n\n result = database.insert_dataset(sentinel.issue, dataset)\n\n self.assertIsNone(result)\n self.assertEqual(mock_cursor.execute.call_count, 20)\n\n last_query_values = mock_cursor.execute.call_args[0][-1]\n expected_query_values = (\n 0, sentinel.issue, 'MA', '2020-05-10', 53, 84, 15691, 73, 12427, 83,\n 3625, 84, None, 0, None, 0, None, 0, None, 0, None, 0, None, 0, None,\n 0, None, 0, None, 0, None, 0, None, 0, None, 0, 0.697850497273019, 72,\n 10876, 15585, 0.2902550897239881, 83, 3607, 12427, 0.21056656682174496,\n 73, 3304, 15691, None, None, None, None, None, None, None, None)\n self.assertEqual(len(last_query_values), len(expected_query_values))\n\n for actual, expected in zip(last_query_values, expected_query_values):\n if isinstance(expected, float):\n self.assertAlmostEqual(actual, expected)\n else:\n self.assertEqual(actual, expected)", "def test_get_df_db(oracle_connector):\n data_sources_spec = [\n {\n 'domain': 'Oracle test',\n 'type': 'external_database',\n 'name': 'my_oracle_sql_con',\n 'query': 'SELECT * FROM City;',\n }\n ]\n\n data_source = OracleSQLDataSource(**data_sources_spec[0])\n df = oracle_connector.get_df(data_source)\n\n assert not df.empty\n assert df.shape == (50, 5)\n assert set(df.columns) == {'ID', 'NAME', 'COUNTRYCODE', 'DISTRICT', 'POPULATION'}\n\n assert len(df[df['POPULATION'] > 500000]) == 5", "def test_from_object_df(self):\n df_test = make_simple_dataframe()\n df_read = BaseDataClass.from_object(df_test).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )", "def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8", "def test_creation_when_valid_database_exists(self):\n database_filename = \"test.db\"\n\n # Delete the test database if it exists.\n test_database = os.path.join(os.getcwd(), database_filename)\n if os.path.exists(test_database):\n os.remove(test_database)\n\n # Create our pre-existing, _valid_, database.\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME,\n Value REAL,\n Debug INTEGER\n );\n \"\"\"\n database_insertion_values = (\"Temperature\", datetime.datetime.now(), 20.0, 1)\n\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)\n cur.execute(\"INSERT INTO data (ID, Time, Value, Debug) VALUES (?, ?, ?, ?);\", database_insertion_values)\n conn.commit()\n\n original_data = cur.execute(\"SELECT * FROM data\").fetchall()\n\n # Create the database object, build the database\n database = app.database.Database(database_filename)\n database.create_database()\n\n # Pull out the table names from the database we've created.\n column_names = extract_column_names(database_filename)\n\n # Assert that they are as expected:\n for column_name in app.database.database_columns:\n self.assertEqual(\n True,\n column_name in column_names,\n \"Database creation process did not yield the column names expected. 
Missing: {0}\".format(column_name)\n )\n\n # Assert that the existing data has been unmolested.\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n data_after_investigation = cur.execute(\"SELECT * FROM data\").fetchall()\n\n self.assertEqual(\n True,\n original_data == data_after_investigation,\n \"Data retrieved after investigating database did not match original data in Data table.\"\n )", "def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())", "def test_create_dataframe(dataframe):\n results = True\n rows = dataframe.shape[0]\n column_names = sorted(dataframe.columns)\n column_datatypes = list(dataframe[column_names].dtypes)\n\n # Checks columns match those specified in #1\n if column_names != DATA_COLUMNS:\n raise ValueError(\"DataFrame does not have necessary datatypes: \" + str(DATA_COLUMNS))\n # Checks column datatypes match\n if column_datatypes != DATA_DATATYPES:\n raise ValueError(\"DataFrame does not have necessary column names: \" + str(DATA_DATATYPES))\n # Checks for a least 3 rows in DataFrame\n if rows < 10:\n raise ValueError(\"DataFrame does not have enough rows of data (>=10).\")\n\n return results", "def test_inner_live_setup(self):\n mocktable = self.classes.MockTable\n session = Session(bind=self.bind)\n session.add(mocktable(test=5))\n session.commit()\n session.close()\n\n res = 
session.query(mocktable).all()\n session.close()\n expected = (res[0].test, len(res))\n\n assert expected == (5, 1)", "def test_pgsql_query(matrix, mock_psycopg2):\n result = matrix.pgsql_query(\"SELECT * from users;\")\n assert result is False\n matrix.save_pgsql_conf(db)\n result = matrix.pgsql_query(\"SELECT * from users;\")\n assert result is True", "def test_database_connection(self):\n\t\t\n\t\tself.assertTrue(database.connect())", "def test_database(client, test_db):\n tester = os.path.exists(\"flaskr.db\")\n assert tester", "def test_db(app):\n assert app.config['DATABASE'] == 'sqlite:///:memory:'", "def testDatabase(self):\n con = self.getMetadataDatabaseConnection()\n if con:\n return True", "def test_creation_when_valid_database_exists_and_overwrite(self):\n database_filename = \"test.db\"\n\n # Delete the test database if it exists.\n test_database = os.path.join(os.getcwd(), database_filename)\n if os.path.exists(test_database):\n os.remove(test_database)\n\n # Create our pre-existing, _valid_, database.\n database_creation_statement = \"\"\"\n CREATE TABLE data(\n row_ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n ID VARCHAR,\n Time DATETIME,\n Value REAL,\n Debug INTEGER\n );\n \"\"\"\n database_insertion_values = (\"Temperature\", datetime.datetime.now(), 20.0, 1)\n\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n cur.execute(database_creation_statement)\n cur.execute(\"INSERT INTO data (ID, Time, Value, Debug) VALUES (?, ?, ?, ?);\", database_insertion_values)\n conn.commit()\n\n original_data = cur.execute(\"SELECT * FROM data\").fetchall()\n\n # Create the database object, build the database\n database = app.database.Database(database_filename, overwrite=True)\n database.create_database()\n\n # Pull out the table names from the database we've created.\n column_names = extract_column_names(database_filename)\n\n # Assert that they are as expected:\n for column_name in app.database.database_columns:\n self.assertEqual(\n True,\n column_name in column_names,\n \"Database creation process did not yield the column names expected. 
Missing: {0}\".format(column_name)\n )\n\n # Assert that the existing data has been unmolested.\n with sqlite3.connect(database_filename) as conn:\n cur = conn.cursor()\n data_after_investigation = cur.execute(\"SELECT * FROM data\").fetchall()\n\n self.assertEqual(\n True,\n original_data != data_after_investigation,\n \"Data retrieved after investigating database did match original data in Data table.\"\n )", "def test_connect_sqlite(self, tmpdir):\n # Set the connect string\n filename = '{}/my.db'.format(str(tmpdir))\n connect_string = 'sqlite:{}'.format(filename)\n # Connect by passing the connect sting (clear environment first)\n if ENV_DATABASE in os.environ:\n del os.environ[ENV_DATABASE]\n con = DatabaseDriver.connect(connect_string=connect_string)\n self.validate_database(con, filename)\n # Make sure that database file has been deleted\n assert not os.path.isfile(filename)\n # Repeat with the environment variable set\n os.environ[ENV_DATABASE] = connect_string\n con.close()\n con = DatabaseDriver.connect()\n self.validate_database(con, filename)\n connect_info = DatabaseDriver.info()\n assert connect_info.startswith('sqlite3 @ ')\n os.environ[ENV_DATABASE] = 'unknown'\n with pytest.raises(ValueError):\n DatabaseDriver.info()\n con.close()", "def test_dataframe(test_data,tmp_path):\n\n for d in test_data:\n\n # Pretty standard map\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n df = pd.DataFrame({\"genotype\":d[\"genotype\"],\n \"phenotype\":d[\"phenotype\"],\n \"uncertainty\":d[\"uncertainty\"]})\n gpm_from_df = gpmap.read_dataframe(df,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,gpm_from_df)\n\n # Minimal map\n gpm = GenotypePhenotypeMap(wildtype=d[\"wildtype\"],\n genotype=d[\"genotype\"])\n df = pd.DataFrame({\"genotype\":d[\"genotype\"]})\n gpm_from_df = gpmap.read_dataframe(df,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,gpm_from_df)\n\n # Read without wildtype --> should still work\n gpm_from_df = gpmap.read_dataframe(df)\n conftest.compare_gpmap(gpm,gpm_from_df)\n\n # Map without genotype (fail)\n df = pd.DataFrame({\"phenotype\":d[\"phenotype\"]})\n with pytest.raises(ValueError):\n gpm_from_df = gpmap.read_dataframe(df,wildtype=d[\"wildtype\"])", "def useful_test_function(db, query):\n print pd.read_sql_query(query, db)" ]
[ "0.71694195", "0.69254637", "0.6790876", "0.6677135", "0.6648234", "0.6580347", "0.650768", "0.6470732", "0.6430044", "0.6362951", "0.6330373", "0.632379", "0.6319858", "0.6303485", "0.6296251", "0.6263661", "0.6252208", "0.6215653", "0.6214677", "0.6211742", "0.62074625", "0.6194286", "0.6156103", "0.61341447", "0.61246634", "0.61165106", "0.61078537", "0.61077565", "0.61060464", "0.6102847" ]
0.7636546
0
1., 2., 3.: checking the data types are correct; 4.: testing the sum of the columns is correct; 5. (added assertion): testing the dataframes are the same, one manufactured; 6.: clearing the database and inserting altered data (duplicate CustomerId), checking the difference in Count (34); 7.: testing that comparing dataframe and alt_dataframe raises an AssertionError; 8.: testing the Sum of both alt_dataframe and dataframe is the same
def test_build_dataframe(self):
    insert_good_data()
    dataframe = get_dataframe()
    # 1 2 3
    self.assertIs(type(dataframe['Total'][0]), numpy.float64)
    self.assertIs(type(dataframe['InvoiceDate'][0]), str)
    self.assertIs(type(dataframe['Count'][0]), numpy.int64)
    # 4
    self.assertEqual(dataframe['Total'][0], 8198.79)
    # 5
    self.assertDataframeEqual(dataframe, get_equal_dataframe())
    alt_dataframe = get_alter_dataframe(self.database_connection)
    # 6
    self.assertNotEqual(alt_dataframe['Count'][0], dataframe['Count'][0])
    # 7
    with self.assertRaises(AssertionError):
        self.assertDataframeEqual(alt_dataframe, dataframe)
    # 8
    self.assertEqual(dataframe['Total'][0], alt_dataframe['Total'][0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_dataframe(dataframe):\n results = True\n rows = dataframe.shape[0]\n column_names = sorted(dataframe.columns)\n column_datatypes = list(dataframe[column_names].dtypes)\n\n # Checks columns match those specified in #1\n if column_names != DATA_COLUMNS:\n raise ValueError(\"DataFrame does not have necessary datatypes: \" + str(DATA_COLUMNS))\n # Checks column datatypes match\n if column_datatypes != DATA_DATATYPES:\n raise ValueError(\"DataFrame does not have necessary column names: \" + str(DATA_DATATYPES))\n # Checks for a least 3 rows in DataFrame\n if rows < 10:\n raise ValueError(\"DataFrame does not have enough rows of data (>=10).\")\n\n return results", "def verify_pandas(self):\n self.check_dataset_duplicate_ids(self.vertices)\n # self.check_dataset_children_ids()\n self.check_dataset_litter_ids()\n self.check_dataset_dates()", "def testExampleDataFrameGeneration(ref):\n df = generate_dataframe()\n columns = ref.all_fields_except(['random'])\n ref.assertDataFrameCorrect(df, 'dataframe_result.csv',\n check_data=columns, check_types=columns)", "def test_inserted_data(client):\n data = pd.read_csv(\"housing.csv\")\n data = format_data_housing(data)\n House.insert_from_pd(data)\n houses: DataFrame = pd.read_sql(\"SELECT * FROM house\", db.engine)\n assert len(houses) == data.shape[0]\n houses = house_results_to_dataframe(houses)\n assert_frame_equal(houses, data, check_dtype=False)", "def test_create_from_dataframe(self):\n self.insert()\n data = self.tbl.select()\n data.index.name = None\n tbl = Table.create(':memory:', \"Foo_2\", data, verbose=True,\n primary_key='id', autoincrement=True)\n self.check(self.idata, tbl.select())", "def test_prepare_essentiality_data(ijr904, combined_dataframe):\n tested_dataframe = essential.prepare_essentiality_data(\n join(dirname(__file__), \"data\", \"essentiality\", \"mock_essential.csv\"), ijr904\n )\n assert tested_dataframe.equals(combined_dataframe)", "def test_create_dataframe(chosen_columns, chosen_url):\n print(\"reading in data\")\n chosen_df = readindata(chosen_columns, chosen_url)\n print(\"checking columns\")\n checkcolumnstest(chosen_columns, chosen_df)\n print(\"checking types\")\n checktypestest(chosen_df)\n print(\"checking for Nan\")\n checkfornan(chosen_df)\n print(\"checking 1 row\")\n checkrowstest(chosen_df)\n return True", "def test_upload_to_df(upload_dataframe: pd.DataFrame) -> None:\n validated = UploadCollection.from_dataframe(upload_dataframe)\n assert upload_dataframe.equals(validated.to_dataframe()[upload_dataframe.columns])", "def test_transform_data(self):\n # assemble\n input_data = (\n self.spark\n .read\n .parquet(self.test_data_path + 'employees'))\n\n expected_data = (\n self.spark\n .read\n .parquet(self.test_data_path + 'employees_report'))\n\n expected_cols = len(expected_data.columns)\n expected_rows = expected_data.count()\n expected_avg_steps = (\n expected_data\n .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))\n .collect()[0]\n ['avg_steps_to_desk'])\n\n # act\n data_transformed = transform_data(input_data, 21)\n\n cols = len(expected_data.columns)\n rows = expected_data.count()\n avg_steps = (\n expected_data\n .agg(mean('steps_to_desk').alias('avg_steps_to_desk'))\n .collect()[0]\n ['avg_steps_to_desk'])\n\n # assert\n self.assertEqual(expected_cols, cols)\n self.assertEqual(expected_rows, rows)\n self.assertEqual(expected_avg_steps, avg_steps)\n self.assertTrue([col in expected_data.columns\n for col in data_transformed.columns])", "def test_insert_dataset(self):\n\n # Note that 
query logic is tested separately by integration tests. This\n # test just checks that the function maps inputs to outputs as expected.\n\n mock_connection = MagicMock()\n mock_cursor = mock_connection.cursor()\n database = Database(mock_connection)\n dataset = self.test_utils.load_sample_dataset()\n\n result = database.insert_dataset(sentinel.issue, dataset)\n\n self.assertIsNone(result)\n self.assertEqual(mock_cursor.execute.call_count, 53)\n\n last_query_values = mock_cursor.execute.call_args[0][-1]\n expected_query_values = (\n 0, sentinel.issue, 'WY', 20201209,\n 0.2519685039370078, 29, 127, 32, 0.4233576642335766, 31, 137, 58, 22, 2,\n 7, None, 2, 8, 0, 1, '2', 5, 29, 3, 4, 0.1172985781990521, 29, 1688, 198,\n 1729, 31, 856, 31, 198, 29, 0.4950838635049161, 31, 1729, 856, 5, 6, 7,\n 0.2362768496420047, 29, 838, 198, 26, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n 17, 18, 19, 20, 21, 22, 23, 31, 24, 25, 15, 26, 27, 28, 29, 30, 31, 32,\n 33, 34, 35, 36, 37, 38, 39, 40, 41, 29, 42, 43, 44, 45, 0, 29, 0, 29,\n 46, 47, 48, 49, 50, 51, 52, 58, 31, 32, 29, 32, 31, 196, 29, 189, 31,\n 53, 54, 55, 56, 2, 29, 2, 29, 137, 31, 'D')\n self.assertEqual(len(last_query_values), len(expected_query_values))\n\n for actual, expected in zip(last_query_values, expected_query_values):\n if isinstance(expected, float):\n self.assertAlmostEqual(actual, expected)\n else:\n self.assertEqual(actual, expected)", "def test_insert_dataset(self):\n\n # Note that query logic is tested separately by integration tests. This\n # test just checks that the function maps inputs to outputs as expected.\n\n mock_connection = MagicMock()\n mock_cursor = mock_connection.cursor()\n database = Database(mock_connection)\n dataset = self.test_utils.load_sample_dataset()\n\n result = database.insert_dataset(sentinel.issue, dataset)\n\n self.assertIsNone(result)\n self.assertEqual(mock_cursor.execute.call_count, 20)\n\n last_query_values = mock_cursor.execute.call_args[0][-1]\n expected_query_values = (\n 0, sentinel.issue, 'MA', '2020-05-10', 53, 84, 15691, 73, 12427, 83,\n 3625, 84, None, 0, None, 0, None, 0, None, 0, None, 0, None, 0, None,\n 0, None, 0, None, 0, None, 0, None, 0, None, 0, 0.697850497273019, 72,\n 10876, 15585, 0.2902550897239881, 83, 3607, 12427, 0.21056656682174496,\n 73, 3304, 15691, None, None, None, None, None, None, None, None)\n self.assertEqual(len(last_query_values), len(expected_query_values))\n\n for actual, expected in zip(last_query_values, expected_query_values):\n if isinstance(expected, float):\n self.assertAlmostEqual(actual, expected)\n else:\n self.assertEqual(actual, expected)", "def create_test_df():\n test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [\n 10 * i for i in range(1, 1001)]})\n test_df['na_col'] = np.nan\n test_df['id_na'] = test_df.id\n test_df.loc[1:3, 'id_na'] = np.nan\n test_df['constant_col'] = 'constant'\n test_df['constant_col_num'] = 0\n test_df['character_factor'] = [\n choice(list('ABCDEFG')) for _ in range(1000)]\n test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]\n test_df['nearzerovar_variable'] = 'most_common_value'\n test_df.loc[0, 'nearzerovar_variable'] = 'one_value'\n test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]\n test_df['character_variable'] = [str(i) for i in range(1000)]\n test_df['duplicated_column'] = test_df.id\n test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700\n test_df['character_variable_fillna'] = ['A'] * \\\n 300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300\n 
test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200\n test_df['num_variable'] = 100.0\n test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]\n test_df['outlier'] = normal(size=1000)\n test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]\n test_df['outlier_na'] = test_df['outlier']\n test_df.loc[[300, 500], 'outlier_na'] = np.nan\n test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')\n test_df['None_100'] = [1] * 900 + [None] * 100\n test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100\n test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300\n test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300\n test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \\\n ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \\\n ['Unknown'] * 100 + ['do_not_touch'] * 200\n return test_df", "def test_read_data():\r\n test_data_r = pd.read_csv('Test_files/test_data.csv')\r\n test_data_c = pd.DataFrame({'id':[1,2,3,4],\r\n 'price':[8000,950,2400,1150],\r\n 'currency':['PLN','GBP','PLN','EU'],\r\n 'quantity':[6,1,1,2],\r\n 'matching_id':[1,3,2,2,]})\r\n\r\n assert_frame_equal(test_data_r, test_data_c)\r\n\r\n test_matching_r= pd.read_csv('Test_files/test_matching.csv')\r\n test_matching_c = pd.DataFrame({'matching_id':[1,2,3],\r\n 'top_priced_count':[1,2,1]})\r\n\r\n assert_frame_equal(test_matching_r,test_matching_c)\r\n\r\n test_currencies_r = pd.read_csv('Test_files/test_currencies.csv')\r\n test_currencies_c = pd.DataFrame({'currency':['GBP','EU','PLN'],\r\n 'ratio':[2.6,2.2,1]})\r\n\r\n assert_frame_equal(test_currencies_r,test_currencies_c)\r\n\r\n return test_data_r,test_matching_r,test_currencies_r", "def test_ehr_submission_data_cutoff(self, mock_get_affected_tables):\n # mocks the return value of get_affected_tables as we only want to loop through the\n # visit_occurrence not all of the CDM tables\n mock_get_affected_tables.return_value = [common.VISIT_OCCURRENCE]\n\n queries = []\n visit_occurrence_tmpl = self.jinja_env.from_string(\"\"\"\n INSERT INTO `{{fq_dataset_name}}.{{cdm_table}}`\n (visit_occurrence_id, person_id, visit_concept_id, visit_start_date, \n visit_start_datetime, visit_end_date, visit_end_datetime, visit_type_concept_id)\n VALUES\n (111, 222, 3, date('2018-03-06'), timestamp('2018-03-06 11:00:00'), \n date('2018-03-07'), timestamp('2018-03-07 11:00:00'), 4),\n (222, 333, 3, date('2019-03-06'), timestamp('2019-03-06 11:00:00'), \n date('2019-03-07'), timestamp('2019-03-07 11:00:00'), 4),\n (333, 444, 3, date('2020-03-06'), timestamp('2020-03-06 11:00:00'), \n date('2020-03-07'), timestamp('2020-03-07 11:00:00'), 4),\n (444, 555, 3, date('2021-03-06'), timestamp('2021-03-06 11:00:00'), \n date('2021-03-07'), timestamp('2021-03-07 11:00:00'), 4),\n (555, 666, 3, date('2022-03-06'), timestamp('2022-03-06 11:00:00'), \n date('2022-03-07'), timestamp('2022-03-07 11:00:00'), 4)\n \"\"\").render(fq_dataset_name=self.fq_dataset_name,\n cdm_table=common.VISIT_OCCURRENCE)\n queries.append(visit_occurrence_tmpl)\n\n self.load_test_data(queries)\n\n table_and_counts = [{\n 'fq_table_name':\n '.'.join([self.fq_dataset_name, 'visit_occurrence']),\n 'fq_sandbox_table_name':\n f'{self.fq_sandbox_name}.{self.rule_instance.sandbox_table_for(common.VISIT_OCCURRENCE)}',\n 'loaded_ids': [111, 222, 333, 444, 555],\n 'sandboxed_ids': [444, 555],\n 'fields': [\n 'visit_occurrence_id', 'person_id', 'visit_concept_id',\n 'visit_start_date', 
'visit_start_datetime', 'visit_end_date',\n 'visit_end_datetime', 'visit_type_concept_id'\n ],\n 'cleaned_values': [\n (111, 222, 3, parse('2018-03-06').date(),\n parse('2018-03-06 11:00:00 UTC'), parse('2018-03-07').date(),\n parse('2018-03-07 11:00:00 UTC'), 4),\n (222, 333, 3, parse('2019-03-06').date(),\n parse('2019-03-06 11:00:00 UTC'), parse('2019-03-07').date(),\n parse('2019-03-07 11:00:00 UTC'), 4),\n (333, 444, 3, parse('2020-03-06').date(),\n parse('2020-03-06 11:00:00 UTC'), parse('2020-03-07').date(),\n parse('2020-03-07 11:00:00 UTC'), 4)\n ]\n }]\n\n self.default_test(table_and_counts)", "def test_data_types(sdc_builder, sdc_executor, database, sql_type, insert_fragment, expected_type, expected_value, keep_data):\n table_name = get_random_string(string.ascii_lowercase, 20)\n connection = database.engine.connect()\n try:\n # Create table\n connection.execute(f\"\"\"\n CREATE TABLE {table_name}(\n id int primary key,\n data_column {sql_type} NULL\n )\n \"\"\")\n\n # And insert a row with actual value\n connection.execute(f\"INSERT INTO {table_name} VALUES(1, {insert_fragment})\")\n # And a null\n connection.execute(f\"INSERT INTO {table_name} VALUES(2, NULL)\")\n\n builder = sdc_builder.get_pipeline_builder()\n\n origin = builder.add_stage('MySQL Query Consumer')\n origin.sql_query = 'SELECT * FROM {0}'.format(table_name)\n origin.incremental_mode = False\n origin.on_unknown_type = 'CONVERT_TO_STRING'\n\n wiretap = builder.add_wiretap()\n\n origin >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 2)\n sdc_executor.stop_pipeline(pipeline)\n\n assert len(wiretap.output_records) == 2\n record = wiretap.output_records[0]\n null_record = wiretap.output_records[1]\n\n # Since we are controlling types, we want to check explicit values inside the record rather the the python\n # wrappers.\n # TLKT-177: Add ability for field to return raw value\n\n assert record.field['data_column'].type == expected_type\n assert null_record.field['data_column'].type == expected_type\n\n assert record.field['data_column']._data['value'] == expected_value\n assert null_record.field['data_column'] == None\n finally:\n if not keep_data:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n connection.execute(f\"DROP TABLE {table_name}\")", "def test_number_of_nulls(self):\n self.assertEqual(em.number_of_nulls(self.test_df), 3)", "def setUp(self):\n self.addTypeEqualityFunc(pandas.DataFrame, self.assertDataframeEqual)\n self.database_connection.connect()", "def assert_correct_and_equal(self, other: Union[pd.DataFrame, dict]):\n if isinstance(other, dict):\n other = pd.DataFrame.from_records(other)\n if not isinstance(other, pd.DataFrame):\n raise TypeError(\"other must be a dataframe or a dict!\")\n # Sort cols\n cols = list(self._data.columns) + [c for c in other.columns if c not in self._data.columns]\n other = other[cols]\n SampleDataSchema.to_schema().select_columns(self._data.columns).validate(other)\n assert_frame_equal(\n self._data.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n other.sort_values(by=list(self._data.columns)).reset_index(drop=True),\n )", "def test_insert_selected_columns(self):\n my_conn = MySQL(*self.conn_params)\n Base = declarative_base()\n current_dir = os.path.dirname(os.path.abspath(__file__))\n parsed_pcaxis = pyaxis.parse(current_dir + 
'/22350.px',\n encoding='ISO-8859-1')\n table_data = parsed_pcaxis['DATA']\n table_data = utils.parse_df_columns(table_data)\n table_data.name = 'ipc'\n\n class Ipc(Base):\n \"\"\"Auxiliary sqlalchemy table model for the tests.\"\"\"\n\n __tablename__ = 'ipc'\n\n id = Column(Integer, primary_key=True)\n comunidades_y_ciudades_autonomas = Column(String(100))\n grupos_ecoicop = Column(String(100))\n tipo_de_dato = Column(String(50))\n periodo = Column(String(50))\n data = Column(Float)\n\n Ipc.__table__.create(bind=my_conn.engine)\n insert_data = table_data[['grupos_ecoicop', 'data']]\n insert_data.name = 'ipc'\n\n my_conn.insert(insert_data, if_exists='append',\n columns=['grupos_ecoicop', 'data'])\n result_data = pd.read_sql_query('select * from ipc',\n con=my_conn.engine)\n self.assertTrue(\n result_data['comunidades_y_ciudades_autonomas'].isnull().all())\n self.assertTrue(result_data['periodo'].isnull().all())\n self.assertTrue(result_data['tipo_de_dato'].isnull().all())\n self.assertFalse(result_data['grupos_ecoicop'].isnull().all())\n self.assertFalse(result_data['data'].isnull().all())\n my_conn.drop('ipc')", "def sanity_checks(df: pd.DataFrame) -> None:\n df_temp = df.copy()\n # checks that the max date is less than tomorrow's date.\n assert datetime.datetime.strptime(df_temp['Date'].max(), '%Y-%m-%d') < (datetime.datetime.utcnow() + datetime.timedelta(days=1))\n # checks that there are no duplicate dates\n assert df_temp['Date'].duplicated().sum() == 0, 'One or more rows share the same date.'\n if 'Cumulative total' not in df_temp.columns:\n df_temp['Cumulative total'] = df_temp['Daily change in cumulative total'].cumsum()\n # checks that the cumulative number of tests on date t is always greater than the figure for t-1:\n assert (df_temp['Cumulative total'].iloc[1:] >= df_temp['Cumulative total'].shift(1).iloc[1:]).all(), \"On one or more dates, `Cumulative total` is greater on date t-1.\"\n # df.iloc[1:][df['Cumulative total'].iloc[1:] < df['Cumulative total'].shift(1).iloc[1:]]\n # cross-checks a sample of scraped figures against the expected result.\n assert len(sample_official_data) > 0\n for dt, d in sample_official_data:\n val = df_temp.loc[df_temp['Date'] == dt, SERIES_TYPE].squeeze().sum()\n assert val == d[SERIES_TYPE], f\"scraped value ({val:,d}) != official value ({d[SERIES_TYPE]:,d}) on {dt}\"\n return None", "def test_overall_report_columns():\n assert (len(overall_data['columns']) == 31)", "def test_complex_df_report():\n tz_df = pd.DataFrame(\n dict(\n duration_col=[timedelta(seconds=x) for x in range(30)],\n date_col=[date.today() for _ in range(30)],\n datetime_col=[datetime.utcnow() for _ in range(30)],\n datetimez_col=[datetime.now(timezone.utc) for _ in range(30)],\n )\n )\n\n raw_data = {\n \"first_name\": [\"Jason\", \"Molly\", \"Tina\", \"Jake\", \"Amy\"],\n \"last_name\": [\"Miller\", \"Jacobson\", \"Ali\", \"Milner\", \"Cooze\"],\n \"age\": [42, 52, 36, 24, 73],\n \"preTestScore\": [4, 24, 31, 2, 3],\n \"postTestScore\": [25, 94, 57, 62, 70],\n }\n index_df = pd.DataFrame(raw_data, columns=[\"first_name\", \"last_name\", \"age\", \"preTestScore\", \"postTestScore\"])\n df_desc = index_df.describe()\n df_desc_2 = df_desc.reset_index()\n\n tz_t = dp.DataTable(tz_df)\n index_t = dp.DataTable(index_df)\n df_desc_t = dp.DataTable(df_desc)\n df_desc_2_t = dp.DataTable(df_desc_2)\n\n with deletable(dp.Report(tz_t, index_t, df_desc_t, df_desc_2_t)) as dp_report:\n dp_report.publish(name=gen_name())\n\n # NOTE - as above, downloading embedded assets from a report 
currently not supported in API\n # check_df_equal(tz_df, tz_t.download_df())\n # check_df_equal(index_df, index_t.download_df())\n # check_df_equal(df_desc, df_desc_t.download_df())\n # check_df_equal(df_desc_2, df_desc_2_t.download_df())", "def test_get_metadata_df(self):\n\n # first need to populate LabMetadata tables\n from data_processors.lims.lambdas import labmetadata\n labmetadata.scheduled_update_handler({'event': \"test_get_metadata_df\"}, None)\n\n logger.info(f\"Lab metadata count: {LabMetadata.objects.count()}\")\n\n # SEQ-II validation dataset\n mock_bcl_workflow: Workflow = WorkflowFactory()\n mock_sqr: SequenceRun = mock_bcl_workflow.sequence_run\n mock_sqr.run_id = \"r.Uvlx2DEIME-KH0BRyF9XBg\"\n mock_sqr.instrument_run_id = \"200612_A01052_0017_BH5LYWDSXY\"\n mock_sqr.gds_volume_name = \"bssh.acddbfda498038ed99fa94fe79523959\"\n mock_sqr.gds_folder_path = f\"/Runs/{mock_sqr.instrument_run_id}_{mock_sqr.run_id}\"\n mock_sqr.sample_sheet_name = \"SampleSheet.csv\"\n mock_sqr.name = mock_sqr.instrument_run_id\n mock_sqr.save()\n\n mock_library_run = LibraryRun(\n instrument_run_id=mock_sqr.instrument_run_id,\n run_id=mock_sqr.run_id,\n library_id=\"L2000199\",\n lane=1,\n override_cycles=\"Y151;I8N2;U10;Y151\",\n )\n mock_library_run.save()\n\n samplesheet_path = f\"{mock_sqr.gds_folder_path}/{mock_sqr.sample_sheet_name}\"\n\n metadata_df = bcl_convert.get_metadata_df(\n gds_volume=mock_sqr.gds_volume_name,\n samplesheet_path=samplesheet_path\n )\n\n logger.info(\"-\" * 32)\n logger.info(f\"\\n{metadata_df}\")\n\n self.assertTrue(not metadata_df.empty)\n self.assertTrue(\"PTC_SsCRE200323LL_L2000172_topup\" in metadata_df[\"sample\"].tolist())\n\n if \"\" in metadata_df[\"override_cycles\"].unique().tolist():\n logger.info(\"-\" * 32)\n logger.info(\"THERE SEEM TO BE BLANK OVERRIDE_CYCLES METADATA FOR SOME SAMPLES...\")\n self.assertFalse(\"\" in metadata_df[\"override_cycles\"].tolist())\n # This probably mean need to fix data, look for corresponding Lab Metadata entry...\n\n library_id_list = metadata_df[\"library_id\"].tolist()\n library_run_list = libraryrun_srv.link_library_runs_with_x_seq_workflow(library_id_list, mock_bcl_workflow)\n self.assertIsNotNone(library_run_list)\n self.assertEqual(1, len(library_run_list))\n self.assertEqual(mock_library_run.library_id, library_run_list[0].library_id)\n\n library_run_in_workflows = mock_bcl_workflow.libraryrun_set.all()\n self.assertEqual(1, library_run_in_workflows.count())", "def test_get_top_3_bussiest_hours(self):\n test_df_data = [\n (1, \"2019\", \"5\", \"2\"),\n (1, \"2019\", \"4\", \"2\"),\n (2, \"2019\", \"2\", \"3\"),\n (3, \"2019\", \"5\", \"4\"),\n (2, \"2019\", \"15\", \"2\"),\n (1, \"2019\", \"20\", \"12\"),\n (3, \"2019\", \"21\", \"6\"),\n (3, \"2019\", \"4\", \"2\"),\n (1, \"2020\", \"5\", \"3\"),\n (2, \"2020\", \"15\", \"2\"),\n (1, \"2020\", \"20\", \"12\"),\n (4, \"2020\", \"23\", \"12\"),\n (1, \"2020\", \"21\", \"6\"),\n (5, \"2020\", \"4\", \"2\"),\n (1, \"2020\", \"5\", \"3\"),\n (1, \"2020\", \"5\", \"4\"),\n (4, \"2020\", \"11\", \"12\"),\n ]\n test_df_schema = StructType([\n StructField(\"passenger_count\", IntegerType(), False),\n StructField(\"year\", StringType(), False),\n StructField(\"week\", StringType(), False),\n StructField(\"hour\", StringType(), False)\n ])\n test_df = spark().createDataFrame(\n test_df_data,\n schema=test_df_schema,\n )\n\n expected_data = [\n (\"2\",),\n (\"12\",),\n (\"3\",)\n ]\n expected_df_schema = StructType([\n StructField(\"hour\", StringType(), False)\n ])\n 
expected_df = spark().createDataFrame(\n expected_data,\n schema=expected_df_schema,\n )\n actual_df = \\\n correctness_queries.get_top_3_bussiest_hours(test_df)\n\n expected_count = expected_df.count()\n equal_count = actual_df.join(\n expected_df,\n on=[\"hour\"],\n how=\"inner\"\n ).count()\n\n # compare pair details\n self.assertEqual(equal_count, expected_count)", "def test_table_counts():\n number_of_test_run = 2 # Run the pipeline twice\n for i in range(number_of_test_run):\n dp = DataPipeline()\n dp.run()\n\n dp = DataPipeline()\n assert dp.get_product_count() == (500000,)\n assert dp.get_duplicate_count(from_table=\"products\") == (0,)\n assert dp.get_aggregate_table_result_count() == (222024, )\n 222024\n dp.close()", "def save_df_1(obj, cur):\n\n df = obj.value\n\n t1 = clock()\n\n db_cols, db_type_map = get_db_cols(cur, 'test_dataframe_table')\n # print(db_cols)\n # print(db_type_map)\n\n t2 = clock()\n print(\"Get db specs: \", t2 - t1)\n\n t1 = clock()\n\n df_cols, df_rows, df_type_map = get_df_cols_rows(df)\n # print(df_cols)\n # print(df_type_map)\n\n t2 = clock()\n print(\"Get df specs: \", t2 - t1)\n\n t1 = clock()\n\n shared_items = [\n k for k in db_type_map\n if k in df_type_map and db_type_map[k] != df_type_map[k]\n ]\n if len(shared_items) != 0:\n print(\"A column type was changed, please don't do this.\")\n return\n\n inter_cols = [col for col in df_cols if col in db_cols]\n # print(inter_cols)\n add_cols = [col for col in df_cols if col not in db_cols]\n # print(add_cols)\n\n df_indices = tuple([r[0] for r in df_rows])\n\n t2 = clock()\n print(\"Inter_cols, add_cols and indices: \", t2 - t1)\n\n t1 = clock()\n\n sql = \"\"\"SELECT rid,{}\n FROM test_dataframe_table\n WHERE index in %s\n AND rid = (\n SELECT max(t2.rid)\n FROM test_dataframe_table AS t2\n WHERE t2.index = test_dataframe_table.index)\n \"\"\".format(','.join(inter_cols))\n cur.execute(sql, (df_indices, ))\n r_list = [r for r in cur]\n\n t2 = clock()\n print(\"Get data from db table: \", t2 - t1)\n\n t1 = clock()\n\n hash_list = [(hash(r[1:]), r[0]) for r in r_list]\n hash_dic = dict(hash_list)\n\n t2 = clock()\n print(\"Hash dic: \", t2 - t1)\n\n t1 = clock()\n\n rids = []\n update_rows = []\n new_rows = []\n for i, r in enumerate(df_rows):\n row = tuple([\n cast(r[0], df_type_map.get(r[1])) for r in zip(r, df_cols)\n if r[1] in db_cols\n ])\n # if i < 5:\n # print(row)\n rest_row = tuple(\n [r[0] for r in zip(r, df_cols) if r[1] not in db_cols])\n h = hash(row)\n if h in hash_dic:\n rid = hash_dic.get(h)\n update_rows.append((rid, rest_row))\n rids.append(rid)\n else:\n new_rows.append(r)\n\n t2 = clock()\n print(\"Compute update_rows and new_rows: \", t2 - t1)\n\n t1 = clock()\n\n if len(add_cols) != 0:\n alter_sql = \"ALTER TABLE test_dataframe_table {};\".format(','.join(\n map(lambda x: 'ADD COLUMN {} {}'.format(x, df_type_map.get(x)),\n add_cols)))\n cur.execute(alter_sql)\n update_sql = \"UPDATE test_dataframe_table SET {} WHERE rid = %s\".format(\n ','.join(map(lambda x: '{} = %s'.format(x), add_cols)))\n for (rid, rest_row) in update_rows:\n cur.execute(update_sql, (*rest_row, rid))\n\n t2 = clock()\n print(\"Alter table and update: \", t2 - t1)\n\n t1 = clock()\n\n insert_sql = \"\"\"\n INSERT INTO test_dataframe_table({})\n VALUES %s RETURNING rid\n \"\"\".format(','.join(df_cols))\n new_rids = execute_values(cur, insert_sql, new_rows, fetch=True)\n new_rids = [x[0] for x in new_rids]\n print(len(new_rids))\n rids.extend(new_rids)\n\n t2 = clock()\n print(\"Insert new tuples: \", t2 - 
t1)\n\n t1 = clock()\n\n insert_versioning_sql = \"INSERT INTO test_dataframe_object(t, lineno, name, rlist, clist) VALUES (%s, %s, %s, %s, %s)\"\n args = (obj.time, obj.lineno, obj.name, rids, df_cols)\n cur.execute(insert_versioning_sql, args)\n\n t2 = clock()\n print(\"Save version: \", t2 - t1)", "def test_success(database):\n # Create a 12 character random fain\n fain_1 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_2 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_3 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_4 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n\n # Just some basic sums to make sure it works\n af_1_row_1 = AwardFinancialFactory(transaction_obligated_amou=1100, fain=fain_1, allocation_transfer_agency=None)\n af_1_row_2 = AwardFinancialFactory(transaction_obligated_amou=11, fain=fain_1.lower(),\n allocation_transfer_agency=None)\n # Non-ignored rows with a matching ATA/AID\n af_2_row_1 = AwardFinancialFactory(transaction_obligated_amou=9900, fain=fain_2, allocation_transfer_agency=None)\n af_2_row_2 = AwardFinancialFactory(transaction_obligated_amou=99, fain=fain_2, allocation_transfer_agency='good',\n agency_identifier='good')\n # Ignored row with non-matching ATA/AID\n af_3 = AwardFinancialFactory(transaction_obligated_amou=8888, fain=fain_3, allocation_transfer_agency='good',\n agency_identifier='bad')\n # No TOA in File C, ignored\n af_4 = AwardFinancialFactory(transaction_obligated_amou=None, piid=fain_4.lower(),\n allocation_transfer_agency='good', agency_identifier='good')\n\n # Fain sums for AFA\n afa_1_row_1 = AwardFinancialAssistanceFactory(fain=fain_1, federal_action_obligation=-1100,\n original_loan_subsidy_cost=None, record_type='2')\n afa_1_row_2 = AwardFinancialAssistanceFactory(fain=fain_1.lower(), federal_action_obligation=-10,\n original_loan_subsidy_cost=None, record_type='3')\n # original loan subsidy cost used in this row because assistance type is '08'\n afa_1_row_3 = AwardFinancialAssistanceFactory(fain=fain_1, original_loan_subsidy_cost=-1, assistance_type='08',\n federal_action_obligation=None, record_type='2')\n # federal action obligation used in this row (it's 0), because assistance type is not 07 and 08\n afa_1_row_4 = AwardFinancialAssistanceFactory(fain=fain_1, original_loan_subsidy_cost=-2222, assistance_type='09',\n federal_action_obligation=None, record_type='3')\n # Ignored because record type 1\n afa_1_row_5 = AwardFinancialAssistanceFactory(fain=fain_1, federal_action_obligation=-1100,\n original_loan_subsidy_cost=None, record_type='1')\n # Fain 2 Test for non-ignored ATA\n afa_2 = AwardFinancialAssistanceFactory(fain=fain_2, federal_action_obligation=-9999,\n original_loan_subsidy_cost=None, record_type='2')\n # Fain 3 test for ignoring a non-matching ATA/AID\n afa_3 = AwardFinancialAssistanceFactory(fain=fain_3, federal_action_obligation=-9999, record_type='3')\n\n # This one matches but will be ignored\n afa_4 = AwardFinancialAssistanceFactory(fain=fain_4, federal_action_obligation=-9999)\n\n errors = number_of_errors(_FILE, database, models=[af_1_row_1, af_1_row_2, af_2_row_1, af_2_row_2, af_3, af_4,\n afa_1_row_1, afa_1_row_2, afa_1_row_3, afa_1_row_4, afa_1_row_5,\n afa_2, afa_3, afa_4])\n assert errors == 0", "def test_chain_fn(self):\n df1 = pd.DataFrame({'a': [1, 2, np.NaN], 'b': [\"hi\", np.NaN, \"ho\"]})\n onlyA = df1.dropna(subset=[1], axis=1)\n\n comp: pd.DataFrame = 
onlyA.loc[0:1] == [1, 2] # helps with the mistaking type error highlighting in pycharm\n self.assertTrue(all(comp))\n self.assertTrue(all(np.isnan(onlyA.loc[2])))\n\n self.assertTrue(pd.DataFrame({'a': [1.0], 'b': ['hi']}).equals(df1.dropna()))\n\n df2 = pd.DataFrame({'a': [1, 2, np.NaN, np.NaN], 'b': [\"hi\", None, \"ho\", None]})\n self.assertTrue(df1.equals(df2.dropna(how='all')))\n\n df2.fillna(-1)\n # both columns have two NaN\n self.assertEquals((df2.fillna(-1) == -1.0).a.value_counts()[True], 2)\n self.assertEquals((df2.fillna(-1) == -1.0).b.value_counts()[True], 2)\n self.assertTrue(pd.Series([2, 2], index=['a', 'b']).equals(df2.isnull().sum()))\n\n df2.a.map(np.isnan).value_counts()[True] # replaced by isnull().sum() above, and only works for numeric cols\n\n df2.apply(lambda row: row.isnull().sum(), axis=1) # count of missing values per row\n df2.apply(lambda col: col.isnull().sum()) # count of missing values per column\n df2.isnull().sum(axis=1) # count of missing values per row\n df2.isnull().sum(axis=0) # count of missing values per column\n\n comp2: np.ndarray = df2.isin([1.0, 2, \"ho\"]).values.sum(axis=0) == [2, 1]\n self.assertTrue(all(comp2))\n\n df3 = pd.DataFrame({'a': [1, 1, 2, 2], 'b': [1, 2, 3, 4]})\n\n df_a_sb = df3.groupby('a').b.sum()\n self.assertTrue(df_a_sb.equals(pd.Series([3,7], index=[1, 2]))) # names ignored in comparison\n pd.Series([3, 7], index=pd.Series([1, 2], name='a'), name='b') # but can set the names if needed\n\n df4 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [11, 22, 33, 44]},\n index=pd.MultiIndex.from_product([[1,2], ['aa', 'bb']], names=['idx1', 'idx2']))\n df4.columns = pd.Series(['a', 'b'], name=\"idx_c\")\n df4.unstack(0)\n df4.unstack(1)\n\n df5 = pd.DataFrame([[1, 3], [2, 4], [11, 33], [22, 44]],\n columns=pd.Series([1, 2], name=\"idx1\"),\n index=pd.MultiIndex.from_product([['a', 'b'], ['aa','bb']], names=['idx_c', 'idx2'])).T\n self.assertTrue(df5.equals(df4.unstack(1)))\n self.assertTrue(str(df5) == str(df4.unstack(1)))\n\n df5.stack(0)\n\n df6 = pd.DataFrame({'x': np.arange(100),\n 'y': np.concatenate([np.repeat(1, 50), np.repeat(2, 50)]),\n 'dat': pd.date_range(\"20170101\", periods=100, freq='min')})\n df6 = df6.set_index('dat')\n df6.groupby([pd.TimeGrouper('H')]).sum()\n df7 = df6.groupby(['y', pd.TimeGrouper('H')]).sum()\n\n df6.x.rolling(10).sum()", "def test_failure(database):\n # Create a 12 character random fain\n fain_1 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_2 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_3 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n fain_4 = ''.join(choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(12))\n\n # Simple addition that doesn't add up right\n af_1_row_1 = AwardFinancialFactory(transaction_obligated_amou=1100, fain=fain_1, allocation_transfer_agency=None)\n af_1_row_2 = AwardFinancialFactory(transaction_obligated_amou=11, fain=fain_1.lower(),\n allocation_transfer_agency=None)\n # Incorrect addition based on assistance type in AFA\n af_2 = AwardFinancialFactory(transaction_obligated_amou=9999, fain=fain_2, allocation_transfer_agency=None)\n # Don't ignore when ATA and AID match\n af_3 = AwardFinancialFactory(transaction_obligated_amou=1111, fain=fain_3, allocation_transfer_agency='good',\n agency_identifier='good')\n # Not ignored with TOA of 0\n af_4 = AwardFinancialFactory(transaction_obligated_amou=0, fain=fain_4, allocation_transfer_agency='good',\n 
agency_identifier='good')\n\n # Sum of this fain doesn't add up to af fain sum\n afa_1_row_1 = AwardFinancialAssistanceFactory(fain=fain_1, federal_action_obligation=-1100,\n original_loan_subsidy_cost=None, record_type='2')\n afa_1_row_2 = AwardFinancialAssistanceFactory(fain=fain_1.lower(), federal_action_obligation=-10,\n original_loan_subsidy_cost=None, record_type='3')\n # Both of these rows use the column that isn't filled in for summing so neither results in the correct number\n afa_2_row_1 = AwardFinancialAssistanceFactory(fain=fain_2, federal_action_obligation=-9999,\n original_loan_subsidy_cost=None, record_type='2')\n afa_2_row_2 = AwardFinancialAssistanceFactory(fain=fain_2, federal_action_obligation=None,\n original_loan_subsidy_cost=-9999, assistance_type='07',\n record_type='3')\n # This shouldn't be ignored\n afa_3 = AwardFinancialAssistanceFactory(fain=fain_3, federal_action_obligation=0, original_loan_subsidy_cost=None,\n record_type='2')\n # Shouldn't be ignored with a TOA of 0\n afa_4 = AwardFinancialAssistanceFactory(fain=fain_4, federal_action_obligation=1, original_loan_subsidy_cost=None,\n record_type='2')\n\n errors = number_of_errors(_FILE, database, models=[af_1_row_1, af_1_row_2, af_2, af_3, af_4, afa_1_row_1,\n afa_1_row_2, afa_2_row_1, afa_2_row_2, afa_3, afa_4])\n assert errors == 4", "def test_multiple(self):\n df = self.df.copy()\n out = get_full_column(df.values)\n self.assertTrue(out == 0)" ]
[ "0.7033019", "0.6735344", "0.66333157", "0.644243", "0.6424497", "0.6372388", "0.63426524", "0.6339254", "0.62987274", "0.62735826", "0.62442666", "0.6217152", "0.6199997", "0.617955", "0.61647725", "0.61497754", "0.6148181", "0.6148095", "0.6096861", "0.60874194", "0.6004757", "0.60034883", "0.60022587", "0.59972197", "0.5982742", "0.59642524", "0.59636545", "0.5949602", "0.5948745", "0.5941207" ]
0.78982604
0
1. testing the build_graph method returns the correct string, and waiting for file to open (less than 1 sec)
def test_build_graph(self):
    insert_good_data()
    dataframe = get_dataframe()
    results = processing.build_graph(dataframe, figure_path, False)  # 1
    self.assertEqual(results, "Updated html File and Opened it")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testGraphExtract(self):\n graph = Graph2()\n graph.parseFile(TESTFILE)", "async def get_graph_for_file(\n file_name: str,\n score: int = 0,\n par_length: int = 0,\n co_occ: int = 0,\n target_collection: List[str] = Query([]),\n):\n database = get_db()\n query_graph_result = database.AQLQuery(\n query=main_queries.QUERY_GRAPH_VIEW,\n batchSize=15000,\n bindVars={\n \"filename\": file_name,\n \"score\": score,\n \"parlength\": par_length,\n \"coocc\": co_occ,\n \"targetcollection\": target_collection,\n },\n )\n collection_keys = []\n total_collection_dict = {}\n total_histogram_dict = {}\n\n # extract a dictionary of collection numbers and number of parallels for each\n for parallel in query_graph_result.result:\n count_this_parallel = parallel[\"parlength\"]\n target_filename = re.sub(\"_[0-9][0-9][0-9]\",\"\",parallel[\"textname\"])\n if target_filename in total_histogram_dict.keys():\n total_histogram_dict[target_filename] += count_this_parallel\n else:\n total_histogram_dict[target_filename] = count_this_parallel\n\n collection_key = re.search(COLLECTION_PATTERN, target_filename)\n\n if not collection_key:\n continue\n\n collection = collection_key.group()\n if collection not in total_collection_dict.keys():\n total_collection_dict[collection] = count_this_parallel\n else:\n total_collection_dict[collection] += count_this_parallel\n if collection not in collection_keys:\n collection_keys.append(collection)\n\n # find the proper full names vor each collection\n collections = database.AQLQuery(\n query=menu_queries.QUERY_COLLECTION_NAMES,\n bindVars={\n \"collections\": collection_keys,\n \"language\": get_language_from_filename(file_name),\n },\n )\n\n collections_with_full_name = {}\n for collection_result in collections.result[0]:\n collections_with_full_name.update(collection_result)\n\n parallel_graph_name_list = {}\n for key in total_collection_dict:\n parallel_graph_name_list.update(\n {key + \" \" + collections_with_full_name[key]: total_collection_dict[key]}\n )\n\n unsorted_graphdata_list = list(map(list, parallel_graph_name_list.items()))\n\n histogram_data = []\n for name, count in total_histogram_dict.items():\n displayname = name\n query_displayname = database.AQLQuery(\n query=main_queries.QUERY_DISPLAYNAME,\n bindVars={\n \"filename\": name\n },\n rawResults=True\n )\n displayname_results = query_displayname.result\n if displayname_results:\n displayname = displayname_results[0][0] + ' (' + displayname_results[0][1] + ')'\n\n histogram_data.append([displayname, count])\n\n # returns a list of the data as needed by Google Graphs\n return {\n \"piegraphdata\": sorted(\n unsorted_graphdata_list, reverse=True, key=lambda x: x[1]\n ),\n \"histogramgraphdata\": sorted(histogram_data, reverse=True, key=lambda x: x[1]),\n }", "def test_watch_graph_caches(self):\n self.make_files(foo='foo', bar='bar')\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n watcher = pike.watch_graph(graph)\n ret = watcher.run()\n self.assertEqual(len(ret['default']), 2)\n with self.assertRaises(pike.StopProcessing):\n watcher.run()", "def os_open_graph( self, ):\r\n pass", "def functionality_1(path_to_txt):\n print(\"you are invoking minimum spanning tree + Dijkstra algorithm\")\n gc = GraphConstructor()\n gc.graph_from_combined_file(path_to_txt)\n vertices, dm = gc.dijkstra_input_vertices_and_distance_matrix()\n source_vertex = 0\n start_time = time.process_time()\n got = Dijkstra_to_find_shortest_distances(source_vertex, vertices, dm)\n print(\"the result of minimum spanning tree + 
Dijkstra algorithm is:\")\n print(\"[{}]\".format(\", \".join([str(val) for val in got])))\n print(\"the execution takes time: {}\".format(str(time.process_time() - start_time)))", "def test_init_flowgram_file(self):\r\n fh, tmp_filename = init_flowgram_file(n=100, l=400)\r\n self.assert_(exists(tmp_filename))\r\n self.tmp_filename = tmp_filename\r\n fh.close()\r\n result_file_content = list(open(tmp_filename))\r\n\r\n self.assertEqual(result_file_content, [\"100 400\\n\"])", "def test_graph(reformfile1):\n # create graphable input\n nobs = 100\n idict = dict()\n idict['RECID'] = [i for i in range(1, nobs + 1)]\n idict['MARS'] = [2 for i in range(1, nobs + 1)]\n idict['s006'] = [10.0 for i in range(1, nobs + 1)]\n idict['e00300'] = [10000 * i for i in range(1, nobs + 1)]\n idict['_expanded_income'] = idict['e00300']\n idf = pd.DataFrame(idict, columns=list(idict))\n # create TaxCalcIO graph files\n tcio = TaxCalcIO(input_data=idf,\n tax_year=2020,\n reform=reformfile1.name,\n assump=None,\n growdiff_response=None,\n aging_input_data=False,\n exact_calculations=False)\n tcio.static_analysis(writing_output_file=False,\n output_graph=True)\n # delete graph files\n output_filename = tcio.output_filepath()\n fname = output_filename.replace('.csv', '-atr.html')\n if os.path.isfile(fname):\n os.remove(fname)\n fname = output_filename.replace('.csv', '-mtr.html')\n if os.path.isfile(fname):\n os.remove(fname)", "def run_program(file_name_root, words_to_use_file, desired_depth, max_size):\n\n # Checks.\n if not isinstance(file_name_root, str) and not isinstance(words_to_use_file, str):\n raise ValueError(\"'str required here. Created by @Edd1e234'\")\n if not isinstance(desired_depth, int) and not isinstance(max_size, int):\n raise ValueError(\"'bool' and 'int' required here. 
Created by @Edd1e234\")\n\n tree = BTree(max_size)\n\n total_time_start = time.time()\n file_read_time_start = time.time()\n read_file_into_tree(file_name_root, tree)\n file_read_time_end = time.time()\n file_read_time = file_read_time_end - file_read_time_start\n print(\"Finished reading tree: \", file_read_time)\n\n if tree.root is None:\n print(\"TREE ROOT IS NONE\")\n\n raise SystemError(\"Something Went Wrong, tree root is none, text file empty?\"\n \"Created by @Edd1e234\")\n\n sim_time_start = time.time()\n read_file_sim(words_to_use_file, tree)\n sim_time_end = time.time()\n sim_time = sim_time_end - sim_time_start\n\n print(\"Finished Sim: \", sim_time, \"\\n\")\n\n print(\"Solution A\")\n print(\"Total node amount in tree \", tree.total_nodes)\n print(\"No time needed this is one operation.\\n\")\n\n height_timer_start = time.time()\n print(\"Tree height is, \", get_tree_height(tree))\n height_timer_end = time.time()\n\n height_time = height_timer_end - height_timer_start\n\n print(\"Get Solution B\")\n print(\"Height timer took \",\n height_time, \"\\n\")\n\n get_all_words_start = time.time()\n get_all_words(\"all.words.in.list.txt\", tree)\n get_all_words_end = time.time()\n\n get_all_words_time = get_all_words_end - get_all_words_start\n\n print(\"Solution C\")\n print(\"Finished printing all words: \",\n get_all_words_time, \"\\n\")\n\n desired_depth_time = -1\n # Try to eliminate steps if possible.\n if desired_depth is not None or desired_depth is not 0:\n desired_depth_start = time.time()\n get_desired_depth(\"words.at.desired.depth.txt\", tree, desired_depth)\n desired_depth_end = time.time()\n desired_depth_time = desired_depth_end - desired_depth_start\n\n print(\"Solution D:\")\n print(\"Finished Desired Depth: \",\n desired_depth_time, \"\\n\")\n\n total_time_end = time.time()\n total_time = total_time_end - total_time_start\n print(\"Total Time is: \", total_time)\n\n return file_read_time, sim_time, height_time, get_all_words_time, desired_depth_time, total_time", "def graphSearch(data, simFunction, paramConfig={}):\n # There are some fields can be config by user,\n # If user specified these fields in paramConfig, \n # overload these parameters to userConfig\n overloadConfig(userConfig, paramConfig)\n # Parse the input json file and read out the parameters\n params = parseRrtInputFile(data)\n # Construct objects\n checker = UniformChecker(params.unsafeSet, params.variables)\n goalSetChecker = GoalChecker(params.goalSet, params.variables)\n distanceChecker = DistChecker(params.goal, params.variables)\n # Read the important param\n availableModes = params.modes\n startModes = params.modes\n remainTime = params.timeHorizon\n minTimeThres = params.minTimeThres\n\n # Set goal rach flag to False\n # Once the flag is set to True, It means we find a transition Graph\n goalReached = False\n\n # Build the initial mode stack\n # Current Method is ugly, we need to get rid of the initial Mode for GraphSearch\n # It helps us to achieve the full automate search\n # TODO Get rid of the initial Mode thing\n random.shuffle(startModes)\n dummyNode = GraphSearchNode(\"start\", remainTime, minTimeThres, 0)\n for mode in startModes:\n dummyNode.children[mode] = GraphSearchNode(mode, remainTime, minTimeThres, dummyNode.level+1)\n dummyNode.children[mode].parent = dummyNode\n dummyNode.children[mode].initial = (params.initialSet[0], params.initialSet[1])\n\n curModeStack = dummyNode.children[startModes[0]]\n dummyNode.visited.add(startModes[0])\n \n startTime = time.time()\n while 
True:\n\n if not curModeStack:\n break\n\n if curModeStack == dummyNode:\n startModes.pop(0)\n if len(startModes)==0:\n break\n \n \n curModeStack = dummyNode.children[startModes[0]]\n dummyNode.visited.add(startModes[0])\n continue\n \n print str(curModeStack)\n\n # Keep check the remain time, if the remain time is less than minTime\n # It means it is impossible to stay in one mode more than minTime\n # Therefore, we have to go back to parents\n if curModeStack.remainTime < minTimeThres:\n print \"Back to previous mode because we cannot stay longer than the min time thres\"\n curModeStack = curModeStack.parent\n continue\n\n # If we have visited all available modes\n # We should select a new candidate point to proceed\n # If there is no candidates available,\n # Then we can say current node is not valid and go back to parent\n if len(curModeStack.visited) == len(availableModes):\n if len(curModeStack.candidates)<2:\n print \"Back to previous mode because we do not have any other modes to pick\"\n curModeStack = curModeStack.parent\n # If the tried all possible cases with no luck to find path\n if not curModeStack:\n break\n continue\n else:\n print \"Pick a new point from candidates\"\n curModeStack.candidates.pop(0)\n curModeStack.visited = set()\n curModeStack.children = {}\n continue\n\n\n # Generate bloated tube if we haven't done so\n if not curModeStack.bloatedTube:\n print \"no bloated tube find in this mode, generate one\"\n curBloatedTube = clacBloatedTube(\n curModeStack.mode,\n curModeStack.initial,\n curModeStack.remainTime,\n simFunction,\n params.bloatingMethod,\n params.kvalue,\n userConfig.SIMTRACENUM\n )\n\n # Cut the bloated tube once it intersect with the unsafe set\n curBloatedTube = checker.cutTubeTillUnsafe(curBloatedTube)\n\n # If the tube time horizon is less than minTime, it means\n # we cannot stay in this mode for min thres time, back to the parent node\n if not curBloatedTube or curBloatedTube[-1][0] < minTimeThres:\n print \"bloated tube is not long enough, discard the mode\"\n curModeStack = curModeStack.parent\n continue\n curModeStack.bloatedTube = curBloatedTube\n\n # Generate candidates points for next node\n randomSections = curModeStack.randomPicker(userConfig.RANDSECTIONNUM)\n\n if not randomSections:\n print \"bloated tube is not long enough, discard the mode\"\n curModeStack = curModeStack.parent\n continue\n\n # Sort random points based on the distance to the goal set\n randomSections.sort(key=lambda x: distanceChecker.calcDistance(x[0], x[1]))\n curModeStack.candidates = randomSections\n print \"Generate new bloated tube and candidate, with candidates length\", len(curModeStack.candidates)\n \n\n # Check if the current tube reaches goal\n result, tube = goalSetChecker.goalReachTube(curBloatedTube)\n if result:\n curModeStack.bloatedTube = tube\n goalReached = True\n break\n\n # We have visited all next mode we have, generate some thing new\n # This is actually not necssary, just shuffle all modes would be enough\n # There should not be RANDMODENUM things since it does not make any difference\n # Anyway, for each candidate point, we will try to visit all modes eventually\n # Therefore, using RANDMODENUM to get some random modes visit first is useless\n # TODO, fix this part\n if len(curModeStack.visited) == len(curModeStack.children):\n # leftMode = set(availableModes) - set(curModeStack.children.keys())\n # randomModes = random.sample(leftMode, min(len(leftMode), RANDMODENUM))\n # random.shuffle(randomModes)\n randomModes = availableModes\n 
random.shuffle(randomModes)\n\n randomSections = curModeStack.randomPicker(userConfig.RANDSECTIONNUM)\n for mode in randomModes:\n candidate = curModeStack.candidates[0]\n curModeStack.children[mode] = GraphSearchNode(mode, curModeStack.remainTime-candidate[1][0], minTimeThres, curModeStack.level+1)\n curModeStack.children[mode].initial = (candidate[0][1:], candidate[1][1:])\n curModeStack.children[mode].parent = curModeStack\n\n # Random visit a candidate that is not visited before\n for key in curModeStack.children:\n if not key in curModeStack.visited:\n break\n\n print \"transit point is\", curModeStack.candidates[0]\n curModeStack.visited.add(key)\n curModeStack = curModeStack.children[key]\n\n # Back track to print out trace\n print \"RRT run time\", time.time()-startTime\n if goalReached:\n print(\"goal reached\")\n traces = []\n modes = []\n while curModeStack:\n modes.append(curModeStack.mode)\n if not curModeStack.candidates:\n traces.append([t for t in curModeStack.bloatedTube])\n else:\n # Cut the trace till candidate\n temp = []\n for t in curModeStack.bloatedTube:\n if t == curModeStack.candidates[0][0]:\n temp.append(curModeStack.candidates[0][0])\n temp.append(curModeStack.candidates[0][1])\n break\n else:\n temp.append(t)\n traces.append(temp)\n if curModeStack.parent != dummyNode:\n curModeStack = curModeStack.parent\n else:\n break\n # Reorganize the content in modes list for plotter use\n modes = modes[::-1]\n traces = traces[::-1]\n buildRrtGraph(modes, traces, isIpynb())\n for i in range(1, len(modes)):\n modes[i] = modes[i-1]+'->'+modes[i]\n\n writeRrtResultFile(modes, traces, RRTOUTPUT)\n else:\n print(\"could not find graph\")", "def build_graph(self):\n self.import_tree(ZOO_PATH, self.import_zoo, self.verify_zoos)\n self.import_tree(WILD_PATH, self.import_wild, self.verify_wilds)\n self.import_tree(PANDA_PATH, self.import_redpanda, self.verify_pandas)\n self.import_tree(MEDIA_PATH, self.import_media, self.verify_media)", "def single_epoch(g,rows,cols,midpoint):\n\n num_top = 10 \n #3 for 8x8\n one_to_select = 0 \n top_nodes = g.top_n_nodes(num_top)\n '''\n for k in range(num_top):\n node_num = top_nodes[k]\n trip_list = g.node2trip_ids[node_num]\n print \"Next Midpoint: %d\" % k\n print node_num\n print g.node_to_coords(node_num)\n print \"Num trips: %d\" % len(trip_list)\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n #\"\"\"\n '''\n\n #trip_list = g.node2trip_ids[g.best_node]\n #midpoint = top_nodes[one_to_select]\n trip_list = g.node2trip_ids[midpoint]\n print \"Selected midpoint: %d\" % midpoint \n print g.node_to_coords(midpoint)\n out_file = open(\"datasets/full_data_%d_%d_%d.txt\" % (rows,cols,midpoint),'w')\n partial_file = open(\"datasets/partials_%d_%d_%d.txt\" % (rows,cols,midpoint), 'w')\n for i in range(len(trip_list)):\n trip_id = trip_list[i]\n line_num = g.trip_id2line_num[trip_id]\n p = Path(trip_id,g,line_num=line_num,midpoint=midpoint)\n \"\"\"\n print i\n print trip_id\n p.print_path()\n for i in 
range(p.graph.num_edges):\n if p.edges[i]:\n sys.stdout.write(\"%d, \" % (i + 1))\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"1s: \")\n for key in p.partials.keys():\n if p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n0s: \")\n for key in p.partials.keys():\n if not p.partials[key]:\n sys.stdout.write(\"%d, \" % (key + 1))\n sys.stdout.write(\"\\n\")\n \"\"\"\n out_string = str(p.edges)[1:-1]\n out_file.write(\"%s\\n\" % out_string)\n for i in range(p.graph.num_edges):\n if i in p.partials.keys():\n partial_file.write(\"%d\" % p.partials[i])\n else:\n partial_file.write(\"-1\")\n if i < p.graph.num_edges-1:\n partial_file.write(\",\")\n partial_file.write(\"\\n\")\n\n out_file.close()", "def main(content):\n # with open(in_path) as f:\n redi = redis.StrictRedis(host=\"10.243.55.67\", port=6379)\n filename = content[0].split('/')[-1]\n file_content = content[1]\n #file_content=content\n # line = f.read()\n # hdfs_client = Client('http://10.243.55.67:50070/', root='/')\n # outpath = os.path.join(\"/bres\", middle_path)\n # l = []\n ret = {}\n ret[\"name\"] = \"\"\n ret[\"alias\"] = \"\"\n # index get id\n index = filename.split('__')[0]\n \n # index = in_path.split(split_char)[0].split('/')[-1]\n ret[\"vid\"] = index\n redis_result = redi.get(str(index)).decode(\"utf-8\").split(\"@@\")\n if len(redis_result) == 1:\n ret[\"name\"] = redis_result[-1]\n elif len(redis_result) == 2:\n ret[\"name\"] = redis_result[0]\n ret['alias'] = redis_result[1]\n try:\n obj = json.loads(file_content, encoding=\"utf8\")\n # url, content, status = obj[\"url\"], obj[\"content\"], obj[\"status\"]\n url, content = obj[\"url\"], obj[\"content\"]\n html_handler = html_parse(content, url)\n handler = BaikeGraph()\n # ret[\"url\"] = url\n content = html_handler.html_clean()\n # parse title tag box\n ret[\"iid\"] = html_handler.parse_itemId()\n #html_handler.create_rel(index,ret[\"iid\"])\n boxes = html_handler.parse_box_new()\n desc = html_handler.parse_desc_new()\n titles = html_handler.parse_title_new()\n tags = html_handler.parse_tag_new()\n dict_final = ChainMap(boxes, desc, ret, titles, tags)\n handler.create_baike_node(dict(dict_final))\n html_handler.parse_polysemantic()\n\n except Exception as e:\n print('exception in main @@@@@@@@@@@@@@@@@2222')\n print(e)\n pass\n return", "def test_watch_graph_changes(self):\n self.make_files(foo='foo', bar='bar')\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n watcher = pike.watch_graph(graph)\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'bar'])\n self.make_files(foo='foo', bar='foo')\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'foo'])", "def test_state(self):\n # blocks [0 3583] [3840 4058]\n test_file1 = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-206-2-0.mdd')\n # blocks [0 1279] [1536 1791] [2048 2303] [2560 2815] [3072 4059]\n test_file2 = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-206-3-0.mdd')\n\n # parse the two .mdd files into the node and instrument group files\n mdd.procall([test_file1, test_file2])\n\n file_state = self.get_file_state('node58p1.dat')\n # there is an unprocessed '/n' in between records\n expected_file_state = {StateKey.UNPROCESSED_DATA: [[4059, 4060]],\n StateKey.FILE_SIZE: 4060,\n StateKey.OUTPUT_INDEX: 1}\n\n if file_state != expected_file_state:\n print file_state\n self.fail(\"Expected file state 1 does not match\")\n\n # blocks [0 2047] [2304 4095] [4096 7451]\n test_file3 = 
os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-206-6-0.mdd')\n\n # parse another .mdd file adding on to the node file, and making\n # another sequence of instrument group files\n mdd.procall([test_file3])\n\n file_state = self.get_file_state('node58p1.dat')\n expected_file_state = {StateKey.UNPROCESSED_DATA: [[4059, 4060]],\n StateKey.FILE_SIZE: 7452,\n StateKey.OUTPUT_INDEX: 2}\n\n if file_state != expected_file_state:\n print \"file state: '%s'\" % file_state\n self.fail(\"Expected file state 2 does not match\")\n\n data_orig = self.read_full_file('node58p1.dat')\n\n # read the data from all generated files into one data string\n data_out = self.read_full_file('node58p1_0.status_1236801.dat')\n data_out += self.read_full_file('node58p1_0.wa_wfp_1236820.dat')\n data_out += self.read_full_file('node58p1_0.wc_wfp_1236820.dat')\n data_out += self.read_full_file('node58p1_0.we_wfp_1236820.dat')\n data_out += self.read_full_file('node58p1_1.status_1236801.dat')\n data_out += self.read_full_file('node58p1_1.wa_wfp_1236822.dat')\n data_out += self.read_full_file('node58p1_1.wc_wfp_1236822.dat')\n data_out += self.read_full_file('node58p1_1.we_wfp_1236822.dat')\n\n # confirm data in the node file matches those output in the instrument groups\n if not TestSioUnpack.compare_sio_matches(data_orig, data_out):\n self.fail(\"Failed sio block compare\")", "def _build_graph(self):\n pass", "def test_load_graph(self):\n config = {\n 'network_params': {\n 'path': join(ROOT, 'test.gexf')\n }\n }\n G = serialization.load_network(config['network_params'])\n assert G\n assert len(G) == 2\n with self.assertRaises(AttributeError):\n config = {\n 'network_params': {\n 'path': join(ROOT, 'unknown.extension')\n }\n }\n G = serialization.load_network(config['network_params'])\n print(G)", "def get_sim(file, graph1, graph2, pickle_name = \"\"):\r\n def build_sim(file, graph1, graph2):\r\n similarity = np.zeros((len(graph1), len(graph2)))\r\n if file.endswith(\"xz\"):\r\n with lzma.open(file, mode = 'rt') as f:\r\n for line in f:\r\n node_y, node_h, sim_val = line.strip().split(\" \") # 8 seconds\r\n try:\r\n node_y, node_h, sim_val = graph1.indexes[node_y], graph2.indexes[node_h], float(sim_val) #17 seconds\r\n similarity[node_y][node_h] = sim_val\r\n except:\r\n pass\r\n else:\r\n with open(file, mode = 'rt') as f:\r\n for line in f:\r\n node_y, node_h, sim_val = line.strip().split(\" \") # 8 seconds\r\n try:\r\n node_y, node_h, sim_val = graph1.indexes[node_y], graph2.indexes[node_h], float(sim_val) #17 seconds\r\n similarity[node_y][node_h] = sim_val\r\n except:\r\n pass\r\n return similarity\r\n \r\n #if \".sim.pickle\" == pickle_name:\r\n # pickle_name = graph1.name + graph2.name + pickle_name\r\n if pickle_name == \"\":\r\n pickle_name = graph1.name + graph2.name + \".sim.pickle\"\r\n try:\r\n with open(pickle_name,'rb') as f:\r\n return pickle.load(f)\r\n except FileNotFoundError as e:\r\n sims = build_sim(file, graph1, graph2)\r\n with open(pickle_name,'wb') as f:\r\n pickle.dump(sims,f)\r\n return sims", "def test_save_node(self):\n\n with mock.patch(\"builtins.open\", mock.mock_open(read_data=\"data\")) as mock_file, \\\n mock.patch('pypeman.nodes.os.makedirs') as mock_makedirs:\n\n mock_makedirs.return_value = None\n\n n = nodes.Save(uri='file:///tmp/test/?filename=%(msg_year)s/%(msg_month)s/message%(msg_day)s-%(counter)s.txt')\n n.channel = FakeChannel(self.loop)\n\n m = generate_msg(timestamp=(1981, 12, 28, 13, 37))\n m.payload = \"content\"\n\n ret = self.loop.run_until_complete(n.handle(m))\n\n 
self.assertTrue(isinstance(ret, message.Message))\n\n # Asserts\n mock_makedirs.assert_called_once_with('/tmp/test/1981/12')\n mock_file.assert_called_once_with('/tmp/test/1981/12/message28-0.txt', 'w')\n handle = mock_file()\n handle.write.assert_called_once_with('content')", "def my_solver(filename: str) -> str:\n print(\"Running my solver\")\n time.sleep(random.random() * 2)\n return filename", "def file_parse():\n\n\tfilename = input(\"Enter the file path for your graph: \")\n\ttarget = open(filename, 'r')\n\n\ttarget_lines = [] \t# List of lines from target file\n\t\n\t# Grab the graph count and node/edge count for the first graph\n\ti = 0\n\tfor line in target:\n\t\tif i == 0:\n\t\t\tgraph_count = int(line)\n\t\telif i == 1:\n\t\t\tnode_count = int(line)\n\t\telif i == 2:\n\t\t\tedge_count = int(line)\n\t\telse:\t\n\t\t\ttarget_lines.append(line.strip('\\n'))\n\t\ti += 1\n\n\treturn graph_create(target_lines, graph_count, node_count, edge_count)", "def Subdue(parameters, graph):\n startTime = time.time()\n iteration = 1\n done = False\n while ((iteration <= parameters.iterations) and (not done)):\n iterationStartTime = time.time()\n if (iteration > 1):\n print(\"----- Iteration \" + str(iteration) + \" -----\\n\")\n print(\"Graph: \" + str(len(graph.vertices)) + \" vertices, \" + str(len(graph.edges)) + \" edges\")\n patternList = DiscoverPatterns(parameters, graph)\n if (not patternList):\n done = True\n print(\"No patterns found.\\n\")\n else:\n print(\"\\nBest \" + str(len(patternList)) + \" patterns:\\n\")\n for pattern in patternList:\n pattern.print_pattern(' ')\n print(\"\")\n # write machine-readable output, if requested\n if (parameters.writePattern):\n outputFileName = parameters.outputFileName + \"-pattern-\" + str(iteration) + \".json\"\n patternList[0].definition.write_to_file(outputFileName)\n if (parameters.writeInstances):\n outputFileName = parameters.outputFileName + \"-instances-\" + str(iteration) + \".json\"\n patternList[0].write_instances_to_file(outputFileName)\n if ((iteration < parameters.iterations) or (parameters.writeCompressed)):\n graph.Compress(iteration, patternList[0])\n if (iteration < parameters.iterations):\n # consider another iteration\n if (len(graph.edges) == 0):\n done = True\n print(\"Ending iterations - graph fully compressed.\\n\")\n if ((iteration == parameters.iterations) and (parameters.writeCompressed)):\n outputFileName = parameters.outputFileName + \"-compressed-\" + str(iteration) + \".json\"\n graph.write_to_file(outputFileName)\n if (parameters.iterations > 1):\n iterationEndTime = time.time()\n print(\"Elapsed time for iteration \" + str(iteration) + \" = \" + str(iterationEndTime - iterationStartTime) + \" seconds.\\n\")\n iteration += 1\n endTime = time.time()\n print(\"SUBDUE done. 
Elapsed time = \" + str(endTime - startTime) + \" seconds\\n\")", "def create_graph(self,root_path):\n graph = self.graph\n\n #get the path lists recursively from the root directory \n path_list = sorted(Path(root_path).rglob('*'))\n # create the necessary amount of vertices \n graph.add_vertices(len(path_list)+1)\n \n # these list will be used for inserting to sqlite database\n vertices = []\n edges = []\n\n # now the edges have to be created, and the vertices need the file informations\n # first is the root vertex\n temp=graph.vs[0]\n temp[\"name\"]=root_path\n temp[\"parent\"]=\"\"\n temp[\"size\"]=os.stat(root_path).st_size\n temp[\"last_modified\"]=os.stat(root_path).st_mtime\n temp[\"last_accessed\"]=os.stat(root_path).st_atime\n \n vertices.append((0,temp[\"name\"],temp[\"parent\"],temp[\"size\"],temp[\"last_modified\"],temp[\"last_accessed\"]))\n\n # walking through the paths, the vertices get the informations, and the egdes will be created\n for i, file in enumerate(path_list):\n temp=graph.vs[i+1]\n temp[\"name\"]=file.as_posix()\n temp[\"parent\"]=file.parent.as_posix()\n temp[\"size\"]=os.stat(file).st_size\n temp[\"last_modified\"]=os.stat(file).st_mtime\n temp[\"last_accessed\"]=os.stat(file).st_atime\n vertices.append((temp.index,temp[\"name\"],temp[\"parent\"],temp[\"size\"],temp[\"last_modified\"],temp[\"last_accessed\"]))\n \n parent_id=graph.vs.find(name=temp[\"parent\"]).index\n # Egde is defined beetween the current vertex and its parent\n graph.add_edges([(i+1,parent_id)])\n edges.append((i+1,parent_id))\n\n # save the created Graph object into the connected redis database as JSON\n cache= get_redis()\n cache.execute_command('JSON.SET', 'vertices_name','.',json.dumps(self.graph.vs[\"name\"]))\n cache.execute_command('JSON.SET', 'vertices_parent','.',json.dumps(self.graph.vs[\"parent\"]))\n cache.execute_command('JSON.SET', 'vertices_size','.',json.dumps(self.graph.vs[\"size\"]))\n cache.execute_command('JSON.SET', 'vertices_last_modified','.',json.dumps(self.graph.vs[\"last_modified\"]))\n cache.execute_command('JSON.SET', 'vertices_last_accessed','.',json.dumps(self.graph.vs[\"last_accessed\"]))\n cache.execute_command('JSON.SET', 'edges','.',json.dumps(self.graph.get_edgelist()))\n\n # save the created Graph object into the connected sqlite database \n conn = get_sqlite()\n c = conn.cursor()\n c.execute('DROP TABLE IF EXISTS vertices')\n c.execute('DROP TABLE IF EXISTS edges')\n c.execute('CREATE TABLE vertices (id, name, parent, size, last_modified, last_accessed)')\n c.execute('CREATE TABLE edges (start, end)')\n c.executemany('INSERT INTO vertices VALUES (?,?,?,?,?,?)', vertices)\n c.executemany('INSERT INTO edges VALUES (?,?)', edges)\n conn.commit()\n\n # optionally create image from the Graph object\n # unfortunatelly this feature not working correctly now\n plot(graph,\"plot.png\", layout=\"tree\")", "def testEvaluatingRandomDAG(self):\n jobStore = self._getTestJobStorePath()\n for test in range(5):\n # Temporary file\n tempDir = self._createTempDir(purpose='tempDir')\n # Make a random DAG for the set of child edges\n nodeNumber = random.choice(range(2, 8))\n childEdges = self.makeRandomDAG(nodeNumber)\n # Get an adjacency list representation and check is acyclic\n adjacencyList = self.getAdjacencyList(nodeNumber, childEdges)\n self.assertTrue(self.isAcyclic(adjacencyList))\n # Add in follow on edges - these are returned as a list, and as a set of augmented\n # edges in the adjacency list\n followOnEdges = self.addRandomFollowOnEdges(adjacencyList)\n 
self.assertTrue(self.isAcyclic(adjacencyList))\n # Make the job graph\n rootJob = self.makeJobGraph(nodeNumber, childEdges, followOnEdges, tempDir)\n # Run the job graph\n options = Job.Runner.getDefaultOptions(\"%s.%i\" % (jobStore, test))\n options.logLevel = \"DEBUG\"\n options.retryCount = 1\n options.badWorker = 0.25\n options.badWorkerFailInterval = 0.01\n # Because we're going to be killing the services all the time for\n # restarts, make sure they are paying attention.\n options.servicePollingInterval = 1\n\n # Now actually run the workflow\n try:\n with Toil(options) as toil:\n toil.start(rootJob)\n numberOfFailedJobs = 0\n except FailedJobsException as e:\n numberOfFailedJobs = e.numberOfFailedJobs\n\n # Restart until successful or failed\n totalTrys = 1\n options.restart = True\n while numberOfFailedJobs != 0:\n try:\n with Toil(options) as toil:\n toil.restart()\n numberOfFailedJobs = 0\n except FailedJobsException as e:\n numberOfFailedJobs = e.numberOfFailedJobs\n if totalTrys > 32: #p(fail after this many restarts) ~= 0.5**32\n self.fail() #Exceeded a reasonable number of restarts\n totalTrys += 1\n\n # For each job check it created a valid output file and add the ordering\n # relationships contained within the output file to the ordering relationship,\n # so we can check they are compatible with the relationships defined by the job DAG.\n ordering = None\n for i in range(nodeNumber):\n with open(os.path.join(tempDir, str(i))) as fH:\n ordering = list(map(int, fH.readline().split()))\n self.assertEqual(int(ordering[-1]), i)\n for j in ordering[:-1]:\n adjacencyList[int(j)].add(i)\n # Check the ordering retains an acyclic graph\n if not self.isAcyclic(adjacencyList):\n print(\"ORDERING\", ordering)\n print(\"CHILD EDGES\", childEdges)\n print(\"FOLLOW ON EDGES\", followOnEdges)\n print(\"ADJACENCY LIST\", adjacencyList)\n self.assertTrue(self.isAcyclic(adjacencyList))", "def create_all(graph,first_last_fn):\n trip_id = 1\n line_num = 0\n num_trips = 0\n trip_id2model = {}\n #paths = {}\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n #paths[trip_id] = p\n while p.next_line != len(graph.lines):#file_length:\n graph.trip_id2line_num[trip_id] = line_num\n line_num = p.next_line\n trip_id = normalize_simple(graph.lines[line_num])[0]\n #trip_id = dg.normalize(lines[line_num])[0]\n p = Path(trip_id,graph,line_num=line_num)\n trip_id2model[trip_id] = p.edges\n num_trips += 1\n # paths[trip_id] = p\n graph.trip_id2line_num[trip_id] = line_num\n graph.num_trips = num_trips\n\n\n with open(first_last_fn,'wb') as output:\n pickle.dump(graph.first_last2trip_ids,output)\n\n with open('pickles/trip_id2model.pickle','wb') as output:\n pickle.dump(trip_id2model,output)\n #return paths", "def gen_graph(file_path, data_string):\n with open(file_path, 'w+') as f:\n f.write(data_string)\n os.system('python files/termgraph.py {}'.format(file_path))", "def load_graph(graph_url): # Function Provided By instructor - Grabs a specific graph from the internet and converts it to a form we can use\n graph_file = urllib2.urlopen(graph_url) # sets graph_file var to the file downloaded by urlopen\n graph_text = graph_file.read() # invokes read on the file downloaded\n graph_lines = graph_text.split('\\n')\n graph_lines = graph_lines[ : -1]\n\n print \"Loaded graph with\", len(graph_lines), \"nodes\"\n\n answer_graph = {}\n for line in graph_lines:\n neighbors = line.split(' ')\n node = int(neighbors[0])\n answer_graph[node] = set([])\n for neighbor in 
neighbors[1 : -1]:\n answer_graph[node].add(int(neighbor))\n\n print \"Finished processing Out-Degrees\"\n\n return answer_graph", "def step1(step1_input, step1_output, map_file):\n\n logger.info(\"###Step 1:\")\n graf = defaultdict(dict)\n start = clock()\n #create brands-website map dictionsry\n website_map = {}\n with open(map_file) as csv_file:\n rdr = csv.DictReader(csv_file)\n for row in rdr:\n website_map[re.sub('[.,;:]', '', row['BrandName'])] = row['Website'] \n check_time(start, \"populate website_map\")\n #gather graph data \n start1 = clock()\n with open(step1_input, 'rb') as csv_file:\n rdr = csv.reader(csv_file)\n check_time(start1, \"Reading input file...\")\n header = rdr.next()\n start_gather = clock()\n for row in rdr:\n one_list = [i for i,x in enumerate(row) if x == '1']\n for pos in one_list:\n pos_idx = one_list.index(pos)\n try:\n site = website_map[header[pos_idx]]\n except KeyError:\n site = header[pos_idx]\n for other_pos in one_list[pos_idx+1:]:\n other_pos_idx = one_list.index(other_pos)\n try:\n other_site = website_map[header[other_pos_idx]]\n except KeyError:\n other_site = header[other_pos_idx]\n try:\n graf[site][other_site] += 1\n except KeyError:\n graf[site][other_site] = 1\n check_time(start_gather, \"Done gathering data...\")\n start_write = clock()\n write_to_file(step1_output, graf)\n check_time(start_write, \"Done writing output file.\")\n check_time(start1, \"Step 1 end...\")\n return 0", "def generate_output(input_filename: str, output_filename: str, goal_node: Node,\n generated: set) -> None:\n\n input_stream = io.open(input_filename, 'r', encoding='utf-8', errors='ignore',\n newline='\\n')\n with open(output_filename, 'w') as out_file:\n for i in range(0, 10):\n out_file.write(input_stream.readline().rstrip())\n out_file.write('\\n')\n \"\"\" The first ten lines of the output file are identical to those in the \n input file. The tenth line should be skipped because it's blank.\"\"\"\n out_file.write(str(goal_node.path_cost) + '\\n')\n # Line 11 of the output, the depth level d\n out_file.write(str(len(generated)) + '\\n')\n # Line 12 of the output, the total number of nodes generated\n\n # Writing Line 13 of the output, the sequence of moves\n length = len(goal_node.path_history)\n for i in range(length - 1):\n out_file.write(goal_node.path_history[i] + ' ')\n out_file.write(goal_node.path_history[length - 1] + '\\n')\n\n # Writing Line 14 of the output, the f(n) values\n f_line = str(goal_node.f) + ' '\n parent = goal_node.parent\n while parent: # Loop stops when parent == None\n f_line += (str(parent.f) + ' ')\n parent = parent.parent\n f_list = f_line.split(' ')\n # Breaks down the string to the integers it contains\n reverse = ''\n for i in range(len(f_list) - 2, -1, -1):\n # f_line[len(f_line)-1] is an extra whitespace character and\n # thus shouldn't be copied\n reverse += str(f_list[i])\n if i != 0:\n reverse += ' '\n \"\"\" The order of the f(n) values in f_line is from goal node \n to root node. 
The four lines above reverse the order, which \n is what the output format expects.\"\"\"\n out_file.write(reverse)\n\n out_file.close()", "def create_social_graph(file):\n social_graph = NonDirectionalGraph(\"SocialGraph\")\n with open(file, \"rt\") as f:\n data = f.readlines()\n n_friendship = 0 # Represents the number of friendships in the graph in each iteration\n highest_n_friendship = 0 # Captures the highest record of n_friendship in the graph\n highest_n_neighbors_per_node_dict = {} # Captures the highest record of friendship per node\n for line in data:\n split_line = line.split()\n if \"became\" in split_line: # \"became\" is in lines where persons become connected\n for name in [split_line[0], split_line[2]]:\n # The following if statement makes sure to instantiate the node and adds it to the graph\n if name not in social_graph:\n node = Node(name)\n social_graph.add_node(node)\n highest_n_neighbors_per_node_dict[name] = 0 ##\n social_graph.add_edge(split_line[0],split_line[2]) # Adds a connection between the nodes\n n_friendship += 1 # Updates the number of friendships\n # The following for loop updates the highest number of friends (neighbors) if it changes\n for name in [split_line[0], split_line[2]]:\n if len(social_graph.nodes[name].neighbors) > highest_n_neighbors_per_node_dict[name]:\n highest_n_neighbors_per_node_dict[name] = len(social_graph.nodes[name].neighbors)\n elif \"cancelled\" in split_line: # \"became\" is in lines where persons become disconnected\n social_graph.remove_edge(split_line[0], split_line[2])\n n_friendship -= 1 # Updates the number of friendships\n # In case any of the words \"cancelled\" or \"became\" is in the line\n else:\n print(\"Unrecognized line\")\n # The following for loop updates the highest number of friendship if it changes\n if n_friendship > highest_n_friendship:\n highest_n_friendship = n_friendship\n return social_graph, highest_n_friendship, highest_n_neighbors_per_node_dict", "def test_graph_creation(self):\n with open(os.path.join(data_dir, 'sample_graph.pkl'), 'rb') as f:\n truth_graph = pickle.load(f)\n f.close()\n output_graph = geojson_to_graph(os.path.join(data_dir,\n 'sample_roads.geojson'))\n\n assert nx.is_isomorphic(truth_graph, output_graph)" ]
[ "0.6007912", "0.59982365", "0.5962933", "0.5850433", "0.5817421", "0.57249165", "0.57078034", "0.568576", "0.5582051", "0.55256677", "0.55047196", "0.54700655", "0.54588395", "0.54585487", "0.5439813", "0.5436548", "0.54200363", "0.54097277", "0.5380385", "0.5369311", "0.5364876", "0.53577006", "0.5350429", "0.5329439", "0.5309778", "0.5303986", "0.52944165", "0.52916026", "0.52811486", "0.5257055" ]
0.6569553
0
Create a (potentially existing) directory without errors. Raise OSError if directory can't be created. If clobber is True, remove dirpath if it exists.
def mkdir(dirpath, clobber=False):
    if clobber:
        shutil.rmtree(dirpath, ignore_errors=True)
    try:
        os.mkdir(dirpath)
    except OSError:
        pass
    if not path.exists(dirpath):
        raise OSError('Failed to create %s' % dirpath)
    return dirpath
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makedirectory(path):\n\n exist_ok = True\n if not exist_ok and os.path.isdir(path):\n with contextlib.suppress(OSError):\n Path.mkdir(path, parents=True)", "def mkDir(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n # In a race between two threads, this thread may have lost,\n # in which case the directory will now exist. Otherwise this\n # is a real exception.\n if not os.path.exists(path):\n raise", "def create_dir_if_necessary(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def create_directory(dirpath: str, dryrun: bool):\n if not dryrun:\n try:\n os.mkdir(dirpath)\n except FileExistsError as error:\n raise error\n else:\n print(f\"Creating new directory: {dirpath}\")\n return(dirpath)", "def _ensure_dir(directory):\r\n try:\r\n os.makedirs(directory)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise", "def create_directory(path):\n try:\n os.makedirs(path) # pylint: disable=no-member\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def make_dir(path):\n try:\n os.mkdir(path)\n except OSError:\n pass", "def create_or_clean_directory(dir):\n\tif not os.path.exists(dir):\n\t\tprint(\"The path \\\"\" + dir + \"\\\" does not exist\")\n\t\tprint(\"creating directory \\\"\" + dir + \"\\\"\")\n\t\tos.makedirs(dir)\n\telse: #Directory exists, but we want to clean it before use\n\t\tprint(dir + \" already exists. Cleaning before use...\")\n\t\tshutil.rmtree(dir)\n\t\tos.makedirs(dir)", "def ensuredir(path):\n # Copied from sphinx.util.osutil.ensuredir(): BSD licensed code, so it's OK\n # to add to this project.\n EEXIST = getattr(errno, 'EEXIST', 0)\n try:\n os.makedirs(path)\n except OSError as err:\n # 0 for Jython/Win32\n if err.errno not in [0, EEXIST]:\n raise", "def makeDir(path):\r\n\r\n try:\r\n os.makedirs(path)\r\n except OSError as exc:\r\n if exc.errno == errno.EEXIST and os.path.isdir(path):\r\n pass\r\n else:\r\n raise", "def ensure_dir(dir_path):\n try:\n os.mkdir(dir_path)\n except FileExistsError:\n pass", "def ensure_directory(path):\n\tdir_path = os.path.dirname(path)\n\tif os.path.exists(dir_path):\n\t\treturn\n\tensure_directory(dir_path)\n\ttry:\n\t\tos.mkdir(dir_path)\n\texcept OSError as e:\n\t\t# Ignore if EEXISTS. 
This is needed to avoid a race if two getters run at once.\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise", "def make_dir(dir_path):\n if os.path.isdir(dir_path) == False:\n os.mkdir(dir_path)", "def ensure_dir(d):\n\n if not os.path.exists(d):\n os.makedirs(d, exist_ok=True)\n\n return", "def make_dir(path=None):\n\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError:\n exit(\"\\nOSError: You can not use that directory!\\n\")", "def make_dir(self):\n if not os.path.exists(self.d):\n try:\n os.mkdir(self.d)\n except OSError, e:\n if e.errno != 17:\n raise\n pass", "def create_dir(dir_):\n try:\n os.makedirs(dir_)\n logger.debug(\"Creating directory %s\", dir_)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise", "def force_mkdir(directory):\n try:\n os.makedirs(directory)\n except OSError as error:\n if error.errno != errno.EEXIST: # suppress(PYC90)\n raise error\n\n return directory", "def create_new_dir(path):\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called from save_single_file_locally', extra=d)\n\n if not os.path.exists(path):\n logger.debug('Calling Function: % s',\n 'create_new_dir: create_new_dir calling makedirs', extra=d)\n os.makedirs(path)\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called makedirs', extra=d)", "def maybe_make_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)", "def create_dir(dir_path):\n\n if not path.exists(dir_path):\n log('Creating directory: {0}'.format(dir_path))\n run(sh.mkdir, dir_path, p=True)", "def make_dir(directory):\n try:\n os.makedirs(directory)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise", "def MaybeMakeDirectory(*path):\n file_path = os.path.join(*path)\n try:\n os.makedirs(file_path)\n except OSError, e:\n if e.errno != errno.EEXIST:\n raise", "def makeDir(dir_path):\n if os.path.exists(dir_path): return\n dir_path = os.path.realpath(dir_path)\n dir_path = os.path.normpath(dir_path)\n if os.path.exists(os.path.dirname(dir_path)):\n os.mkdir(dir_path)\n else:\n makeDir(os.path.dirname(dir_path))\n os.mkdir(dir_path)", "def makedir(path):\n try:\n os.makedirs(path)\n except OSError:\n # Path already exists or cannot be created\n if not os.path.isdir(path):\n raise", "def mkdir(path):\n try: \n os.mkdir(path)\n except OSError:\n if not os.path.isdir(path):\n raise" ]
[ "0.7061584", "0.70569605", "0.70226943", "0.7006338", "0.699954", "0.6977039", "0.6966338", "0.6966338", "0.6966338", "0.6966338", "0.69507277", "0.6921338", "0.68780476", "0.68686837", "0.6837868", "0.6795552", "0.67695946", "0.67676157", "0.67505884", "0.6743762", "0.6729531", "0.6726987", "0.67138416", "0.6709744", "0.670638", "0.6701722", "0.6672073", "0.6661889", "0.6656334", "0.664839" ]
0.85963845
0
Copy fname from package data to outdir/subdir (creating dir if necessary), and return the path to the copy of fname relative to outdir.
def make_local_copy(outdir, subdir, fname):
    destdir = path.join(outdir, subdir)
    mkdir(destdir)
    shutil.copyfile(package_data(fname), path.join(destdir, fname))
    return path.join(subdir, fname)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def package_dest_path(self, package):\n\n if self.destdir is None:\n return self.package_final_path(package)\n else:\n return os.path.join(\n self.destdir,\n self.package_install_space(package).lstrip(os.sep))", "def create_file_path(fname, direc=\"data/result/\"):\n path = os.path.join(TOP_LEVEL, direc, fname)\n return path", "def _copy_in_files(self, out_head, pkgname, dep, force=True):\n resource_path = Path(\"installer_data\") / dep\n platform_path = resource_path / self.platform\n not_my_platform_path_parts = [\n (resource_path / p).parts for p in self.not_my_platform\n ]\n if pkg_resources.resource_exists(pkgname, str(resource_path)):\n for root, unused_dirs, files in walk_package(\n pkgname, resource_path\n ):\n root_parts = root.parts\n if root_parts[:3] in not_my_platform_path_parts:\n continue\n elif root_parts[:3] == platform_path.parts:\n subdir_parts = root_parts[3:]\n else:\n subdir_parts = root_parts[2:]\n if len(subdir_parts) > 0 and subdir_parts[-1] == \"__pycache__\":\n continue\n out_path = out_head.joinpath(*subdir_parts)\n if not out_path.exists() and len(files) > 0:\n logger.debug(f'Creating \"{str(out_path)}\" directory')\n out_path.mkdir(0o755, parents=True)\n for filename in files:\n logger.debug(f\"Copying {filename}\")\n try:\n data_string = pkgutil.get_data(\n __name__, str(root / filename)\n ).decode(\"UTF-8\")\n except UnicodeDecodeError:\n logger.warning(\n f\"File {filename} contained an undecodable string, skipping\"\n )\n continue\n file_path = out_path / filename\n if file_path.exists() and not force:\n logger.error(\n f\"File {str(file_path)} already exists.\"\n + \" Use --force to overwrite.\"\n )\n sys.exit(1)\n elif file_path.exists() and force:\n operation = \"Overwriting\"\n else:\n operation = \"Creating\"\n logger.debug(f\"{operation} {file_path}\")\n with file_path.open(mode=\"wt\") as fh:\n fh.write(data_string)\n if filename.suffix in EXECUTABLE_EXTS:\n file_path.chmod(0o755)", "def GetOutputFilename(fname):\n return os.path.join(outdir, fname)", "def copy(self, fname):\n _, ext = osp.splitext(fname)\n spath = osp.join(self.src, fname)\n oname = fname\n path = osp.join(self.dst, oname)\n os.makedirs(osp.dirname(path), exist_ok=True)\n if ext in [\".css\"]:\n content = self.include(fname)\n with open(path, \"wt\") as fp:\n fp.write(content)\n else:\n shutil.copyfile(spath, path)\n return osp.relpath(oname, self.root)", "def fname_dir(fname):\n\treturn os.path.abspath(os.path.join(fname,os.pardir))", "def destPath(file, package, type='files'):\n\treturn tmpDir(package)+'/etc/univention/templates/'+type+'/'+file", "def destDir(file, package, type='files'):\n\treturn tmpDir(package)+'/etc/univention/templates/'+type+'/'+os.path.dirname(file)", "def out_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(dataset_path(dataset, work_dir), consts.OUTPUT_DIR)", "def getOutputFile(fname):\n return os.path.join(Configurations.getOutputDir(), fname)", "def fake_full_path(self) -> PurePath:\n if self.category:\n # Giza wrote out yaml file artifacts under a directory. e.g. 
steps-foo.yaml becomes\n # steps/foo.rst\n return self.source_path.parent.joinpath(\n PurePath(self.category), self.output_filename\n )\n return self.source_path", "def _make_output_path(self, filename):\n return os.path.join(self._output_directory, filename)", "def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))", "def package_path(pkg):\n fname = pkgutil.get_loader(pkg).get_filename()\n dirname = op.dirname(fname)\n dirname = op.abspath(op.join(dirname, '..'))\n return dirname", "def opath ( dir_name, file_name = None ):\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)", "def _makeAbsolute(fname):\n if fname[0] != '/':\n return os.path.join(os.getcwd(), fname)\n else:\n return fname", "def data_path(tmp_path, request):\n\n filename = Path(request.module.__file__)\n test_dir = filename.parent / filename.stem\n if test_dir.is_dir():\n dir_util.copy_tree(test_dir, str(tmp_path))\n\n return tmp_path", "def outpath(*path_components):\n basedir = os.path.join(\"..\", \"script-output\")\n return os.path.join(basedir, *path_components)", "def get_output_file(run, lens_chunk, source_tilename):\n d=get_output_dir(run, lens_chunk)\n fname=\"%(run)s-lens-%(lens_chunk)06d-src-%(source_tilename)s.dat\"\n fname=fname % {'run':run,\n 'lens_chunk':lens_chunk,\n 'source_tilename':source_tilename}\n\n return os.path.join(d, fname)", "def copy_os_release_file(dut, fname=default_os_release_file):\n # src = os.path.join(os.path.sep, 'shared', os_release_files_dir, fname)\n dst = os.path.join(os.path.sep, 'etc', 'os-release')\n dut(\"/bin/cp /tmp/files/os_releases/\" + fname + \" \" + dst, shell=\"bash\")", "def make_path(self, filename):\n return os.path.join(self.root_path, filename)", "def resolve_path(self):\n # This is the fixed directory template\n out_dir = os.path.join(opts.base_dir, self.board, self.dir)\n\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n os.chdir(out_dir)", "def copyDir(self, src, subpath):\n dst = self.output_path + \"/\" + subpath\n shutil.copytree(src, dst)", "def copy_package_data(self, package, target_dir):\n exts = [ i[0] for i in imp.get_suffixes() ]\n exts.append('.py')\n exts.append('.pyc')\n exts.append('.pyo')\n def datafilter(item):\n for e in exts:\n if item.endswith(e):\n return False\n return True\n\n target_dir = os.path.join(target_dir, *(package.identifier.split('.')))\n for dname in package.packagepath:\n filenames = list(filter(datafilter, zipio.listdir(dname)))\n for fname in filenames:\n if fname in ('.svn', 'CVS', '.hg', '.git'):\n # Scrub revision manager junk\n continue\n if fname in ('__pycache__',):\n # Ignore PEP 3147 bytecode cache\n continue\n if fname.startswith('.') and fname.endswith('.swp'):\n # Ignore vim(1) temporary files\n continue\n if fname.endswith('~') or fname.endswith('.orig'):\n # Ignore backup files for common tools (hg, emacs, ...)\n continue\n pth = os.path.join(dname, fname)\n\n # Check if we have found a package, exclude those\n if zipio.isdir(pth):\n # XXX: the 'and not' part is wrong, need to fix zipio.isdir\n for p in zipio.listdir(pth):\n if p.startswith('__init__.') and p[8:] in exts:\n break\n\n else:\n if os.path.isfile(pth):\n # Avoid extracting a resource file that happens\n # to be zipfile.\n # XXX: Need API in zipio for nicer code.\n copy_file(pth, os.path.join(target_dir, fname))\n else:\n copy_tree(pth, os.path.join(target_dir, fname))\n continue\n\n elif zipio.isdir(pth) and (\n 
zipio.isfile(os.path.join(pth, '__init__.py'))\n or zipio.isfile(os.path.join(pth, '__init__.pyc'))\n or zipio.isfile(os.path.join(pth, '__init__.pyo'))):\n # Subdirectory is a python package, these will get included later on\n # when the subpackage itself is included, ignore for now.\n pass\n\n else:\n copy_file(pth, os.path.join(target_dir, fname))", "def calc_fullpath(data_folder, name):\n return os.path.join(data_folder, f\"{alias(name)}.json\")", "def create_path(inputfile, outputdir):\n pathdata = '/'.join(inputfile.split('/')[-3:])\n newpath = join(outputdir, pathdata)\n dirout = dirname(newpath)\n if not isdir(dirout):\n os.makedirs(dirout)\n return newpath", "def path_src_to_dest(src_pathname, dest_filename_suffix=None):\n src_relpath = Path(src_pathname).relative_to(config[\"topdir\"])\n dest_pathname = Path(config[\"outdir\"]).joinpath(src_relpath)\n if dest_filename_suffix:\n dest_pathname = dest_pathname.with_suffix(dest_filename_suffix)\n return dest_pathname", "def _get_package_dir ( self, pkg_name ):\n if not pkg_name in self._subdirs:\n self._lock.acquire()\n try:\n if not pkg_name in self._subdirs:\n newpkg = self.packagedir_cls (\n name = pkg_name,\n logger = self.logger,\n directory = self.physical_location + os.sep + pkg_name,\n get_header = self.get_header,\n runtime_incremental = self.runtime_incremental,\n parent = self\n )\n self._subdirs [pkg_name] = newpkg\n finally:\n self._lock.release()\n\n return self._subdirs [pkg_name]", "def set_fileout(self, fileout, builddir=True):\n if builddir:\n dir_ = os.path.dirname(fileout)\n if not os.path.isdir(dir_):\n os.makedirs(dir_)\n \n self._fileout = fileout", "def join(self, fname):\n return os.path.abspath(os.path.expanduser(os.path.join(self.path, fname)))" ]
[ "0.62651706", "0.6082093", "0.60650736", "0.6050992", "0.5938889", "0.58145124", "0.5783093", "0.5764241", "0.57486904", "0.57103425", "0.57036674", "0.5678299", "0.56111586", "0.56056064", "0.5599549", "0.5561045", "0.5521641", "0.54854083", "0.5484105", "0.5466086", "0.5459725", "0.5457355", "0.5448004", "0.5446678", "0.5438214", "0.5426324", "0.54178786", "0.53946406", "0.5382584", "0.5377642" ]
0.7523176
0
Given a course code, requests the corresponding course page
def get_coursepage(code):
    url = 'http://gla.ac.uk/coursecatalogue/course/?code=' + code
    print url
    coursepage = requests.get(url)
    return coursepage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_course_page(self):\n\n print(\"Course URL: {}\".format(self.course_url))\n try:\n self.course_page = BeautifulSoup(requests.get(self.course_url).text, \"lxml\")\n except requests.ConnectionError as e:\n print(\"Error Connecting!\\n\", e)\n sys.exit(1)\n except requests.exceptions.HTTPError as errh:\n print(\"Http Error:\", errh)\n sys.exit(1)\n except requests.exceptions.ConnectionError as errc:\n print(\"Error Connecting:\", errc)\n sys.exit(1)\n except requests.exceptions.Timeout as errt:\n print(\"Timeout Error:\", errt)\n sys.exit(1)\n except requests.exceptions.RequestException as err:\n print(\"Oops: Something Else\", err)\n sys.exit(1)", "def create_course_page(self, course):\r\n\r\n course_wiki_home = reverse('course_wiki', kwargs={'course_id': course.id.to_deprecated_string()})\r\n referer = reverse(\"progress\", kwargs={'course_id': self.toy.id.to_deprecated_string()})\r\n\r\n resp = self.client.get(course_wiki_home, follow=True, HTTP_REFERER=referer)\r\n\r\n course_wiki_page = referer.replace('progress', 'wiki/' + self.toy.wiki_slug + \"/\")\r\n\r\n ending_location = resp.redirect_chain[-1][0]\r\n ending_status = resp.redirect_chain[-1][1]\r\n\r\n self.assertEquals(ending_location, 'http://testserver' + course_wiki_page)\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n self.has_course_navigator(resp)", "def get_course(dept, num):\n \n # semester: 10 = Fall, 20 = Spring, 30 = Summer\n host = \"https://selfservice.mypurdue.purdue.edu/prod/bwckctlg.p_disp_course_detail\"\n query = \"?cat_term_in={term}&subj_code_in={dept}&crse_numb_in={num}\".format(term=\"201620\", dept=dept, num=num)\n urlfetch.set_default_fetch_deadline(600)\n result = urlfetch.fetch(host+query)\n \n if result.status_code == 200:\n tree = html.fromstring(result.content)\n text = tree[1][4][2].text_content() # get just the relevant text of the webpage \n\n # remove unicode non-breaking spaces to allow regexing\n text = text.replace(u'\\xa0',u' ')\n return text", "def view_specific_course(request, username, course_code):\n if request.method == 'GET':\n # if user log in \n try:\n user = User.objects.get(username=username)\n if ensure_login(user) == False:\n return JsonResponse({'login': 'User must login'}, status=403) \n except:\n return JsonResponse({'login': 'User must login'}, status=403)\n\n if not user.is_staff:\n return JsonResponse({'error': 'User not an instructor'}, status=403)\n\n # query the db for this course\n course = Course.objects.get(course_code=course_code)\n if course is None:\n return JsonResponse(\n {'error': 'Something went wrong, Course has not been fetched from db'},\n status=401\n )\n \n # get the whole data of this particular course\n return JsonResponse({\n **{ 'success': True },\n **singleCourseSerializer(course)\n }, status=200)\n\n else:\n return JsonResponse({'error': 'Method not Allowed'}, status=405)", "def parse_get_course(xml_course):\n parse_course = parse_create_course(xml_course)\n query_constraints = {\n \"termCode\": parse_course[\"termCode\"],\n \"subject\": parse_course[\"subject\"],\n \"courseNumber\": parse_course[\"courseNumber\"]\n }\n params = urllib.urlencode({\"where\": json.dumps(query_constraints)})\n connection = httplib.HTTPSConnection(PARSE_API_URL, PARSE_API_PORT)\n connection.connect()\n connection.request(\n \"GET\",\n \"%s?%s\" % (COURSES_ENDPOINT, params),\n '',\n {\"X-Parse-Application-Id\": app_id, \"X-Parse-REST-API-Key\": rest_api_key}\n )\n response = json.loads(connection.getresponse().read())\n if response.get(\"results\"):\n return 
response[\"results\"][0]\n else:\n return None", "def get_courses_html():\r\n r = requests.get(URL_CS_ALL_REQ)\r\n if r.status_code == 200:\r\n return r.text\r\n else:\r\n return None", "def test_render_page_wrong_course(self):\n url = reverse(\n 'completion_view', kwargs={\n 'course_id': 'course-v1:mss+MSS001+2019_2'})\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 404)", "def download_course_given(self, course_url: str):\n self.course_url = course_url\n self.get_course_page()\n self.get_course_title()\n self.get_course_unit_titles()\n self.get_course_unit_slugs()\n self.get_course_unit_urls()\n\n print(\"\\nGenerating Path Slugs...\\n\")\n self.get_course_all_slugs()\n self.get_course_youtube_ids()\n self.download_course_videos()", "def course(institution, course, mode):\n\n try:\n logging.info(f\"Process a request for an course resource\\nurl: {request.url}\")\n\n params = dict({\"institution_id\": institution, \"course_id\": course, \"mode\": mode})\n logging.info(f\"Parameters: {params}\")\n\n #\n # The params are used in DB queries, so let's do\n # some basic sanitisation of them.\n #\n if not valid_course_params(params):\n logging.error(f\"valid_course_params returned false for {params}\")\n return Response(\n get_http_error_response_json(\n \"Bad Request\", \"Parameter Error\", \"Invalid parameter passed\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=400,\n )\n\n logging.info(\"The parameters look good\")\n \n courses_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_courses_collection_id)\n dataset_collection_link = get_collection_link(cosmosdb_database_id, cosmosdb_dataset_collection_id)\n\n # Intialise a CourseFetcher\n course_fetcher = CourseFetcher(client, courses_collection_link)\n\n # Initialise dataset helper - used for retrieving latest dataset version\n dsh = DataSetHelper(client, dataset_collection_link)\n version = dsh.get_highest_successful_version_number()\n\n # Get the course\n course = course_fetcher.get_course(version=version, **params)\n\n if course:\n return Response(\n course, headers={\"Content-Type\": \"application/json\"},\n status=200\n )\n else:\n return Response(\n get_http_error_response_json(\n \"Not Found\", \"course\", \"Course was not found.\"\n ),\n headers={\"Content-Type\": \"application/json\"},\n status=404,\n )\n\n except Exception as e:\n logging.error(traceback.format_exc())\n\n # Raise so Azure sends back the HTTP 500\n raise e", "def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})", "def get_course_by_id(course_key, depth=0):\r\n course = modulestore().get_course(course_key, depth=depth)\r\n if course:\r\n return course\r\n else:\r\n raise Http404(\"Course not found.\")", "def test_render_page_big_course(self):\n url = reverse('completion_view', kwargs={'course_id': self.course.id})\n self.response = self.staff_client.get(url)\n self.assertEqual(self.response.status_code, 200)", "def get_course_by_code(input):\n\tres = None\n\tuser_input = input\n\tinput = str(input)\n\t# clean input\n\tinput = 
util.strip_whitespace(input)\n\tinput = util.del_whitespace(input)\n\tinput = util.to_upper(input)\n\t\n\tprint input\n\t\n\tcourse = query_cat.filter(Course.course_code == input).first()\n\tschedule = query_sch.filter(Schedule.course_code == input).first()\n\t\n\tif (course != None and schedule != None):\n\t\tres = util.format_course_output(course, schedule)\n\telse:\n\t\tres = \"Sry seems like I can't find \" + user_input\n\treturn res", "def course_pages(self):\n courses = self.program.courses.all()\n return (\n CoursePage.objects.filter(course_id__in=courses)\n .select_related(\"course\", \"thumbnail_image\")\n .order_by(\"course__position_in_program\")\n )", "def parse_course(browser, college, course_node):\n # open the course details\n course_handle = course_node.find_element_by_class_name('course')\n course_handle.click()\n wait_for_load(browser)\n\n title = course_node.find_element_by_class_name('courseID').text\n m = re.match(r'([A-Z&]+) *(\\d+)', title)\n if not m:\n logging.warning('Unable to parse title: %s', title)\n return\n\n dept = m.group(1)\n code = m.group(2)\n name = titlecase.titlecase(\n course_node.find_element_by_class_name('courseTitle').text)\n creds = float(course_node.find_element_by_class_name('courseCredits').text)\n tags = parse_tags(course_node)\n prerequisites = parse_prerequisites(course_node)\n\n # close the course details\n course_handle.click()\n wait_for_load(browser)\n\n return Course(\n titlecase.titlecase(college), dept, code, name, creds, tags,\n prerequisites)", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def get_course_by_id(course_id):\n course = Courses.query. \\\n filter_by(id=course_id). 
\\\n first_or_404()\n\n return course", "def get_cms_course_link(course, page='course'):\r\n # This is fragile, but unfortunately the problem is that within the LMS we\r\n # can't use the reverse calls from the CMS\r\n return u\"//{}/{}/{}\".format(settings.CMS_BASE, page, unicode(course.id))", "def index(request):\n\n\tcourse_list=[]\n\tnext_page = 0\n\tpage = request.GET.get('page')\n\tif page is None:\n\t\tpage_num = \"1\"\n\telse:\n\t\tpage_num = page\n\tnext_page = int(page_num)+1\n\tprevious_page = int(page_num)-1\n\tcourse_api_url= \"http://127.0.0.1:8000/myapi/Course/?page=\" + page_num\n\tcourse_api_data=requests.get(course_api_url)\n\tif course_api_data is None:\n\t\treturn HttpResponseBadRequest(\"400 Bad Request Error\")\n\tdata=course_api_data.json()\n\tif 'next' in data:\n\t\tnext_page_link=data['next']\n\tif 'previous' in data:\n\t\tprevious_page_link=data['previous']\n\tif 'count' in data:\n\t\tnew_count=data['count']\n\tcount=ceil(new_count/8)\n\tfor item in range(count):\n\t\tcourse_list.append(item+1)\n\tresult_data=data['results']\n\treturn render(request,'index.html',{'page_num':page_num, 'next_page_link':next_page_link, 'previous_page_link':previous_page_link, 'result_data':result_data,'course_list':course_list, 'next_page':str(next_page), 'previous_page':str(previous_page)})", "def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses", "def test_get_courses(self):\n doc_id = '123456'\n url = reverse('xds_api:get_courses', args=(doc_id,))\n\n with patch('xds_api.views.get_request') as get_request:\n http_resp = get_request.return_value\n get_request.return_value = http_resp\n http_resp.json.return_value = [{\n \"metadata\": {\n \"Metadata_Ledger\": {},\n \"Supplemental_Ledger\": {}\n },\n \"unique_record_identifier\": \"1234\",\n \"metadata_key_hash\": \"5678\"\n }]\n http_resp.status_code = 200\n\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def view_course(self, course_id):\r\n link_css = self._link_css(course_id)\r\n\r\n if link_css is not None:\r\n self.q(css=link_css).first.click()\r\n else:\r\n msg = \"No links found for course {0}\".format(course_id)\r\n self.warning(msg)", "def select_course(self, subject, course_num):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"SELECT * FROM courses WHERE (subject=? 
AND course_num=?)\",\n (subject, course_num),\n )\n return cursor.fetchone()", "def detail_course(request, pk, template=\"core/detail_course.html\"):\n response = {\n 'course': get_object_or_404(Course, pk=pk)\n }\n return direct_to_template(request, template, response)", "def test_course_navigator(self):\r\n\r\n self.login(self.student, self.password)\r\n self.enroll(self.toy)\r\n self.create_course_page(self.toy)\r\n\r\n course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})\r\n referer = reverse(\"courseware\", kwargs={'course_id': self.toy.id.to_deprecated_string()})\r\n\r\n resp = self.client.get(course_wiki_page, follow=True, HTTP_REFERER=referer)\r\n\r\n self.has_course_navigator(resp)", "def from_url(cls, course_id, stderr_level_override=None):\n # This whole function is full of nasty web scraping as the current HTML\n # structure of the website does not lead to logically systematic\n # scraping. It will break if the TheBigList.aspx page structure is\n # modified. Exceptions are raised accordingly.\n _logger.debug(\"Attempting to get data for course ID, %s, by scraping \"\n \"LearnItFirst.com\" % course_id)\n if stderr_level_override is not _ABSENT:\n _stderr_handler.setLevel(stderr_level_override or _DEFAULT_LEVEL)\n headers = {\"User-Agent\": \"Chromium/Linux\"}\n url = \"http://www.learnitfirst.com/Course/%s/default.aspx\" % course_id\n _logger.debug(\"Making a HTTP request with custom headers as the \"\n \"website block's requests coming from a \"\n \"programattic-looking User-Agent\")\n try:\n request = requests.get(url, headers=headers)\n except requests.exceptions.RequestException as message:\n _logger.error(\"An error was encountered when making the HTTP \"\n \"request; %s\" % message)\n if request.status_code == requests.codes.not_found:\n _logger.critical(\"The course ID, %s, does not exist on \"\n \"LearnItFirst.com. An exception will be raised.\" %\n course_id)\n raise LearnItFirstError(\"course ID, %s, does not exist on \"\n \"LearnItFirst.com\" % course_id)\n try:\n soup = bs4.BeautifulSoup(request.text, \"lxml\")\n except bs4.FeatureNotFound as message:\n _logger.critical(\"The html cannot be parsed. An error was \"\n \"encountered; %s\" % message)\n raise bs4.FeatureNotFound(message)\n course_title = cls._transform_name(course_id,\n soup.find(\"h1\").string.strip())\n for link in soup.find_all(\"a\"):\n string = link.string\n if string is not None:\n if string.strip() == \"View the videos in this course\":\n url = parse.urljoin(url, link[\"href\"])\n break\n else:\n _logger.critical(\"The website structure or link text to the \"\n \"TheBigList.aspx page has changed and the data \"\n \"cannot be found. An exception will be raised.\")\n raise LearnItFirstError(\"the website structure or link text to \"\n \"the TheBigList.aspx page has changed\")\n soup = bs4.BeautifulSoup(requests.get(url, headers=headers).text,\n \"lxml\")\n chapters = {}\n chapter_divs = soup.find_all(\"div\", \"chapterTitle\")\n if not chapter_divs:\n _logger.critical(\"The TheBigList.aspx page has changed structure \"\n \"and chapter data cannot be scraped. 
An \"\n \"exception will be raised.\")\n raise LearnItFirstError(\"The TheBigList.aspx page has changed \"\n \"structure and chapter data cannot be \"\n \"scraped.\")\n for chapter_div in chapter_divs:\n # Declare variable for readable line length.\n string = chapter_div.h2.b.string.strip()\n chapter_num = int(re.match(r\"Chapter\\s+(\\d+):\", string).group(1))\n # Declare variable for readable line length.\n raw_name = chapter_div.h2.a.string.strip()\n chapter_name = cls._transform_name(chapter_num, raw_name)\n lessons = {}\n # Declare variable for readable line length.\n next_div = chapter_div.find_next_sibling(\"div\")\n lesson_divs = next_div.find_all(\"div\", \"chapterBorder\")\n if not lesson_divs:\n _logger.critical(\"The TheBigList.aspx page has changed \"\n \"structure and lesson data cannot be \"\n \"scraped. An exception will be raised.\")\n raise LearnItFirstError(\"The TheBigList.aspx page has changed \"\n \"structure and lesson data cannot be \"\n \"scraped.\")\n for lesson_div in lesson_divs:\n # Declare variable for readable line length.\n start_slice = len(\"%s.\" % chapter_num)\n first_div, second_div = lesson_div.find_all(\"div\", limit=2)\n lesson_num = int(first_div.string.strip()[start_slice:])\n raw_name = second_div.a.string.strip()\n lesson_name = cls._transform_name(lesson_num, raw_name,\n course_id, chapter_num)\n lessons[lesson_num] = _Lesson(lesson_num, lesson_name)\n chapters[chapter_num] = _Chapter(chapter_num, chapter_name,\n lessons)\n _logger.info(\"Data for course ID, %s, was successfully scraped from \"\n \"LearnItFirst.com\" % course_id)\n course = Course(course_id, course_title, chapters, _ABSENT)\n course.dump()\n return course", "def _show_course_overview(self, course_key):\r\n resp = self.client.get_html(get_url('course_handler', course_key, 'course_key_string'))\r\n _test_no_locations(self, resp)\r\n return resp", "def get_course_offering(_course):\n link = _course['link']\n logging.info(\"calling get_course_offering with url \\\"%s\\\"\", link)\n\n scanned = conn.execute('SELECT * FROM offerings WHERE link=?', (link,)).fetchone()\n if scanned is None:\n # course wasn't checked before, load it up\n driver.get(link)\n sleep(4.7)\n\n logging.info(\"click on enroll ...\")\n try:\n driver.find_elements_by_class_name(\"EnrollButton\")[0].click()\n sleep(2.2)\n\n logging.info(\"check if course is part of multiple specializations ...\")\n is_unique = True\n try:\n choose_specialization = driver.find_element_by_id(\n \"course_enroll_s12n_selection_button_button\")\n is_unique = False\n choose_specialization.click()\n except NoSuchElementException:\n logging.info(\"course is unique!\")\n if not is_unique:\n logging.info(\"course is in multiple specializations, one selected ...\")\n sleep(1.1)\n\n logging.info(\"check if course is completely free ...\")\n is_free = False\n try:\n h4s = driver.find_elements_by_tag_name(\"h4\")\n for h4 in h4s:\n if \"Full Course, No Certificate\" in h4.text:\n is_free = True\n except NoSuchElementException:\n pass\n sleep(1.3)\n\n if is_free:\n logging.info(\"course is free \\\\o/ :)\")\n fare = 0\n\n # proceed only if course is not free\n else:\n logging.info(\"check if one can audit the course ...\")\n is_auditable = False\n\n # most courses have a sublime link \"audit only\" link\n try:\n driver.find_element_by_id(\"enroll_subscribe_audit_button\")\n is_auditable = True\n except NoSuchElementException:\n pass\n\n if is_auditable:\n logging.info(\"course is auditable :)\")\n fare = 1\n\n # if course is not auditable with 
the link, check if there is same enroll option\n else:\n # some courses have audit option in primary-description\n is_alternatively_auditable = False\n try:\n h4s = driver.find_elements_by_tag_name(\"h4\")\n for h4 in h4s:\n if \"Audit only\" in h4.text:\n is_alternatively_auditable = True\n except NoSuchElementException:\n pass\n finally:\n if is_alternatively_auditable:\n logging.info(\"course is auditable :)\")\n fare = 1\n else:\n logging.info(\"course is pay only :(\")\n fare = 2\n sleep(1.2)\n\n conn.execute(\"\"\"INSERT INTO offerings (\n link, title, university, category, fare) VALUES (?, ?, ?, ?, ?)\"\"\",\n (link, _course['title'], _course['university'], _course['category'], fare))\n conn.commit()\n\n except ElementNotInteractableException:\n logging.info(\"there are no upcoming sessions available ...\")\n\n else:\n logging.info(\"course already scanned ...\")", "def test_instructor_course_access(self):\r\n self.login(self.instructor_user)\r\n\r\n # Now should be able to get to self.course, but not self.test_course\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})\r\n check_for_get_code(self, 200, url)\r\n\r\n url = reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})\r\n check_for_get_code(self, 404, url)", "def get_courses(browser, college, dept_node):\n # enter the department course list\n dept_node.click()\n wait_for_load(browser)\n\n # parse course entries\n entries = browser.find_element_by_id('courseListHolder')\n\n courses = []\n for i in entries.find_elements_by_xpath('div/div'):\n course = parse_course(browser, college, i)\n if course:\n courses.append(course)\n\n # go back to the department index\n back_button = browser.find_element_by_id('btn-deptlist')\n back_button.click()\n wait_for_load(browser)\n\n return courses" ]
[ "0.7398104", "0.6673765", "0.66678524", "0.64198714", "0.6413772", "0.63720846", "0.6232175", "0.61700314", "0.6150578", "0.6142026", "0.61378634", "0.60246366", "0.6009835", "0.5975768", "0.5974896", "0.59581447", "0.5909168", "0.58898294", "0.58715636", "0.5865942", "0.5853008", "0.5830682", "0.58302504", "0.58201647", "0.58179635", "0.5803107", "0.5800324", "0.57977307", "0.57744634", "0.5768133" ]
0.857764
0
Creates a dictionary with a heading-value pair, which is the structure of all the sections in the courses dictionary
def new_dict(heading, value):
    value = value.replace('%', '\%').replace('&', '\&').replace(u'\xa0', ' ')
    # Currently encoding is causing me problems - the quick fix below removes
    # all the characters that have broken the code so far. This solution is not
    # likely to work if more courses were added
    value = value.replace(u'\u25a0', '\\break').replace(u'\u037e', ';')
    return {
        'heading': heading,
        'value': value,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _section_course_info(course_key, access):\r\n course = get_course_by_id(course_key, depth=None)\r\n\r\n section_data = {\r\n 'section_key': 'course_info',\r\n 'section_display_name': _('Course Info'),\r\n 'access': access,\r\n 'course_id': course_key,\r\n 'course_display_name': course.display_name,\r\n 'enrollment_count': CourseEnrollment.num_enrolled_in(course_key),\r\n 'has_started': course.has_started(),\r\n 'has_ended': course.has_ended(),\r\n 'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n }\r\n\r\n try:\r\n advance = lambda memo, (letter, score): \"{}: {}, \".format(letter, score) + memo\r\n section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), \"\")[:-2]\r\n except Exception:\r\n section_data['grade_cutoffs'] = \"Not Available\"\r\n # section_data['offline_grades'] = offline_grades_available(course_key)\r\n\r\n try:\r\n section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]\r\n except Exception:\r\n section_data['course_errors'] = [('Error fetching errors', '')]\r\n\r\n return section_data", "def export_courses():\n courses = Course.query().fetch()\n dictionary = {}\n\n for course in courses:\n dictionary[course.department + \"\" + course.number] = course.to_dict()\n\n return dictionary", "def make_datatable(self):\r\n\r\n data = []\r\n\r\n for course in self.get_courses():\r\n gdir = course.id.course\r\n data.append([course.display_name, course.id.to_deprecated_string()]\r\n + self.git_info_for_course(gdir))\r\n\r\n return dict(header=[_('Course Name'), _('Directory/ID'),\r\n _('Git Commit'), _('Last Change'),\r\n _('Last Editor')],\r\n title=_('Information about all courses'),\r\n data=data)", "def _create_sections(self):\n self._SECTIONS = {}", "def get_course(data):\n\n return {item['course'] for item in data}", "def build_course(self):\n courses = []\n aprovacao_d = {}\n # semestral\n for rate_it in self.__semestral_rate:\n # pega uma lista no qual o primeiro elemento é a taxa, o segundo\n # e o terceiro são quantidades\n rate_data = self.analysis[\"semestral_rate\"][rate_it.name]\n for i in rate_data[0].index:\n if i[0] not in aprovacao_d:\n aprovacao_d[i[0]] = {}\n\n periodo = str(i[1]) + \"/\" + str(i[2])\n aprovacao_d[i[0]][periodo] = [\n float(rate_data[0][i]),\n int(rate_data[1][i]),\n int(rate_data[2][i]),]\n\n note = self.analysis[\"general_note_statistic\"]\n note_last_year = self.analysis[\"last_year_statistic\"]\n for course in self.analysis[\"courses\"].index:\n course_dict = {}\n course_dict[\"disciplina_codigo\"] = course\n course_dict[\"disciplina_nome\"] = self.analysis[\"courses\"][course]\n \n # If the course code is related to more than one name,\n # concatenate these names into an unique string\n if type(course_dict[\"disciplina_nome\"]) != str:\n new_course_name = \" | \".join(list(course_dict[\"disciplina_nome\"]))\n course_dict[\"disciplina_nome\"] = new_course_name\n # quantidade de matriculas\n count = self.analysis[\"general_count_submission\"][course]\n course_dict[\"qtd_alunos\"] = count\n # notas\n course_dict[\"qtd_cursada_aprov\"] = self.analysis[\"coursed_ratio\"][course]\n course_dict[\"nota\"] = [note[0][course], note[1][course]]\n course_dict[\"nota_ultimo_ano\"] = [\n note_last_year[0][course],\n note_last_year[1][course]\n ]\n # taxas\n for rate_it in self.__rates:\n rate_data = self.analysis[\"general_rates\"][rate_it.name]\n course_dict[rate_it.name] = 
float(rate_data[0][course])\n course_str = rate_it.name.replace(\"taxa\", \"qtd\")\n # count_sel define qual quantidade vai para o json, a especifica\n # ou geral\n course_dict[course_str] = int(\n rate_data[rate_it.count_sel][course])\n # rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n\n # taxas do ultimo anos\n course_dict[\"taxa_reprovacao_ultimo_absoluto\"] = self.analysis[\"last_year_taxa_reprovacao_absoluta\"][course]\n course_dict[\"taxa_reprovacao_ultimo_frequencia\"] = self.analysis[\"last_year_taxa_reprovacao_frequencia\"][course]\n\n course_dict[\"grafico_qtd_cursada_aprov\"] = self.analysis[\"coursed_count\"][course]\n course_dict[\"aprovacao_semestral\"] = aprovacao_d[course]\n courses.append(course_dict)\n return courses", "def build_course_dictionary(title_result_set, desc_result_set) -> Dict[str, List[str]]:\n\n course_dictionary = {} # placeholder dictionary\n\n for (tagged_title, tagged_description) in zip(title_result_set, desc_result_set): # iterate through multiple result sets\n full_title_desc_list = {}\n full_title_desc_list = [str(tagged_title.text)] + str(tagged_description.text).strip().splitlines() # remove trailing whitespace, then get list of lines\n course_dictionary[str(tagged_title.text)[:8]] = full_title_desc_list\n\n return course_dictionary", "def sections(self):\r\n # Dict to store the result\r\n nav_dict = dict()\r\n\r\n section_titles = self._section_titles()\r\n\r\n # Get the section titles for each chapter\r\n for sec_index, sec_title in enumerate(section_titles):\r\n\r\n if len(section_titles) < 1:\r\n self.warning(\"Could not find subsections for '{0}'\".format(sec_title))\r\n else:\r\n # Add one to convert list index (starts at 0) to CSS index (starts at 1)\r\n nav_dict[sec_title] = self._subsection_titles(sec_index + 1)\r\n\r\n return nav_dict", "def parse_courses():\n\n subjects = collections.OrderedDict()\n name = '' # the most recent course name acronym (ex. 
'COMP')\n\n courses = re.sub(r'\\([^)]*\\)', '', COURSES).split() # Remove parens and their contents\n\n for course in courses:\n if course == 'OR':\n continue\n\n if course[0].isalpha():\n\n index = 0 # the upper bound character index of the subject name\n for char in course:\n if char.isalpha():\n index += 1\n else:\n break\n\n name = course[:index]\n number = course[index:index+4]\n else:\n number = course[:4]\n\n try:\n subjects[name].append(number)\n except KeyError:\n subjects[name] = [number]\n\n return subjects", "def parse_create_section(xml_course):\n\n attrs = [\n \"section\",\n 'crn',\n \"start-time\",\n \"end-time\",\n \"meeting-days\",\n \"location\",\n \"section-number\",\n \"instructor\"\n ]\n\n section = pull_attributes_from_xml(xml_course, attrs)\n\n section[\"places\"] = []\n\n # Create Place attribute pointer based on location string\n # Get places from Parse\n places = get_places()[\"results\"]\n # Get location info from section (of form [\"BRK 101\", \"TBA\"])\n all_locations = section[\"location\"].split(\", \")\n # Filter out TBA\n # TODO Maybe do something else with them\n locations = [location for location in all_locations if location != \"TBA\"]\n\n for location in locations:\n building_code = location.split(\" \")[0]\n for place in places:\n if place.get(\"symbol\") and place[\"symbol\"] == building_code:\n section[\"places\"].append(place[\"objectId\"])\n break;\n\n\n return section", "def get_sections(pages):\n sections = {}\n section_pages = {}\n current_section_name = None\n current_section = []\n\n for page_num, page in enumerate(pages):\n clean_page = [re.sub(\"\\s+\", \" \", i.strip()) for i in page.split(\"\\n\")]\n\n for ind, i in enumerate(clean_page):\n\n if (\n re.findall(\"^Section \\d+\", i)\n and \"page\" not in i\n or (re.sub(\"\\d+ [\\w+\\s+]+\", \"\", i) == \"\" and ind == 0 and len(i) > 6)\n ):\n if current_section_name is not None:\n sections[current_section_name] = current_section\n current_section = []\n current_section_name = i\n break\n\n section_pages[page_num + 1] = current_section_name or \"No Section\"\n\n current_section.extend(clean_page)\n return sections, section_pages", "def build_general_course(self):\n\n courses = {}\n\n if self.__build_analyze is False:\n self.build_analysis()\n\n courses[\"taxa_conhecimento\"] = self.analysis[\"taxa_conhecimento\"]\n courses[\"taxa_reprovacao\"] = self.analysis[\"taxa_reprovacao_absoluta\"]\n courses[\"taxa_trancamento\"] = self.analysis[\"taxa_trancamento\"]\n\n # cria cache\n cache = {}\n note = self.analysis[\"general_note_statistic\"]\n for rate_it in self.__rates:\n rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n for course in self.analysis[\"courses\"].index:\n if course not in cache:\n cache[course] = {}\n cache[course][rate_it.name] = rate_calc[course]\n cache[course][\"nota\"] = [note[0][course], note[1][course]]\n\n courses[\"cache\"] = cache\n\n # cria o campo compara_aprov\n courses[\"compara_aprov\"] = self.analysis[\"graph_course\"]\n\n # cria o campo courses\n courses[\"disciplinas\"] = self.analysis[\"courses\"].to_dict()\n\n return courses", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def all_courses(records):\n \n course_and_id_dict = {} #This creates an empty 
dictionary\n for all_tuples in records:\n course_info_tuple = all_tuples[0] #Extracts all course information\n course_id = course_info_tuple[0]\n course_name = course_info_tuple[1]\n \n course_and_id_dict[course_id] = course_name\n \n return course_and_id_dict", "def _section_membership(course_key, access):\r\n section_data = {\r\n 'section_key': 'membership',\r\n 'section_display_name': _('Membership'),\r\n 'access': access,\r\n 'enroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n 'unenroll_button_url': reverse('students_update_enrollment', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n 'modify_beta_testers_button_url': reverse('bulk_beta_modify_access', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n 'list_course_role_members_url': reverse('list_course_role_members', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n 'modify_access_url': reverse('modify_access', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n 'list_forum_members_url': reverse('list_forum_members', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n 'update_forum_role_membership_url': reverse('update_forum_role_membership', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n }\r\n return section_data", "def _section_metrics(course_key, access):\r\n section_data = {\r\n 'section_key': 'metrics',\r\n 'section_display_name': ('Metrics'),\r\n 'access': access,\r\n 'course_id': course_key.to_deprecated_string(),\r\n 'sub_section_display_name': get_section_display_name(course_key),\r\n 'section_has_problem': get_array_section_has_problem(course_key),\r\n 'get_students_opened_subsection_url': reverse('get_students_opened_subsection'),\r\n 'get_students_problem_grades_url': reverse('get_students_problem_grades'),\r\n 'post_metrics_data_csv_url': reverse('post_metrics_data_csv'),\r\n }\r\n return section_data", "def create_variables(self, courses):\n has_sections = isinstance(courses, dict)\n for course in courses:\n self.p.add_variable(course, courses.get(course, []) if has_sections else self.get_sections(course))", "def build_course_map(course_content):\n course_blocks = []\n\n for key, course_data in course_content.items():\n block_data = {}\n block_data['id'] = parse_id(key)\n block_data['type'] = course_data['category']\n\n try:\n block_data['name'] = course_data['metadata']['display_name']\n except KeyError:\n block_data['name'] = block_data['id']\n\n customize_by_type(course_data, block_data)\n add_children(course_data, block_data)\n\n course_blocks.append(block_data)\n\n return course_blocks", "def parseSections(data):\n pro = _sectionSplit.split(data)\n sections = {}\n for x in xrange(1, len(pro), 2):\n sections[pro[x]] = pro[x+1]\n return sections", "def extract_course_summary(course):\n return {\n field: course.fields[field]\n for field in CourseSummary.course_info_fields\n if field in course.fields\n }", "def create_dict(rows, tag, tag_id=None, start=0, enroll=False):\n enrollment_info_map = {\n 'Enrollment Requirement': 'requirements',\n 'Add Consent': 'add_consent',\n 'Drop Consent': 'drop_consent',\n }\n\n data = {}\n\n for row in rows:\n name_raw, desc_raw = row.find_all(tag, id=tag_id)[start:]\n name = name_raw.text.strip()\n desc = desc_raw.text.encode('ascii', 'ignore').decode().strip()\n\n if enroll:\n name = enrollment_info_map[name]\n else:\n name = name.lower().replace(' / ', '_')\n\n data.update({name: desc})\n\n return data", "def get_sections(h):\n secnames = {}\n 
resec = re.compile('(\\w+)\\[(\\d*)\\]')\n for sec in h.allsec():\n g = resec.match(sec.name())\n if g.group(1) not in secnames.keys():\n secnames[g.group(1)] = [int(g.group(2))]\n else:\n secnames[g.group(1)].append(int(g.group(2)))\n return secnames", "def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses", "def section(data):\n if len(data['index']) == 2 and data['index'][1][0].isdigit():\n element = {}\n element['is_section'] = True\n element['section_id'] = '-'.join(data['index'])\n if u\"§§ \" == data['title'][:3]:\n element['is_section_span'] = True\n else:\n element['is_section_span'] = False\n match = SECTION_TITLE_REGEX.match(data['title'])\n element['label'] = match.group(1)\n element['sub_label'] = match.group(2)\n return element", "def get_all(): \n cdict = {}\n for section in rcp.sections():\n cdict[section] = get_section(section)\n return cdict", "def build_course_sections(self, course_section_data):\n return [self.build_course_section(**row) for row in course_section_data]", "def spamHeaders(self) -> Tuple[List[str], Dict[str, str]]:\n sections = [\"STATUS\", \"TITLE\", \"PROJECT\", \"FILE\", \"SITE\", \"CHANNAME\", \"DATA\"]\n sectionHeaders = {}\n sectionHeaders[\"STATUS\"] = [\"STATUS\"]\n sectionHeaders[\"TITLE\"] = [\"AUTHOR\", \"VERSION\", \"DATE\", \"COMMENT\"]\n sectionHeaders[\"FILE\"] = [\"NAME\", \"FREQBAND\", \"DATE\"]\n sectionHeaders[\"CHANNAME\"] = [\"ITEMS\", \"NAME\"]\n sectionHeaders[\"DATA\"] = [\"ITEMS\", \"CHAN\"]\n return sections, sectionHeaders", "def csc():\n endcaps = [1,2]\n disks = [1,2,3,4]\n rings = {1:[1,2,3], # different rings for different disks\n 2:[1,2], \n 3:[1,2],\n 4:[1,2]}\n\n csc_info = {\n \"endcaps\":endcaps,\n \"disks\": disks,\n \"rings\": rings}\n\n return csc_info", "def reformat_course(courses):\n\n if isinstance(courses, Courses): # if it's just one course object\n reformatted_data = {\n 'startDate': reformat_date(courses.startDate),\n 'organizingMeetingDate': reformat_date(courses.organizingMeetingDate),\n 'startTime': reformat_time(courses.startTime),\n 'organizingMeetingTime': reformat_time(courses.organizingMeetingTime)\n }\n else:\n reformatted_data = []\n for course in courses:\n if isinstance(course, Courses):\n reformatted_data.append(\n {\n 'startDate': reformat_date(course.startDate),\n 'organizingMeetingDate': reformat_date(course.organizingMeetingDate),\n 'startTime': reformat_time(course.startTime),\n 'organizingMeetingTime': reformat_time(course.organizingMeetingTime),\n 'studentLimit': course.studentLimit,\n 'studentCount': len(get_students_by_course(course.id)),\n 'additionalData': course.additionalData\n }\n )\n\n return reformatted_data", "def parseSection(self, response):\n sel = Selector(response)\n sections = sel.xpath('//table[@class=\"sections 
responsive\"]//tr[not(@class=\"headers\")]')\n for s in sections:\n item = CourseItem(response.request.meta[\"item\"])\n item['section'] = s.xpath('@data-section-id').get().strip()\n item['instructors'] = s.css('.instructor::text').get()\n if item['instructors'] != None:\n item['instructors'].strip()\n item['instructors'] = [x.strip() for x in re.split(',', item['instructors'])]\n item['syllabus'] = s.css('.syllabus a::attr(href)').get()\n if item['syllabus'] != None:\n item['syllabus'].strip()\n return item\n \n\n \"\"\"\n Ignore the code below this. I was trying to get\n the times, days, and number registered from the class sections\n \"\"\"\n #times = s.xpath('//td[@class=\"time\"]/text()').get().strip()\n #times = re.split('-', times)\n #starttime = times[0]\n #endtime = times[1]\n #endt = dt.datetime.strptime(endtime, '%H:%M%p')\n # TODO: Check if \"am\"/\"pm\" from endt, & if endt hour is greater/less than startt \n #startt = dt.datetime.strptime(starttime, '%H:%M')\n #days = s.xpath('//td[@class=\"days\"]/text()').get().strip()\n #days = re.split(',', days)\n #numdays = len(days]\n \n #cap = s.xpath('//td[@class=\"registered\"]//a/text()').get().strip()\n #cap = re.split(' of ', cap.strip())\n #item['capacity'] = cap[1]" ]
[ "0.7070789", "0.60182756", "0.6016982", "0.5990312", "0.5969372", "0.59540147", "0.59495693", "0.59283185", "0.58511907", "0.58379775", "0.58187705", "0.5808631", "0.5805773", "0.58040214", "0.5761286", "0.57347447", "0.5731346", "0.5700729", "0.5679609", "0.5641057", "0.5584008", "0.5567756", "0.55592006", "0.55542386", "0.55371946", "0.55106723", "0.5503354", "0.54758847", "0.5440823", "0.54243755" ]
0.64286023
1
Each course page has a small info section at the beginning, which I had to extract and formulate in a different way from the main sections. This function constructs the dictionary entries for the course when given a string with all the details required for the info section
def get_info_list(info_string, course):
    info_list = []
    split_on_newline = info_string.split("\n")
    for elem in split_on_newline:
        split = elem.split(": ")
        for s in split:
            info_list.append(s)
    info_list = info_list[1:-1]
    info_tags = [
        'session',
        'school',
        'credits',
        'level',
        'offered',
        'visiting_students',
        'erasmus_students',
    ]
    i = 0
    for info_tag in info_tags:
        course[info_tag] = new_dict(info_list[i] + ': ', info_list[i + 1])
        i += 2
    return course
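A minimal usage sketch for the function above, assuming new_dict simply pairs a heading with its value; the info string and course title below are hypothetical, not taken from a real course page:

    def new_dict(heading, value):
        # Assumed helper: pairs a heading label with its value.
        return {'heading': heading, 'value': value}

    info_string = ("\nSession: 2016-17\nSchool: Computing Science\nCredits: 20\n"
                   "Level: Honours\nOffered: Semester 1\nVisiting students: Yes\n"
                   "Erasmus students: Yes\n")
    course = {'title': 'Example Course'}
    course = get_info_list(info_string, course)
    # course['credits'] is now {'heading': 'Credits: ', 'value': '20'}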
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _section_course_info(course_key, access):\r\n course = get_course_by_id(course_key, depth=None)\r\n\r\n section_data = {\r\n 'section_key': 'course_info',\r\n 'section_display_name': _('Course Info'),\r\n 'access': access,\r\n 'course_id': course_key,\r\n 'course_display_name': course.display_name,\r\n 'enrollment_count': CourseEnrollment.num_enrolled_in(course_key),\r\n 'has_started': course.has_started(),\r\n 'has_ended': course.has_ended(),\r\n 'list_instructor_tasks_url': reverse('list_instructor_tasks', kwargs={'course_id': course_key.to_deprecated_string()}),\r\n }\r\n\r\n try:\r\n advance = lambda memo, (letter, score): \"{}: {}, \".format(letter, score) + memo\r\n section_data['grade_cutoffs'] = reduce(advance, course.grade_cutoffs.items(), \"\")[:-2]\r\n except Exception:\r\n section_data['grade_cutoffs'] = \"Not Available\"\r\n # section_data['offline_grades'] = offline_grades_available(course_key)\r\n\r\n try:\r\n section_data['course_errors'] = [(escape(a), '') for (a, _unused) in modulestore().get_course_errors(course.id)]\r\n except Exception:\r\n section_data['course_errors'] = [('Error fetching errors', '')]\r\n\r\n return section_data", "def parse_create_section(xml_course):\n\n attrs = [\n \"section\",\n 'crn',\n \"start-time\",\n \"end-time\",\n \"meeting-days\",\n \"location\",\n \"section-number\",\n \"instructor\"\n ]\n\n section = pull_attributes_from_xml(xml_course, attrs)\n\n section[\"places\"] = []\n\n # Create Place attribute pointer based on location string\n # Get places from Parse\n places = get_places()[\"results\"]\n # Get location info from section (of form [\"BRK 101\", \"TBA\"])\n all_locations = section[\"location\"].split(\", \")\n # Filter out TBA\n # TODO Maybe do something else with them\n locations = [location for location in all_locations if location != \"TBA\"]\n\n for location in locations:\n building_code = location.split(\" \")[0]\n for place in places:\n if place.get(\"symbol\") and place[\"symbol\"] == building_code:\n section[\"places\"].append(place[\"objectId\"])\n break;\n\n\n return section", "def _parse(self, course: NavigableString) -> ParseType:\n\n info = {\"link\": \"\", \"icon\": \"\", \"title\": \"\", \"description\": \"\",\n \"counts\": {}}\n\n info[\"link\"] = course.a[\"href\"]\n info[\"icon\"] = course.a.img[\"src\"]\n\n description: NavigableString = course.a.div\n info[\"title\"] = description.div.get_text()\n info[\"description\"] = description.p.get_text()\n\n counts: NavigableString = course.find(\"div\", {\"class\": \"courseCounts\"})\n counts_data: ResultSet = counts.find_all(\"li\")\n for data in counts_data:\n name: str = data.span.get_text().lower()\n val: str = data.find(\"p\").get_text()\n info[\"counts\"][name] = int(val.replace(\",\", \"\"))\n\n return info", "def get_sections(pages):\n sections = {}\n section_pages = {}\n current_section_name = None\n current_section = []\n\n for page_num, page in enumerate(pages):\n clean_page = [re.sub(\"\\s+\", \" \", i.strip()) for i in page.split(\"\\n\")]\n\n for ind, i in enumerate(clean_page):\n\n if (\n re.findall(\"^Section \\d+\", i)\n and \"page\" not in i\n or (re.sub(\"\\d+ [\\w+\\s+]+\", \"\", i) == \"\" and ind == 0 and len(i) > 6)\n ):\n if current_section_name is not None:\n sections[current_section_name] = current_section\n current_section = []\n current_section_name = i\n break\n\n section_pages[page_num + 1] = current_section_name or \"No Section\"\n\n current_section.extend(clean_page)\n return sections, section_pages", "def 
about_course(command):\n\n response = {'code': None, 'title': None,\\\n 'prof': None, 'section1': None,\\\n 'timing1': None, 'room1' : None,\\\n 'section2': None, 'timing2': None,\\\n 'room2': None, 'description': None,\\\n 'prereq': None, 'perm': None}\n \n if re.search('ENPM611', command):\n response = {'code': 'ENPM 611', 'title': 'Software Engineering',\\\n 'prof': 'Christopher Ackermann', 'section1': '0101',\\\n 'timing1': 'Monday 4:00 - 6:40 PM', 'room1': 'JMP 2121',\\\n 'description': 'Software engineering concepts, methods, and practices important to both the theorist and the practitioner will be covered. The entire range of responsibilities expected of a software engineer are presented. The fundamental areas of requirements development, software design, programming languages, and testing are covered extensively. Sessions on supporting areas such as systems engineering, project management, and software estimation are also included.', 'prereq': 'Competency in one programming language; and must have completed an undergraduate software engineering course. Or permission of instructor.', 'perm': 'Permission of ENGR-CDL-Office of Advanced Engineering Education.'\n}\n elif re.search('ENPM613', command):\n response = {'code': 'ENPM 613', 'title': 'Software Design and Implementation', 'prof': 'Ioana Rus', 'section1': '0101', 'timing1': 'Wednesday 7:00 - 9:40 PM', 'room1': 'TBA', 'description': 'Software design concepts and practices within the field important to both the practitioner and the theorist will be covered. Architectural and detailed designs are included for batch, client/server, and real-time systems. Design considerations for structured, object-oriented, and Web-based systems are covered. Design of databases, user interfaces, forms, and reports are also included. Implementation issues that affect the design, including error handling, performance, and inter-process communication, are presented.', 'perm': 'Permission of ENGR-CDL-Office of Advanced Engineering Education.'}\n elif re.search('ENPM631', command):\n response = {'code': 'ENPM631', 'title': 'TCP/IP Networking', 'prof': 'Pedram Fard', 'section1': '0101', 'timing1': 'Tuesday 7:00 - 9:40 PM', 'room1': 'TBA' , 'description': 'Describe how IP datagram travels through the internet and are routed from the source to the destination. Introduce the two transport protocols: UDP and TCP, the proper context to use each one, and related parameters and issues. Cover some other protocols, closely related to the TCP/IP that are responsible for the seamless operation of the Internet.', 'perm': 'ENPM602; or permission of instructor. And permission of ENGR-CDL-Office of Advanced Engineering Education.'}\n elif re.search('ENPM687', command):\n response = {'code': 'ENPM687', 'title': 'Digital Forensics and Incidence Response', 'prof': 'Jonas Amoonarquah', 'section1': 'Online', 'timing1': 'NA', 'room1': 'NA', 'description': 'Students will implement a robust incident response methodology, including proper forensic handling of evidence, and cover legal aspects of national and international law regarding forensics. The bulk of the course covers evidence acquisition, preservation, analysis and reporting on multiple platforms.', 'perm':'None'}\n elif re.search('ENPM691', command):\n response = {'code': 'ENPM691', 'title': 'Hacking of C Programs and Unix Binaries', 'prof': 'Dharmalingam Ganesan', 'section1': '0101', 'timing1': 'Thursday 7:00 - 9:40 PM', 'room1': 'JMP 3201', 'description': 'Teaches the fundamentals of secure programming in C. 
An in depth discussion on various security vulnerabilities (e.g., buffer overflows) in C applications will be taught with hands-on demo of concepts during the class. Students will learn how a C program runs \"under-the-hood\". The course will teach nitty-gritty of C programs by analyzing at the assembly level. The course discusses best practices (e.g., coding standards) and design principles for secure programming so that security can be built-in during design time. In addition to assignments, students are required to present papers related to this course.', 'perm': None, 'prereq': 'ENEE150; or students who have taken courses with comparable content may contact the department.'}\n elif re.search('ENPM693', command):\n response = {'code': 'ENPM693', 'title': 'Network Security', 'prof': 'Sohraab Soltani', 'section1': '0101', 'timing1': 'Tuesday 7:00 - 9:40 PM', 'room1' : 'JMP 3201', 'description': 'Introduction to various approaches to design; specify and verify security protocols used in large systems and networks; familiarization with some current technologies. Security threats and countermeasures, communication security and basic encryption techniques, authentication protocols, data confidentiality and integrity, analysis of cryptographic protocols, and access control in large systems and networks.', 'perm': None, 'prereq': 'An operating systems and/or network protocol course or equivalent.'}\n elif re.search('ENPM694', command):\n response = {'code': 'ENPM694', 'title': 'Networks and Protocols', 'prof': 'Sohraab Soltani', 'section1': '0101', 'timing1': 'Wednesday 7:00 - 9:40 PM', 'room1': 'JMP 3201', 'description': 'Provides a deep understanding of TCP/IP protocol suit and routing in the internet. The course topics are: overview of TCP/IP, basics of IP protocol, basics of TCP protocol, Network Address Translation (NAT), Dynamic Host Configuration Protocol (DHCP), Internet Protocol Security (IPsec), Internet Control Message Protocol (ICMP), Simple Mail Transfer Protocol (SMTP), Domain Name Service (DNS), IPv6, Concepts of routing (Bellman-Ford and Dijkstra algorithms), Routing Information Protocol (RIP), Open Shortest Path First (OSPF), Interior Gateway Routing Protocol (IGRP), Enhance Gateway Routing Protocol (EIGRP), and Border Gateway Protocol (BGP).'}\n elif re.search('ENPM696', command):\n response = {'code': 'ENPM696', 'title': 'Reverse Software Engineering', 'prof': 'Allen Hazelton', 'section1': '0101', 'timing1': 'Tuesday 4:00 - 6:40 PM', 'room1': 'TBA', 'description': 'An in-depth understanding of software reverse engineering concepts and hands-on training with reverse engineering tools, including disassemblers, decompilers, and code analyzers. Students will become familiar with both low-level software and the x86 instruction set through binary reversing sessions. This course also provides insights into many subjects such as system security, source code analysis, software design, and program understanding that will be beneficial in a variety of fields.', 'prereq': 'ENPM691 and CMSC106; or permission of instructor. 
And permission of ENGR-CDL-Office of Advanced Engineering Education.'}\n elif re.search('ENPM809J', command):\n response = {'code': 'ENPM809J', 'title': 'Cloud Security', 'prof': 'Kevin Shivers', 'section1': '0101', 'timing1': 'Monday 7:00 - 9:40 PM', 'room1': 'TBA', 'section2': '0201', 'timing2': 'Thursday 4:00 - 6:40 PM', 'room2': 'TBA', 'description': 'NA'}\n elif re.search('ENPM809R', command):\n response = {'code': 'ENPM809R', 'title': 'Software Defined Networking', 'prof': 'Emre Gunduzhan', 'section1': '0101', 'timing1': 'Monday 4:00 - 6:40 PM', 'room1': 'TBA', 'description': 'NA'}\n elif re.search('ENPM809W', command):\n response = {'code': 'ENPM809W', 'title': 'Security and Software', 'prof': 'Mikael Lindvall', 'section1': '0101', 'timing1': 'Thursday 7:00 - 9:40 PM', 'room1': 'TBA', 'description': 'NA'}\n\n return response", "def section(data):\n if len(data['index']) == 2 and data['index'][1][0].isdigit():\n element = {}\n element['is_section'] = True\n element['section_id'] = '-'.join(data['index'])\n if u\"§§ \" == data['title'][:3]:\n element['is_section_span'] = True\n else:\n element['is_section_span'] = False\n match = SECTION_TITLE_REGEX.match(data['title'])\n element['label'] = match.group(1)\n element['sub_label'] = match.group(2)\n return element", "def bsoup(coursepage):\n soup = BeautifulSoup(coursepage.content, 'lxml')\n h1 = soup.find_all('h1')[2]\n html = h1.find_next_siblings()\n all_strings = [h1.string]\n for div in html:\n try:\n text = div.get_text()\n except:\n text = div.string\n if text is not None:\n all_strings.append(text)\n course = {'title': all_strings[0]}\n course = get_info_list(all_strings[1], course)\n course['description'] = new_dict(all_strings[2], all_strings[3])\n course['timetable'] = new_dict(all_strings[4], all_strings[5])\n course['requirements_of_entry'] = new_dict(all_strings[6], all_strings[7])\n course['excluded_courses'] = new_dict(all_strings[8], all_strings[9])\n course['co_requisites'] = new_dict(all_strings[10], all_strings[11])\n course['assessment_weighting'] = new_dict(all_strings[12], all_strings[13])\n course['aims'] = new_dict(all_strings[17], all_strings[18])\n date = all_strings[14].split(': ')\n course['assessment_date'] = new_dict(date[0] + \": \", date[1])\n course['learning_outcomes'] = new_dict(all_strings[19], all_strings[20])\n # TODO Doesn't parse Minimum Requirement for Award of Credit or\n # Reassessment Options\n return course", "def parseSection(self, response):\n sel = Selector(response)\n sections = sel.xpath('//table[@class=\"sections responsive\"]//tr[not(@class=\"headers\")]')\n for s in sections:\n item = CourseItem(response.request.meta[\"item\"])\n item['section'] = s.xpath('@data-section-id').get().strip()\n item['instructors'] = s.css('.instructor::text').get()\n if item['instructors'] != None:\n item['instructors'].strip()\n item['instructors'] = [x.strip() for x in re.split(',', item['instructors'])]\n item['syllabus'] = s.css('.syllabus a::attr(href)').get()\n if item['syllabus'] != None:\n item['syllabus'].strip()\n return item\n \n\n \"\"\"\n Ignore the code below this. 
I was trying to get\n the times, days, and number registered from the class sections\n \"\"\"\n #times = s.xpath('//td[@class=\"time\"]/text()').get().strip()\n #times = re.split('-', times)\n #starttime = times[0]\n #endtime = times[1]\n #endt = dt.datetime.strptime(endtime, '%H:%M%p')\n # TODO: Check if \"am\"/\"pm\" from endt, & if endt hour is greater/less than startt \n #startt = dt.datetime.strptime(starttime, '%H:%M')\n #days = s.xpath('//td[@class=\"days\"]/text()').get().strip()\n #days = re.split(',', days)\n #numdays = len(days]\n \n #cap = s.xpath('//td[@class=\"registered\"]//a/text()').get().strip()\n #cap = re.split(' of ', cap.strip())\n #item['capacity'] = cap[1]", "def get_sections(data):\n print \" * Extracting sections\"\n sections = OrderedDict()\n\n results = re.finditer(r\"^([A-Z][A-Z]+)([ ]+.*)?$\", data, re.M)\n data_start = None\n data_end = None\n prev_section = None\n cur_section = None\n for res in results:\n print \" * Found\", res.groups()[0]\n data_end = res.start()\n if prev_section is not None:\n # Get rid of potential comments at the end of a line.\n _data = re.sub(r\"\\s*#.*\", \"\", data[data_start:data_end])\n sections[prev_section][\"data\"] = filter(None, _data.splitlines())\n data_start = res.end()\n cur_section = res.groups()[0]\n sections[cur_section] = {\"arguments\": res.groups()[1], \"data\": \"\"}\n prev_section = \"%s\" % cur_section # Only to be sure we get a brand new string...\n\n return sections", "def parse(u):\n rec = {}\n\n try:\n r = requests.get(u, headers=headers)\n\n if r.status_code == 200:\n html = r.text\n soup = BeautifulSoup(html, 'lxml')\n overview_section = soup.select('.Raw-s14xcvr1-0 gXqFYO')\n full_name_section = soup.select('.sc-iwsKbI kjxnCg')\n years_of_practice_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')\n language_section = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')\n office_location_section = soup.select('.Paragraph-fqygwe-0 cojhks')\n hospital_affiliation_section = soup.select('.Paragraph-fqygwe-0 fwayNy')\n specialties = soup.select('.DataField__Data-c3wc7f-1 gLHSHx')\n education_and_medical_training_section = soup.select('.EducationAndExperience__Item-xn5fll-0 bzYYRk')\n certification_and_licensure_section = soup.select('.Paragraph-fqygwe-0 bQPwuv')\n\n if overview_section:\n overview = overview_section[0].text.replace('\"', '')\n if full_name_section:\n full_name = full_name_section[0].text\n if years_of_practice_section:\n years_of_practice = years_of_practice_section[0].text.strip().replace('\"', '')\n if language_section:\n language = language_section[0].text.strip().replace('\"', '')\n if office_location_section:\n office_location = office_location_section[0].text\n if hospital_affiliation_section:\n hospital_affiliation = hospital_affiliation_section[0].text.strip().replace('\"', '')\n if specialties_section:\n specialties = specialties_section[0].text.replace('\"', '')\n if education_and_medical_training_section:\n education_and_medical_training = education_and_medical_training_section[0].text\n if certification_and_licensure_section:\n certification_and_licensure = certification_and_licensure_section[0].text\n\n\n rec = {'overview': overview, 'full_name': full_name, 'years_of_practice': years_of_practice, 'language': language,\n 'office_location': office_location, 'hospital_affiliation': hospital_affiliation, 'specialties':specialties,\n 'education_and_medical_training': education_and_medical_training,\n 'certification_and_licensure': certification_and_licensure}\n except Exception as 
ex:\n print('Exception while parsing')\n print(str(ex))\n finally:\n return json.dumps(rec)", "def get_course_about_section(course, section_key):\r\n\r\n # Many of these are stored as html files instead of some semantic\r\n # markup. This can change without effecting this interface when we find a\r\n # good format for defining so many snippets of text/html.\r\n\r\n # TODO: Remove number, instructors from this list\r\n if section_key in ['short_description', 'description', 'key_dates', 'video',\r\n 'course_staff_short', 'course_staff_extended',\r\n 'requirements', 'syllabus', 'textbook', 'faq', 'more_info',\r\n 'number', 'instructors', 'overview',\r\n 'effort', 'end_date', 'prerequisites', 'ocw_links']:\r\n\r\n try:\r\n\r\n request = get_request_for_thread()\r\n\r\n loc = course.location.replace(category='about', name=section_key)\r\n\r\n # Use an empty cache\r\n field_data_cache = FieldDataCache([], course.id, request.user)\r\n about_module = get_module(\r\n request.user,\r\n request,\r\n loc,\r\n field_data_cache,\r\n course.id,\r\n not_found_ok=True,\r\n wrap_xmodule_display=False,\r\n static_asset_path=course.static_asset_path\r\n )\r\n\r\n html = ''\r\n\r\n if about_module is not None:\r\n try:\r\n html = about_module.render('student_view').content\r\n except Exception: # pylint: disable=broad-except\r\n html = render_to_string('courseware/error-message.html', None)\r\n log.exception(\r\n u\"Error rendering course={course}, section_key={section_key}\".format(\r\n course=course, section_key=section_key\r\n ))\r\n return html\r\n\r\n except ItemNotFoundError:\r\n log.warning(\r\n u\"Missing about section {key} in course {url}\".format(key=section_key, url=course.location.to_deprecated_string())\r\n )\r\n return None\r\n elif section_key == \"title\":\r\n return course.display_name_with_default\r\n elif section_key == \"university\":\r\n return course.display_org_with_default\r\n elif section_key == \"number\":\r\n return course.display_number_with_default\r\n\r\n raise KeyError(\"Invalid about key \" + str(section_key))", "def parseLesson(lesson_text):\n lesson_parsed = dict()\n\n lesson_parsed['section_starts'] = list()\n lesson_parsed['sections'] = list()\n lesson_parsed['end_of_lesson'] = None\n lesson_parsed['source'] = None\n lesson_parsed['title'] = None\n lesson_parsed['number'] = None\n\n end_of_lesson_markers = [\n r'^\\s*\\(End\\s+of\\s+lesson',\n r'^\\s*\\(End\\s+of\\s+lecture',\n r'^\\(End\\s+of\\s+recording',\n r'^\\[End\\s+of\\s+recording',\n r'^\\[End\\s+of\\s+the\\s+recording',\n r'^\\(End of the recording',\n r'^\\[End\\s+of\\s+the\\s+tape',\n r'^\\(End\\s+of\\s+audible\\s+portion\\s+of\\s+the\\s+lesson',\n r'^\\[Note\\:\\s+This\\s+is\\s+the\\s+end\\s+of\\s+the\\s+recording',\n ]\n\n # Do a parsing pass of the overall lesson text. 
This information will be used\n # to compile the lesson into a form useful for display and use markup.\n line_number = 0\n for line in lesson_text:\n mobj = re.search(r'^Source:\\s+(.+)$', line)\n if mobj:\n # WORKING HERE\n pass\n\n if re.search(r'^\\d+[a-z]?\\.', line):\n lesson_parsed['section_starts'].append(line_number)\n line_number += 1\n continue\n\n found_end_of_lesson = False \n for end_of_lesson_marker in end_of_lesson_markers:\n if re.search(end_of_lesson_marker, line):\n lesson_parsed['end_of_lesson'] = line_number\n line_number += 1\n found_end_of_lesson = True\n break\n\n if found_end_of_lesson:\n continue\n\n for end_of_lesson_marker in end_of_lesson_markers:\n if re.search(end_of_lesson_marker, line):\n lesson_parsed['end_of_lesson'] = line_number\n line_number += 1\n break\n\n if lesson_parsed['end_of_lesson'] is not None:\n continue\n\n # Remove tabs from the start of a line.\n # Replace tabs between the number and the start of text with a space.\n # Skip \"This page is included for printing and binding purposes\"\n # Footnotes \"[fnX]\" will need to be linked.\n\n line_number += 1\n\n\n # Now compile the lesson instructions into their own things.\n\n idx = 0\n\n new_section = list()\n for section_line in xrange(lesson_parsed['section_starts'][idx], lesson_parsed['section_starts'][idx+1]:\n section_start_text = lesson_text[section_line]\n lesson_parsed['sections'].append(lesson_text[section_line])\n\n\n\n return lesson_parsed", "def print_section( secName, dictName ):\n\n\t# Number of entries in IA dictionary.\n\tmaxitem = len(dictName)\n\n\toutFile.write('\\n\\\\begin{enumerate}\\n')\n\n\tfor m in xrange(0,maxitem):\n\n\t\toutFile.write('\\\\item ' + dictName[m])\n\n\t\tbeans = [bean for bean in secName if int(bean[1]) == (m+1)]\n\t\t\n \t\tif beans[0][-1] == 'N/A':\n \t\t\toutFile.write('\\n\\t\\\\begin{description}[font=\\\\normalfont]\\n')\n \t\t\toutFile.write('\\t\\t\\\\item[N/A]\\n')\n \t\t\toutFile.write('\\t\\\\end{description}\\n\\n')\n\t\telse:\n\t\t\tl = len(beans)\n\t\t\tif beans[0][2].startswith('AY'):\n\t\t\t\toutFile.write('\\t\\\\begin{description}[leftmargin=1.75cm, font=\\\\normalfont]\\n')\n\t\t\t\tbeans[0][2] = string.replace(beans[0][2],'AY','\\\\textsc{ay}')\n\t\t\telse:\n\t\t\t\toutFile.write('\\t\\\\begin{description}[font=\\\\normalfont]\\n')\n\t\t\tfor i in xrange(0,l):\n\t\t\t\tif beans[i][2].startswith('AY'):\n\t\t\t\t\tbeans[i][2] = string.replace(beans[i][2],'AY','\\\\textsc{ay}')\n\t\t\t\toutFile.write('\\t\\t\\\\item[\\\\small ' + beans[i][2] + '] ' + beans[i][3] + '\\n')\n\n\t\t\t\tif len(beans[i]) == 5:\n\t\t\t\t\toutFile.write('\\n\\n\\t\\t' + beans[i][4] + '\\n')\n\n\t\t\toutFile.write('\\t\\\\end{description}\\n\\n')\n\n\toutFile.write('\\\\end{enumerate}\\n')", "def Content():\n\tTOPIC_DICT = {\"Basics\": [[\"Introduction to Python\", \"/introduction-to-python/\"], \n\t\t\t\t\t\t\t\t\t\t\t\t\t [\"Print functions\", \"/python-print-function/\"],\n\t\t\t\t\t\t\t\t\t\t\t\t\t [\"Math basics with Python 3\", \"/math-basics-python-3/\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t ]],\n\t\t\t\t\t\t\t\t\"Web Dev\": [[\"wd1\"]]} #Web dev is now the second topic\n\t\t\t\t\t\t\t\t#Introduction to Python\n\t#will be the title and the link to that is immediately after. 
Each inner array will be a specific sub section of the basics\n\t#section\n\n\treturn TOPIC_DICT", "def get_course_info_section(request, course, section_key):\r\n usage_key = course.id.make_usage_key('course_info', section_key)\r\n\r\n # Use an empty cache\r\n field_data_cache = FieldDataCache([], course.id, request.user)\r\n info_module = get_module(\r\n request.user,\r\n request,\r\n usage_key,\r\n field_data_cache,\r\n course.id,\r\n wrap_xmodule_display=False,\r\n static_asset_path=course.static_asset_path\r\n )\r\n\r\n html = ''\r\n\r\n if info_module is not None:\r\n try:\r\n html = info_module.render('student_view').content\r\n except Exception: # pylint: disable=broad-except\r\n html = render_to_string('courseware/error-message.html', None)\r\n log.exception(\r\n u\"Error rendering course={course}, section_key={section_key}\".format(\r\n course=course, section_key=section_key\r\n ))\r\n\r\n return html", "def parseCourses(self, response):\n sel = Selector(response)\n courses = sel.xpath('//div[@class=\"course-info expandable\"]')\n for c in courses:\n item = CourseItem(response.request.meta[\"item\"])\n item['code'] += '-' + c.xpath('@id').get().strip()\n item['name'] = c.xpath('//a[@class=\"courselink\"]/text()').get().strip()\n # everything works up to here #\n href = c.xpath('div/h3/a/@href').get()\n url = urljoin('https://web-app.usc.edu', href)\n yield Request(url=url,callback=self.parseSection,meta={'item':item})", "def generate_page_sections_dict(self, project_page_data: dict):\n wiki_obj = WikiService()\n\n short_description = (\n f\"\\n{project_page_data['project']['shortDescription']}\\n\"\n )\n\n created_date = wiki_obj.format_date_text(\n project_page_data['project']['created']\n )\n # created_date_text = f\"\\n{created_date}\\n\" \n # due_date = wiki_obj.format_date_text(\n # project_page_data['project']['due_date']\n # )\n timeframe = (\n f\"\\n* '''Start Date:''' {created_date}\\n\"\n # f\"\\n* '''End Date:''' Estimate {due_date}\\n\"\n )\n\n\n project_url = (\n f\"\\n{project_page_data['project']['url']}\\n\"\n )\n\n hashtag = (\n project_page_data['project']['changesetComment']\n )\n hashtag = (\n project_page_data['project']['changesetComment'].replace(\n \"#\", \"<nowiki>#</nowiki>\"\n )\n )\n hashtag_text = (\n f\"\\n{hashtag}\\n\"\n )\n\n instructions_text = (\n project_page_data['project']\n ['externalSource']\n ['instructions']\n )\n instructions = (\n f\"\\n{instructions_text}\\n\"\n )\n\n per_task_instructions_text = (\n project_page_data['project']\n ['externalSource']\n ['perTaskInstructions']\n )\n per_task_instructions = (\n f\"\\n{per_task_instructions_text}\\n\"\n )\n\n imagery_text = (\n project_page_data['project']\n ['externalSource']\n ['imagery']\n )\n imagery = (\n f\"\\n{imagery_text}\\n\"\n )\n\n license_text = (\n project_page_data['project']\n ['externalSource']\n ['license']\n )\n license = (\n f\"\\n{license_text}\\n\"\n )\n\n # metrics = (\n # f\"\\n* {project_page_data.instructions}\\n\"\n # )\n # quality_assurance = (\n # f\"\\n* {project_page_data.quality_assurance}\\n\"\n # )\n\n users = project_page_data['project'][\"users\"]\n project_users = \"\"\n for user in users:\n project_users += (\n f\"\\n| {user['userId']}\\n| {user['userName']}\\n|-\"\n )\n\n \n\n project_page_sections = {\n self.short_description_section: short_description,\n self.timeframe_section: timeframe,\n # self.timeframe_section: {\n # self.created_section: created_date_text\n # }, # if choose use subsection for timeframe \n self.url_section: project_url,\n 
self.external_sources_section: {\n self.instructions_section: instructions,\n self.per_task_instructions_section: per_task_instructions,\n self.imagery_section: imagery,\n self.license_section: license\n },\n self.hashtag_section: hashtag_text,\n # self.instructions_section: instructions,\n # self.metrics_section: metrics,\n # self.quality_assurance_section: quality_assurance,\n self.team_user_section: {\n self.users_list_section: project_users\n }\n }\n return project_page_sections", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. \n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def get_course(data):\n\n return {item['course'] for item in data}", "def _recipe_details_generator(self, converted_content, overview_recipe):\n def get_cooking_shop_strings(lines):\n ret = []\n buf = None\n is_recipe_step_area = False\n for l in lines:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n if buf:\n ret.append(buf)\n buf = l.strip()\n continue\n\n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_recipe_step_area = False\n\n if re.search(\"^材料\", l.strip()):\n title, materials = re.search(\"(材料)(.*)\", 
l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + materials.strip()\n continue\n\n if re.search(\"^作り方\", l.strip()):\n is_recipe_step_area = True\n title, recipe_steps = re.search(\"(作り方)(.*)\", l.strip()).groups()\n # buf += \"\\n\" + \"\\n\".join(l.strip().split(None, 1))\n buf += \"\\n\" + title + \"\\n\" + recipe_steps.strip()\n continue\n \n if buf:\n if is_recipe_step_area:\n if re.match(r\"^[①-⑳*]\", l.strip()):\n buf += \"\\n\" + l.strip()\n else:\n buf += l.strip()\n else:\n buf += \"\\n\" + l.strip()\n if buf:\n ret.append(buf)\n\n return ret\n \n \n for ii, l in enumerate(converted_content.splitlines()):\n if ii == 1:\n overview_recipe.cooking_name_sub = l.strip()\n continue\n \n if -1 < l.find(\"初回放送\"):\n overview_recipe.program_date = dateutil.parser.parse(\"/\".join(re.search(r\"(\\d+)\\D+(\\d+)\\D+(\\d+)\\D+\", l).groups()))\n break\n\n cooking_shop_strings = get_cooking_shop_strings(converted_content.splitlines())\n\n logger.debug(\"-\" * 20)\n logger.debug(cooking_shop_strings)\n for shop_string in cooking_shop_strings:\n recipe_shop = None\n recipe = None\n is_material_area = False\n is_recipe_step_area = False\n for l in shop_string.splitlines():\n if len(l.strip()) == 0:\n continue\n \n if is_material_area == False and is_recipe_step_area == False:\n if re.search(\"軒目\", l.strip()) or re.match(r\"^[①-⑳*].*『.*』\", l.strip()) or re.match(r\"^[①-⑳*].*「.*」\", l.strip()):\n recipe_shop = copy.deepcopy(overview_recipe)\n recipe = None\n \n m = re.search(r\"「(.*)」\", l)\n if m:\n recipe_shop.cooking_name_sub += \"/\" + m.group(1)\n else:\n m2 = re.search(r\"『(.*)』\", l)\n if m2:\n recipe_shop.cooking_name_sub += \"/\" + m2.group(1)\n \n continue\n \n if re.search(\"^(料理|万能調味料)\", l.strip()):\n is_material_area = False\n is_recipe_step_area = False\n if recipe:\n yield recipe\n\n if recipe_shop:\n recipe = copy.deepcopy(recipe_shop)\n else:\n recipe = copy.deepcopy(overview_recipe)\n \n if -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif -1 < l.find(\":\"):\n recipe.cooking_name = l.split(\":\")[1].strip()\n elif re.search(r\"^(料理|万能調味料)[①-⑳]\", l.strip()):\n # https://www.nhk.or.jp/program/manpuku/recipe/dg0_200115.pdf\n # 料理①カルパッチョ\n recipe.cooking_name = l.strip()[3:].strip()\n else:\n recipe.cooking_name = l.split(None, 1)[1].strip()\n continue\n \n if re.search(\"^材料\", l.strip()):\n is_material_area = True\n is_recipe_step_area = False\n if l.strip() == \"材料\":\n continue\n \n if re.search(\"^作り方\", l.strip()):\n is_material_area = False\n is_recipe_step_area = True\n if l.strip() == \"作り方\":\n pass\n else:\n l = l.replace(\"作り方\", \"\", 1)\n # recipeがNoneの場合はエラーとして検出したい\n recipe.recipe_steps.append(RecipeText(l.strip()))\n continue\n \n \n if is_material_area:\n for material in l.strip().split(\"、\"):\n material = material.strip()\n if len(material):\n if material.startswith(\"(\"):\n recipe.materials.append(RecipeText(material))\n else:\n recipe.materials.append(RecipeText(material.replace(\"(\", \": \").replace(\")\", \"\")))\n \n if is_recipe_step_area:\n recipe.recipe_steps.append(RecipeText(l.strip()))\n if recipe:\n yield recipe", "def get_courses_info(url, headers):\n dash = get_page_contents(url, headers)\n soup = BeautifulSoup(dash)\n courses_soup = soup.find_all('article', 'course')\n courses = []\n for course_soup in courses_soup:\n course_id = None\n course_name = course_soup.h3.text.strip()\n course_url = None\n course_state = 'Not yet'\n try:\n # started courses 
include the course link in the href attribute\n course_url = BASE_URL + course_soup.a['href']\n if course_url.endswith('info') or course_url.endswith('info/'):\n course_state = 'Started'\n # The id of a course in edX is composed by the path\n # {organization}/{course_number}/{course_run]\n course_id = course_soup.a['href'][9:-5]\n except KeyError:\n pass\n courses.append(Course(id=course_id,\n name=course_name,\n url=course_url,\n state=course_state))\n return courses", "def __init__(self, course_id, name, content):\n # course id and name need to go over the string_correct function\n self.ID = self.id_correct(str(course_id))\n self.name = self.string_correct(str(name))\n\n self.content = str(content)\n\n # uninitialized variables\n self.prere = {}\n self.postre = {}\n self.description = None\n self.department_title = None\n self.course_level = None\n\n # parse description and prerequisite raw data from content var\n self.seperate_content()", "def test_extract_info():\n string = \"Mika Hakkinen Mclaren-Mercedes 79694 1\"\n assert type(race.extract_info(string)) == dict\n assert race.extract_info(string) == {\"Name\": \"Mika Hakkinen\", \"Team\": \"Mclaren-Mercedes\", \"Time\": 79694, \"Diff\": \"\",\n \"Race\": 1}\n assert \"Hello, world!\" not in race.extract_info(string)", "def extract_course_summary(course):\n return {\n field: course.fields[field]\n for field in CourseSummary.course_info_fields\n if field in course.fields\n }", "def build_course(self):\n courses = []\n aprovacao_d = {}\n # semestral\n for rate_it in self.__semestral_rate:\n # pega uma lista no qual o primeiro elemento é a taxa, o segundo\n # e o terceiro são quantidades\n rate_data = self.analysis[\"semestral_rate\"][rate_it.name]\n for i in rate_data[0].index:\n if i[0] not in aprovacao_d:\n aprovacao_d[i[0]] = {}\n\n periodo = str(i[1]) + \"/\" + str(i[2])\n aprovacao_d[i[0]][periodo] = [\n float(rate_data[0][i]),\n int(rate_data[1][i]),\n int(rate_data[2][i]),]\n\n note = self.analysis[\"general_note_statistic\"]\n note_last_year = self.analysis[\"last_year_statistic\"]\n for course in self.analysis[\"courses\"].index:\n course_dict = {}\n course_dict[\"disciplina_codigo\"] = course\n course_dict[\"disciplina_nome\"] = self.analysis[\"courses\"][course]\n \n # If the course code is related to more than one name,\n # concatenate these names into an unique string\n if type(course_dict[\"disciplina_nome\"]) != str:\n new_course_name = \" | \".join(list(course_dict[\"disciplina_nome\"]))\n course_dict[\"disciplina_nome\"] = new_course_name\n # quantidade de matriculas\n count = self.analysis[\"general_count_submission\"][course]\n course_dict[\"qtd_alunos\"] = count\n # notas\n course_dict[\"qtd_cursada_aprov\"] = self.analysis[\"coursed_ratio\"][course]\n course_dict[\"nota\"] = [note[0][course], note[1][course]]\n course_dict[\"nota_ultimo_ano\"] = [\n note_last_year[0][course],\n note_last_year[1][course]\n ]\n # taxas\n for rate_it in self.__rates:\n rate_data = self.analysis[\"general_rates\"][rate_it.name]\n course_dict[rate_it.name] = float(rate_data[0][course])\n course_str = rate_it.name.replace(\"taxa\", \"qtd\")\n # count_sel define qual quantidade vai para o json, a especifica\n # ou geral\n course_dict[course_str] = int(\n rate_data[rate_it.count_sel][course])\n # rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n\n # taxas do ultimo anos\n course_dict[\"taxa_reprovacao_ultimo_absoluto\"] = self.analysis[\"last_year_taxa_reprovacao_absoluta\"][course]\n 
course_dict[\"taxa_reprovacao_ultimo_frequencia\"] = self.analysis[\"last_year_taxa_reprovacao_frequencia\"][course]\n\n course_dict[\"grafico_qtd_cursada_aprov\"] = self.analysis[\"coursed_count\"][course]\n course_dict[\"aprovacao_semestral\"] = aprovacao_d[course]\n courses.append(course_dict)\n return courses", "def _get_page_sections(self, sectionNum=None, sectionName=None):\n self.section = {}\n self.sections = [] # list maintains order\n content = self.page.content\n lines = content.split(\"\\n\")\n currentSection = None\n for line in lines:\n if \"==\" in line:\n line = line.replace(\"Edit =\",\"\")\n line = line.replace(\"=\",\"\").lstrip().rstrip()\n self.section[line] = []\n currentSection = line\n self.sections.append(currentSection)\n elif currentSection is not None:\n line = line.lstrip().rstrip()\n self.section[currentSection].append(line)\n else:\n pass\n logger.info(\"Sections in page: \"+str(self.sections))\n # and return some section:\n if sectionNum is not None:\n if sectionNum > len(self.sections) or sectionNum < 0:\n sectionNum = 0\n return self.section[self.sections[sectionNum]]\n elif sectionName is not None:\n pass", "def parse_text(self, text: str) -> SectionDict:", "def get_doctor_info(url, html):\n \n # create a dictionary to save data\n doctor_info = {}\n \n # parsed html by using lxml\n # In the following processing\n # the one ending in org is the raw data in the HTML\n # the one ending in inf is the processed data by extracting from raw data\n select = etree.HTML(html)\n \n # part 1: get basic information about your doctor\n # attribute: Name, Title, Hospital, Department\n name_org = select.xpath('//div[@class=\"profile-text\"]//h1[@class=\"doctor-name\"]//text()')\n name_inf = name_org[0].strip()\n \n title_org = select.xpath('//div[@class=\"profile-text\"]//span[@class=\"positon\"]//text()')\n title_inf = [i.strip() for i in title_org if len(i.strip()) > 0]\n title_inf = ' '.join(title_inf)\n \n hospital_department_org = select.xpath('//div[@class=\"profile-text\"]//p[@class=\"doctor-faculty\"]//text()')\n hospital_department_inf = [i.strip() for i in hospital_department_org if len(i.strip()) > 0]\n hospital_inf = hospital_department_inf[0]\n department_inf = hospital_department_inf[1]\n \n doctor_info['姓名'] = name_inf\n doctor_info['职称'] = title_inf\n doctor_info['医院'] = hospital_inf\n doctor_info['科室'] = department_inf\n \n # part2: get header format data\n org = select.xpath('//div[@class=\"profile-sta\"]//text()')\n inf = [i.strip() for i in org if len(i.strip()) > 0 and i.strip() != '%']\n for i in range(len(inf)//2):\n doctor_info[inf[2*i]] = inf[2*i + 1]\n \n \n # part3: get sidebar format data\n org_1 = select.xpath('//div[@class=\"item-body\"]//div[@class=\"clearfix\"]//div[@class=\"per-sta-label\"]//text()')\n org_2 = select.xpath('//div[@class=\"item-body\"]//div[@class=\"clearfix\"]//div[@class=\"per-sta-data\"]//text()')\n for i in range(len(org_1)):\n doctor_info[org_1[i][:-1]] = org_2[i]\n \n # part4: get body format data\n honour_org = select.xpath('//div[@class=\"honour-header\"]//text()')\n honour_inf = ''.join([i.strip() for i in honour_org])\n \n honour_detail_org = select.xpath('//li[@class=\"honour-title\"]//text()')\n honour_detail_inf = [i.strip()[:4] for i in honour_detail_org if len(i.strip()) > 0]\n honour_detail_inf = ' '.join(honour_detail_inf)\n \n satisfaction_org = select.xpath('//div[@class=\"item-body\"]//div[@class=\"satisfaction clearfix\"]//i[@class=\"sta-num\"]//text()')\n satisfaction_inf = [i.strip() for i in 
satisfaction_org if len(i.strip()) > 0 and i.strip() != '%']\n \n resume_org = select.xpath('//div[@class=\"good-at-text\"]//text()')\n resume_inf = [i.strip() for i in resume_org]\n if len(resume_inf) <= 20:\n resume_inf = ''.join(resume_inf)\n resume_inf = ''.join(resume_inf[:20])\n \n star_org = select.xpath('//div[@class=\"experience-row clearfix\"]//span[@class=\"experience-label\"]//text()')\n star_inf = 1 if len(star_org) >= 1 else 0\n\n doctor_info['好大夫届数'] = honour_inf\n doctor_info['好大夫具体年份'] = honour_detail_inf\n doctor_info['简历'] = resume_inf \n doctor_info['诊后服务星'] = star_inf\n try:\n doctor_info['疗效满意度'] = satisfaction_inf[0]\n doctor_info['态度满意度'] = satisfaction_inf[1]\n except:\n pass\n \n # part5: personal url\n personal_url = url\n doctor_info['医生个人链接'] = personal_url\n \n return doctor_info", "def create_detail(l):\n parsed_detail = create_soup(l)\n kv_pair = \"\"\n details = dict()\n for i in parsed_detail.find_all('tbody'):\n for j in i.find_all('tr'):\n for k in j.find_all('td'):\n cln = clean_string(k.text)\n if cln.find(\":\")<0:\n #if re.search(r'[A-Za-z]:', cln):\n kv_pair += \" \" + cln\n else:\n if len(kv_pair) > 0:\n kv_process = kv_pair.split(\":\")\n det_key = re.sub(\n r'\\s', '_', kv_process[0].strip()\n ).lower()\n details[det_key] = kv_process[1].strip()\n kv_pair = cln\n return details" ]
[ "0.698141", "0.6433248", "0.61811566", "0.608286", "0.6082416", "0.6056426", "0.6050662", "0.6001302", "0.5982004", "0.59207666", "0.58908296", "0.5850639", "0.5830395", "0.5818191", "0.5790374", "0.577954", "0.57718724", "0.5770274", "0.57416594", "0.57249576", "0.57086146", "0.56785774", "0.5651648", "0.56339157", "0.56309235", "0.56044084", "0.55505025", "0.5549235", "0.5538389", "0.55373746" ]
0.7118217
0
Given a list of course codes, get their corresponding titles and format them in a bulleted TeX list. This is used to indicate in the abstract which courses have been deliberately excluded from the document
def create_not_included_list(codes):
    string = '\\begin{itemize}\n'
    for code in codes:
        title = get_course_title_only(code)
        string += '\\item{' + title + '}\n'
    string += '\\end{itemize}\n'
    return string
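A minimal usage sketch, assuming get_course_title_only resolves a course code to its plain title string; the codes and titles here are hypothetical:

    def get_course_title_only(code):
        # Assumed stand-in for the real helper, which fetches the title from the course page.
        titles = {'COMP4019': 'Example Course A', 'COMP4021': 'Example Course B'}
        return titles[code]

    print(create_not_included_list(['COMP4019', 'COMP4021']))
    # \begin{itemize}
    # \item{Example Course A}
    # \item{Example Course B}
    # \end{itemize}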
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_to_latex(codelist, unwanted_courses):\n # TODO: investigate a way to add large amounts of text outside of the\n # function\n abstract01 = \"I created this document to practice parsing html and using\\\n tools like Beautiful Soup which I've previously had little experience\\\n in. As a result, it's not perfect.\\\\newline\\\n It is also a slightly condensed all-in-one-place look at a selection\\\n of courses that are available for fourth year computer science\\\n students at the University of Glasgow. For the purposes of clarity I\\\n have removed several courses from this selection. The following\\\n courses have been omitted:\"\n abstract02 = \"For more insight into the project, to report issues or to\\\n inspect the code, have a look at the GitHub:\\\n \\\\url{https://github.com/IS0metric/course-ripper}\"\n unincluded = create_not_included_list(unwanted_courses)\n with open('courses.tex', 'w') as f:\n # TODO Try and move all this to a separate function?\n # TODO: Check if it's more efficient to write a single, massive string\n # to file\n f.write('\\\\documentclass{hitec}\\n')\n f.write('\\\\usepackage[document]{ragged2e}\\n')\n f.write('\\\\usepackage{url}\\n')\n f.write('\\\\usepackage{hyperref}\\n')\n f.write('\\\\setcounter{tocdepth}{4}\\n')\n f.write('\\\\begin{document}\\n')\n f.write('\\\\title{Fourth Year (2016-17) Courses}\\n')\n f.write('\\\\author{Jack Parkinson}\\n')\n f.write('\\\\date{August 2016}\\n')\n f.write('\\\\maketitle\\n')\n f.write('\\\\abstract{' + abstract01 + unincluded + abstract02 + '}\\n')\n f.write('\\\\newpage\\n\\n')\n f.write('\\\\tableofcontents\\n')\n f.write('\\\\newpage\\n\\n')\n # TODO: Look into alternatives to the three lists\n all_courses = []\n sem1_courses = []\n sem2_courses = []\n for code in codelist:\n course = bsoup(get_coursepage(code))\n if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':\n all_courses.append(course)\n elif \"1\" in course['offered']['value']:\n sem1_courses.append(course)\n elif \"2\" in course['offered']['value']:\n sem2_courses.append(course)\n f.write('\\\\section{Semester 1 and 2 Courses}\\n\\n')\n for course in all_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 1 Only Courses}\\n\\n')\n for course in sem1_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 2 Only Courses}\\n\\n')\n for course in sem2_courses:\n f.write(latex_course(course))\n f.write('\\\\end{document}')\n return None", "def create_tex(unwanted_courses, wanted_courses=None):\n page = requests.get(\n 'http://gla.ac.uk/coursecatalogue/courselist/' +\n '?code=REG30200000&name=School+of+Computing+Science')\n tree = html.fromstring(page.content)\n spans = tree.xpath('//span/text()')\n codes = []\n if wanted_courses is None:\n for s in spans:\n if s[0:4] == \"COMP\" and s[7] == '4' and s not in unwanted_courses:\n codes.append(s)\n else:\n for s in wanted_courses:\n codes.append(s)\n write_to_latex(codes, unwanted_courses)\n return None", "def get_course_name(self,soup):\n\t\tcourse_title = []\n\t\tfor title in soup.find_all(\"div\", class_=\"views-field views-field-title\"):\n\t\t\tcourse_title.append(''.join(title.findAll(text=True)))\n\t\t\tcourse_title.append(' \\n')\t\n\t\t\n\t\tself.new_list.append(course_title)\n\t\treturn course_title", "def scrape_all_course_names(filename, verbose):\n \n\tsoup =\tBeautifulSoup(open(filename, 'r'), 'html.parser')\n\t#print(soup)\n\tcourses = []\n\t# all the courses are stored in a div w/class=view-content\n\t\t\n\th4_field_content = 
soup.find('h4', 'field-content')\n\t#print(h4_field_content)\n\tall_course_content = soup.find(\"div\", \"view-content\") # this contains ALL the classes...\n\tif all_course_content == None:\n\t\tprint(\"There are no courses on this page. Try a smaller page number!\")\n\t\treturn []\n\t\n\tcandidate_classes = all_course_content.find_all('a') # we want all the 'a' tags within\n\tclass_list=[]\n course_code_and_number = {}\n\tfor c in candidate_classes:\n\t\t#print(c.text, \"\\n\")\n\t\ttext = c.text # ex. AEMA 611 Experimental Designs 1 (3 credits)\n\t\ttext = text.split(\" \")# split on the space\n\t\tcourse_id = \" \".join(text[:2]).replace('\\n', '') # the first 2 are the course id\n\t\tcourse_name = \" \".join(text[2:-2]).replace('\\n', '')\n\t\tnum_credits = text[-2].replace(\"(\", \"\")# just get the course number, replace the ( with nothing \n\t\n\t\n\t\t#print(f\"{course_id}\\n{course_name}\\n{num_credits}\\n\")\n\t\ttry:# Check that the course number is a digit bc sometimes it is something weird\n\t\t\tfloat(num_credits)\n\t\texcept ValueError:\n\t\t\t#print(f\"Wrong course format. Ignoring {c}\")\n\t\t\tif(verbose):\n\t\t\t\tprint(f\"Wrong course format. Ignoring course: {c.text}\")\n\t\t\tcontinue\n\t\tclass_list.append(course_id)\n\treturn class_list", "def create_menu_text(list_of_items):\n ret = \"\"\n for item in list_of_items:\n item = clean(item)\n ret += item + \"\\n\"\n # translate = gs.translate(item, 'en', 'de')\n # ret += \"_\" + translate.replace(\" , \", \", \") + \"_\\n\"\n # ret += \"\\n\"\n return ret[:-1] # ignore last newline", "def print_output(filtered_courses_list):\n\n if not filtered_courses_list:\n print(\"No courses matched the query.\")\n return\n\n max_name_length = max([len(course.name) for course in filtered_courses_list])\n\n print(\" Sem | Course ID | Pts | \" +\n \" \" * ((max_name_length - 11) // 2 + (max_name_length - 1) % 2) + \"Course Name\" + \" \" * ((max_name_length - 11) // 2) +\n \" | Grade\")\n print(\"______|___________|_____|\" + \"_\" * (max_name_length + 2) + \"|______\")\n\n for course in filtered_courses_list:\n print(str(course.year) + course.semester, end=\" | \")\n print(course.cid, end=\" | \")\n print(course.creditpts, end=\" | \")\n print(course.name, end=\" \" * (max_name_length - len(course.name) + 1) + \"| \")\n\n print(course.grade)\n\n print(\"\\nGPA: \" + (\"%.3f\" % calculate_weighted_average(filtered_courses_list)))", "def presidente(lst_presidente):\n tmp=''\n tmp+='\\t\\t<para style=\"P3\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n tmp+='\\t\\t<para style=\"P3\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n tmp+='\\t\\t<para style=\"P3\" spaceAfter=\"40\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n tmp+='\\t\\t<para style=\"P1\"><b>' + str(lst_presidente) + '</b></para>\\n'\n tmp+='\\t\\t<para style=\"P4\">Presidente </para>\\n'\n tmp+='\\t</story>\\n'\n return tmp", "def createtext(lst):\n newlst = []\n for item in lst:\n item = item.replace(\"_!\",\"\")\n newlst.append(item)\n text = ' '.join(newlst)\n # Lower-casing\n return text.lower()", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 
or 3.00. \n # Credit Hours: 1.00 to 18.00. \n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def upcoming_courses(aud):\n \n courses = [c for c in aud.all_courses() if c.grade == u\"*\"]\n return [c.number.replace(\"-\", \"\") for c in courses]", "def complete_description(description):\n local_find_all_re = find_all_re\n local_find_one_re = find_one_re\n local_find_series_2_re = find_series_2_re\n local_find_series_3_re = find_series_3_re\n local_course_re = course_re\n local_three_digits = three_digits\n\n find_all = re.findall(local_find_all_re, description)\n find_one = re.findall(local_find_one_re, description)\n \n def remove_in(a, b):\n remove = [y for x in a for y in b if y in x]\n a.extend(b)\n for x in remove: a.remove(x)\n return a\n \n if find_one:\n find_all = remove_in(find_all, find_one)\n for i, x in enumerate(find_all):\n completed = []\n find_all[i] = x.replace(' ', '')\n number = re.search(local_three_digits, find_all[i]).group(0)\n for crs in re.findall(local_course_re, find_all[i]):\n completed.append(f'{crs}{number}')\n description = description.replace(x, ' {}'.format('/'.join(completed)), 1)\n\n find_series_2 = re.findall(local_find_series_2_re, description)\n find_series_3 = re.findall(local_find_series_3_re, description)\n if find_series_2 or find_series_3:\n find_series_2 = remove_in(find_series_3, find_series_2)\n for i, series in enumerate(find_series_2):\n find_series_2[i] = series.replace(' ', '')\n course_dep = re.search(local_course_re, find_series_2[i])\n if course_dep: \n depmnt = course_dep.group(0)\n description = description.replace(series, f'{depmnt}{find_series_2[i][-3:]}', 1)\n return description", "def description(soup: str, nb:int):\n desc = []\n for span in soup.findAll('article', attrs={'itemprop': 'review'}):\n dat = str(recovTextBetweenTags(str(span.findAll('time', attrs={\n 'itemprop': 'datePublished'})), 
',')).replace(\"['[\", '').replace(\"]']\", '')\n dat = (format_date(dat))\n if (dat) > (datetime.now() - timedelta(nb)):\n top = span.findAll('div', attrs={'class': 'text_content'})\n desc.append(translate(recovTextBetweenTags(str(top), ',')))\n\n return desc", "def title_comm(soup: str, nb:int):\n title = []\n for span in soup.findAll('article', attrs={'itemprop': 'review'}):\n dat = str(recovTextBetweenTags(str(span.findAll('time', attrs={\n 'itemprop': 'datePublished'})), ',')).replace(\"['[\", '').replace(\"]']\", '')\n dat = (format_date(dat))\n if (dat) > (datetime.now() - timedelta(nb)):\n top = span.findAll('h2', attrs={'class': 'text_header'})\n top = translate(recovTextBetweenTags(str(top), 'non'))\n title.append(top[0][1:len(top[0])])\n\n return title", "def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))", "def test_reformat_paragraph_list_5_of_5(self):\n before_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item number 3. It is the\n third item in the list.\n\n This paragraph ends the test. It is the \"final\"\n paragraph.\n \"\"\"\n after_b = \"\"\"\\\n This paragraph leads of this test. It is\n the \"lead\" paragraph.\n\n 1. This is item number 1. It is the\n first item in the list.\n\n 2. This is item number 2. It is the\n second item in the list.\n\n 3. This is item number 3. It is the\n third item in the list.\n\n This paragraph ends the test. It is the\n \"final\" paragraph.\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"13.0\", \"13.0\"),\n after_sel=(\"15.1\", \"15.1\"),\n command_name=\"reformat-paragraph\",\n directives=\"@language plain\\n@pagewidth 40\\n@tabwidth 8\",\n )", "def cv_list(self):\n\n mystr = \"\"\n for p in self.mypapers:\n mystr += f\"{p.title[0]}\\n\"\n if len(p.author) > 12:\n a = f\"{p.author[0]} et al. 
\"\n elif len(p.author) > 2:\n a = \", \".join(p.author[:-1]) + f\" & {p.author[-1]} \"\n elif len(p.author) == 2:\n a = f\"{p.author[0]} & {p.author[1]} \"\n else:\n a = f\"{p.author[0]} \"\n\n mystr += f\"{a}\"\n mystr += f\"{p.year}, {p.pub}\"\n if p.volume is not None:\n mystr += f\", {p.volume}\"\n if p.issue is not None:\n mystr += f\", {p.issue}\"\n if p.page is not None:\n mystr += f\", {p.page[0]}\"\n mystr += \"\\n\\n\"\n return mystr", "def presidente(lst_presidente):\n tmp=''\n tmp+='\\t\\t<para style=\"P3\" spaceAfter=\"35\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n tmp+='\\t\\t<para style=\"P4\"><b>' + str(lst_presidente) + '</b></para>\\n'\n tmp+='\\t\\t<para style=\"P4\">Presidente </para>\\n'\n return tmp", "def create_formatted_accomplishments(accomplishments):\n output = ''\n\n for index, accomplishment in enumerate(accomplishments):\n output += f\"* {accomplishment}\"\n if index + 1 != len(accomplishments):\n output += '\\n'\n\n return output", "def display_courses(courses):\n\n _print('You can access %d courses' % len(courses))\n for i, course in enumerate(courses, 1):\n _print('%d - [%s] - %s' % (i, course.state, course.name))", "def output_labelled_ranked_list(sysdoclab):\n for did, grade in sysdoclab:\n print(did + (\" L\" + str(grade) if grade is not None else \"\"))", "def latex_course(course):\n basic_info_list = [\n 'session', 'school', 'credits', 'level', 'offered',\n 'visiting_students', 'erasmus_students'\n ]\n generic_subsection_list = [\n 'description', 'timetable', 'requirements_of_entry',\n 'excluded_courses', 'co_requisites', 'assessment_weighting'\n ]\n string = '\\\\subsection{' + course[\"title\"] + '}\\n'\n for info in basic_info_list:\n string += latex_info(course[info])\n for subsection in generic_subsection_list:\n string += latex_subsection(course[subsection])\n string += '\\\\break \\\\textbf{' + course['assessment_date'][\n 'heading'] + '}' + course['assessment_date']['value'] + '\\n'\n string += latex_subsection(course['aims'])\n string += '\\\\subsubsection*{' + \\\n course['learning_outcomes']['heading'] + '}\\n'\n outcome_list = re.split(\n '\\d+\\. 
', course['learning_outcomes']['value'])\n string += outcome_list[0] + '\\n'\n string += '\\\\begin{enumerate}\\n'\n for i in outcome_list[1:-1]:\n string += '\\\\item ' + i + '\\n'\n string += '\\\\end{enumerate}\\n'\n return string", "def get_course_title_only(code):\n coursepage = get_coursepage(code)\n soup = BeautifulSoup(coursepage.content, 'lxml')\n title = [soup.find_all('h1')[2].string][0]\n return title", "def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, \"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), 
names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()", "def parse_course_pre_to_list(self):\n prere_courses = []\n\n # convert non-word to spaces except \"-\"\n self.prere_raw = re.sub(\"[^\\w-]\", \" \", self.prere_raw)\n\n # split the string by spaces\n words = self.prere_raw.split()\n\n # check if the string contains number, if True then the string is of the form: \"140A\"\n def append_to_list(word, previous_word):\n try:\n if word[0].isdigit():\n toappend = None\n # course abbs = words[i-1]\n try:\n toappend = \"{} {}\".format(previous_word.upper(), word.upper())\n except AttributeError:\n #TODO check this error for HIGR 216A-B\n print(\"previous word is {}, word is {}\".format(previous_word, word))\n if toappend not in prere_courses:\n prere_courses.append(toappend)\n except IndexError:\n #TODO why this would occur?\n print(\"word is {}, previous word is {}\".format(word, previous_word))\n\n # iterate through words to find numbers\n for i in range(len(words)):\n\n previous_word = None\n if i 
is not 0:\n # define the previous word like MATH\n previous_word = words[i-1]\n\n if \"-\" in words[i]:\n num = re.split(\"[A-Z]\", words[i])[0]\n letters = re.split(\"-\", words[i])\n new_words = []\n for i in range(len(letters)):\n if i is 0:\n new_words.append(letters[0])\n else:\n new_words.append(num + letters[i])\n for word in new_words:\n if word is not None and previous_word is not None:\n append_to_list(word, previous_word)\n else:\n #TODO: what if the word is None?\n pass\n else:\n append_to_list(words[i], previous_word)\n\n return prere_courses", "def group_list(self, group_number=1):\n text = []\n group = self.varexercise_numbers[group_number-1]\n group_name = _('Group %s') % group[0]\n text.append('\\n\\\\subsection*{%s}\\n' % group_name)\n for number in range(1, self.number_of_variations+1):\n print(\"---------\", number) # !!!\n variation = '\\n\\n\\\\textbf{%s}\\\\\\\\\\n' %\\\n _('Variation %d') % number\n text.append(variation)\n exercise_number = 1\n for item in group[1:]:\n # print(' '*5, item) # !!!\n if not isinstance(item, str):\n latex_plain = item.latex_plain(number)\n if latex_plain:\n text.append('%d. ' % exercise_number)\n # print(' '*5, number) # !!!\n # print(' '*5, latex_plain) # !!!\n text.append(latex_plain + '\\n')\n exercise_number += 1\n # if with_solution:\n # text.extend(self.one_group(group_number=group_number))\n # text.append(\"\\\\newpage\\n\")\n return text", "def courses_string(self):\n course_string = \"\"\n for c in self.courses[0]:\n course_string += c + \", \"\n course_string = course_string[:-2]\n return course_string", "def snippetList(requeset, format = None):", "def clean_text_clues(texts):\n\n clean_clues = []\n for clues in texts:\n clue = remove_hyphens(clues)\n clue = tokenize(clue)\n clue = remove_characters(clue)\n clue = lowercase(clue)\n clue = remove_stopwords(clue)\n clue = lemmatized_words(clue)\n clue = remove_short_tokens(clue)\n clue = remove_non_wordnet(clue)\n clue = apply_lemmatize(clue)\n clean_clues.append(clue)\n return [' '.join(item) for item in clean_clues]", "def build_latex(file_list):\n eingabe=[]\n anhang_count=0\n anhaenge=[]\n anhaenge_file=[]\n for file in file_list:\n x=load_file(file)[1]\n eingabe.append(\"\\section{%s}\" %(x[2]))\n eingabe.append(\"\\subsection{Infos}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}lX}\")\n eingabe.append(r\"\\textbf{Datum} & %s\\\\\" %(x[0]))\n eingabe.append(r\"\\textbf{Gremium} & %s\\\\\" %(x[1]))\n eingabe.append(r\"\\textbf{Anatrag/Beschluss wurde} & %s\\\\\" %(x[9]))\n x[11]=x[11].replace(\" \",\"\")\n kw=x[11].split(\",\")\n for i in range(0,len(kw)):\n if i==0:\n eingabe.append(r\"\\textbf{Keyword:} & %s\\\\\" %(kw[i]))\n else:\n eingabe.append(r\" & %s\\\\\" %(kw[i]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[6],x[7],x[8]))\n eingabe.append(\"\\end{tabularx}\")\n eingabe.append(\"\\subsection{Antrags/Beschlusstext}\")\n line_text=len(eingabe)\n eingabe.append(x[3])\n eingabe.append(\"\\subsection{Begründung}\")\n eingabe.append(x[4])\n if x[23]==\"Ja\" and x[24]!=\"\":\n delta=7\n anzahl=int((len(x)-23)/delta)\n if anzahl==1:\n eingabe.append(\"\\subsection{Änderungsantrag}\")\n eingabe.append(\"\\subsubsection*{Vorschlag}\")\n eingabe.append(x[24])\n eingabe.append(\"\\subsubsection*{Begründung}\")\n 
eingabe.append(x[25]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26],x[27],x[28]))\n eingabe.append(r\"\\multicolumn{2}{@{}l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29]))\n eingabe.append(\"\\\\end{tabularx}\")\n else:\n eingabe.append(\"\\subsection{Änderungsanträge}\")\n for i in range(0,anzahl):\n eingabe.append(\"\\subsubsection{Änderungsvorschlag %s}\" %(i+1))\n eingabe.append(\"\\\\paragraph*{Vorschlag}\")\n eingabe.append(x[24+(delta*i)])\n eingabe.append(\"\\\\paragraph*{Begründung}\")\n eingabe.append(x[25+(delta*i)]+\"\\\\vspace{1.5ex} \\\\\\\\\")\n eingabe.append(\"\\\\begin{tabularx}{\\linewidth}{@{}XXX}\")\n eingabe.append(r\"\\textbf{Abstimmungsergebniss:}&&\\\\\")\n eingabe.append(r\"Zustimmung & Ablehnung & Enthaltungen \\\\\")\n eingabe.append(r\"{} & {} & {} \\\\\".format(x[26+(delta*i)],x[27+(delta*i)],x[28+(delta*i)]))\n eingabe.append(r\"\\multicolumn{2}{@{}l}{\\textbf{Änderungsantrag wurde:}} & %s \\\\\" %(x[29+(delta*i)]))\n eingabe.append(\"\\\\end{tabularx}\")\n if x[10]!=\"\":\n anhang=x[10].split(\",\")\n bennenung=x[11].split(\",\")\n eingabe[line_text]=eingabe[line_text]+\"\\\\\\\\ \\n Dieser Antrag enthält %s Anhänge: \" %(len(anhang))\n for i in range(0,len(anhang)):\n anhang_count=anhang_count+1\n anhaenge.append(\"\\section{%s - %s} \\label{An:%s}\" % (x[2],bennenung[i],str(anhang_count)))\n anhaenge.append(\"\\includepdf[pages=-]{%s}\" %(anhang[i]))\n anhaenge_file.append(anhang[i])\n if i!=len(anhang)-1:\n eingabe[line_text]=eingabe[line_text]+\"\\\\nameref{An:%s}, \" % (str(anhang_count))\n else:\n eingabe[line_text]=eingabe[line_text]+\"\\\\nameref{An:%s} \" % (str(anhang_count)) \n \n eingabe.append(\"\\\\newpage\") \n eingabe.append(\"\\\\appendix\") \n eingabe.append(\"\\\\pagenumbering{Roman}\") \n ausgabe=\"\"\n for i in range(0,len(eingabe)):\n ausgabe=ausgabe+eingabe[i]+\"\\n\"\n \n for i in range(0,len(anhaenge)):\n ausgabe=ausgabe+anhaenge[i]+\"\\n\"\n \n return ausgabe,anhaenge_file", "def completed_course()->str:\r\n try:\r\n db: sqlite3.Connection = sqlite3.connect(DB_FILE)\r\n query: str = \"SELECT s.Name, s.CWID, g.Course, g.Grade, i.Name AS 'Instructor' \" \\\r\n \"FROM grades2 g JOIN students2 s ON g.StudentCWID = s.CWID \" \\\r\n \"JOIN instructors2 i ON g.InstructorCWID = i.CWID ORDER BY s.Name\" \r\n except sqlite3.OperationalError as e:\r\n print(e)\r\n\r\n data: Dict[str,str]=\\\r\n [{\"name\": name,\"cwid\": cwid,\"course\": course , \"grade\":grade, \"instructor\": instructor}\r\n for name, cwid, course, grade, instructor in db.execute(query)]\r\n db.close()\r\n \"\"\"Render the template from templates folder\"\"\"\r\n return render_template(\"student_summary.html\",\r\n title=\"Stevens Repository\",\r\n table_title=\"Student, Course, Grade and Instructor\",\r\n students=data )" ]
[ "0.61776614", "0.61535215", "0.5460157", "0.53928626", "0.5347271", "0.5328471", "0.5140035", "0.5117348", "0.51146877", "0.5056962", "0.5048898", "0.50328934", "0.50134706", "0.50019485", "0.49917555", "0.49796224", "0.49568665", "0.4950176", "0.49257633", "0.49147525", "0.48917115", "0.4876727", "0.48721468", "0.48712775", "0.48478454", "0.48441556", "0.48265794", "0.48217246", "0.47843155", "0.47706375" ]
0.66535985
0
Creates a TeX formatted string for a given subsubsection
def latex_subsection(section):
    string = '\\subsubsection*{' + section['heading'] + '}\n'
    string += section['value'] + '\n'
    return string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sub(string, subscript):\n return string + \"<sub>\" + subscript + \"</sub>\"", "def _build_sub(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M-D\n @SP\n M=M+1\n \"\"\"\n )", "def print_sub_section(self, s, level=0):\n section = s.capitalize()\n\n self.print_newline()\n self._write('%s+ %s\\n' % ('-' * level, section))\n self.print_newline()", "def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def sub_section(title, lines, pad_len=4, sep='-'):\n pad = pad_len * \" \"\n msg = \"{pad}{}\\n{pad}{}\\n\".format(title, sep * len(title), pad=pad)\n msg += pad + (\"\\n\" + pad).join(lines) + \"\\n\"\n\n return msg", "def add_subsection(self, text: str) -> None:\n\n tag = r'\\subsection{%s}' % (text)\n self.doc = self.doc + tag", "def get_subtitle_print(subs: List[Track]) -> List[str]:\n data = []\n if not subs:\n data.append(\"--\")\n for sub in subs:\n line_items = []\n\n # following sub.title tree checks and supports three different language and title scenarios\n # The second scenario is the recommended option to choose if you are open to choosing any\n # The third scenario should be used if you have nothing unique to state about the track\n # | Language | Track Title | Output |\n # | ------------ | ----------------------------- | --------------------------------------------- |\n # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |\n # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |\n # | es / Spanish | None | - Spanish, SubRip (SRT) |\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f\"{language}, {sub.title}\")\n else:\n line_items.append(language)\n\n line_items.append(sub.format.replace(\"UTF-8\", \"SubRip (SRT)\"))\n\n line = \"- \" + \", \".join(line_items)\n data += [\n (\" \" + x if i > 0 else x)\n for i, x in enumerate(textwrap.wrap(line, 64))\n ]\n return data", "def makeTexInput(self, tabletitle, subsection=True):\r\n tablestring = \"\"\r\n\r\n # if there's enough effluent data\r\n if self.ds.effluent.include:\r\n if subsection:\r\n tablestring += r\"\\subsection{%s}\" % (self.bmp,)\r\n\r\n # caption for the stats plot\r\n prob_caption = \"Box and Probability Plots of {} at {} BMPs\".format(\r\n self.parameter.name, self.bmp\r\n )\r\n\r\n # caption for the scatter plot\r\n scatter_caption = \"Influent vs. Effluent Plots of {} at {} BMPs\".format(\r\n self.parameter.name, self.bmp\r\n )\r\n\r\n # warning about having a lot of non-detects\r\n warning = \"\"\"\r\n Warning: there is a very high percentage of non-detects in\r\n this data set. 
The hypothesis test results and other\r\n statistics reported in this table may not be valid.\r\n \"\"\"\r\n\r\n # make the table and write it to the output file\r\n tablestring += self._make_tex_table(tabletitle)\r\n\r\n # if less than 80% of the data is ND\r\n if self.ds.effluent.ND / self.ds.effluent.N <= 0.8:\r\n\r\n # make the stat plot string\r\n statfig = self._make_tex_figure(\r\n self.stat_fig_name, prob_caption, clearpage=False\r\n )\r\n\r\n # make the scatter plot string\r\n scatterfig = self._make_tex_figure(\r\n self.scatter_fig_name, scatter_caption, clearpage=True\r\n )\r\n\r\n # write the strings to the file\r\n tablestring += statfig\r\n tablestring += scatterfig\r\n\r\n else:\r\n # if there are too many non-detect,\r\n # issue the warning\r\n tablestring += warning\r\n\r\n return tablestring", "def subtitle(\n pdf, text, indent=10, border=BORDER, font_size=12, font_style=\"B\"\n): # pylint: disable = too-many-arguments\n pdf.cell(indent, border=border)\n pdf.set_font(\"arial\", font_style, font_size)\n pdf.cell(75, 10, text, border, 1)", "def create_caption(section, superscript, text):\n section.append('\\n')\n\n # Superscript\n section.append(bold(pylatex.NoEscape(r'{\\footnotesize \\textsuperscript {' + superscript + '}}')))\n\n # Text\n section.append(italic(pylatex.NoEscape(r'{\\footnotesize {' + text + '}}')))", "def _parse_sub(self, parsetree, text, fpos=0):\r\n curr = 0\r\n for match in self._reSubstitution.finditer(text):\r\n start = match.start()\r\n if start > curr:\r\n parsetree.append((\"str\", self._reComment.sub('', text[curr:start])))\r\n\r\n if match.group(\"sub\") is not None:\r\n if not match.group(\"end\"):\r\n raise TemplateSyntaxError(\"Missing closing tag '%s' for '%s'.\" \r\n % (self._sub_end, match.group()), self._errpos(fpos+start))\r\n if len(match.group(\"sub\")) > 0:\r\n self._testexpr(match.group(\"sub\"), fpos+start)\r\n parsetree.append((\"sub\", match.group(\"sub\")))\r\n else:\r\n assert(match.group(\"escsub\") is not None)\r\n if not match.group(\"escend\"):\r\n raise TemplateSyntaxError(\"Missing closing tag '%s' for '%s'.\"\r\n % (self._subesc_end, match.group()), self._errpos(fpos+start))\r\n if len(match.group(\"escsub\")) > 0:\r\n self._testexpr(match.group(\"escsub\"), fpos+start)\r\n parsetree.append((\"esc\", self.escape, match.group(\"escsub\")))\r\n\r\n curr = match.end()\r\n\r\n if len(text) > curr:\r\n parsetree.append((\"str\", self._reComment.sub('', text[curr:])))", "def latex_course(course):\n basic_info_list = [\n 'session', 'school', 'credits', 'level', 'offered',\n 'visiting_students', 'erasmus_students'\n ]\n generic_subsection_list = [\n 'description', 'timetable', 'requirements_of_entry',\n 'excluded_courses', 'co_requisites', 'assessment_weighting'\n ]\n string = '\\\\subsection{' + course[\"title\"] + '}\\n'\n for info in basic_info_list:\n string += latex_info(course[info])\n for subsection in generic_subsection_list:\n string += latex_subsection(course[subsection])\n string += '\\\\break \\\\textbf{' + course['assessment_date'][\n 'heading'] + '}' + course['assessment_date']['value'] + '\\n'\n string += latex_subsection(course['aims'])\n string += '\\\\subsubsection*{' + \\\n course['learning_outcomes']['heading'] + '}\\n'\n outcome_list = re.split(\n '\\d+\\. 
', course['learning_outcomes']['value'])\n string += outcome_list[0] + '\\n'\n string += '\\\\begin{enumerate}\\n'\n for i in outcome_list[1:-1]:\n string += '\\\\item ' + i + '\\n'\n string += '\\\\end{enumerate}\\n'\n return string", "def print_help():\n \n print(\"\"\"\n catsub - substitutes every value for each variable in each word of a template file.\n\n Usage:\n\n catsub [--help] [-s] [-u] [-D|-dSTR] [TEMPLATEFILES] [%VARNAME VALUE1 VALUE2 ... ]*\n\n Arguments:\n\n TEMPLATEFILES Name(s) of file(s) containg the template with\n variables of the from %VARNAME; If no file name\n is given, or the name is '-', catsub will read\n from standard input;\n %VARNAME Variable name to substitute;\n VALUE1 VALUE2 ... Values to substitute for the variable;\n -s Print statistics to stderr on resolved and unresolved variables.\n -u Escaped percentage in template are returned unescaped;\n -dSTR Use STR to divide multiple substituted values\n -D Use newline to divide multiple substituted values\n --help Show this help page.\n\n Notes: \n\n - The names of the template files may not start with a percent sign. \n\n - All variables must start with a percent sign and cannot contain\n whitespace.\n\n - Substituted values cannot start with a percent sign.\n\n - Substitution happens only once per variable, i.e., substituted\n values do not undergo subsequent substitutions.\n\n - When substituting several values, those values are separated by a\n space, by a newline if the -D argument was given, or by STR if\n the -dSTR argument was given.\n\n - When a variable has been given several values to substitute and\n the variable occurs in a substring of a word in the template,\n that word get repeated. E.g. \"echo un%X | catsub %X kind tidy\"\n gives \"unkind untidy\"\n\n - Substitution happens combinatorically within a word. E.g. a word\n \"%X,%Y\" in the template, when processed with \"catsub %X a b %Y c d\"\n becomes \"a,c a,d b,c b,d\". Combinatorics can be circumvented by\n quoting the replacement values, i.e. \"catsub %X 'a b' %Y 'c d'\"\n gives \"a b,c d\".\n\n - Substitution uses the longest possible variable name. E.g. in\n \"%HELLOWORLD\", both %HELLO and %HELLOWORLD could be substituted\n if values for both are specified on the catsub command, but it is\n the longer %HELLOWORLD that gets used.\n \n - Percentage signs in the template can escape substitution by\n prepeding them with a slash, i.e., '\\%'. 
Every '\\%' in the\n template will be remain a '\\%' unless the -u argument is used in\n the catsub command, in which case, they are replaced by '%'.\n\n - The template cannot use the unicode character '%'.\n\n Examples:\n\n $ echo %HELLO %UNIVERSE | catsub %HELLO Hi %UNIVERSE world\n Hi world\n\n $ echo %HELLO %UNIVERSE > example.tmpl\n $ catsub example.tmpl %HELLO Greetings %UNIVERSE universe!\n Greetings universe!\n\"\"\")", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title}: \" + s\n\n return s", "def repr_subfigure(self):\n default_kwargs = {'placement': self.subfig_placement,\n 'width': self.subfig_width,\n 'caption': self.caption,\n 'label': self.label}\n\n myfig = self.extension_mapping[self.extension]()\n\n return self.subfig_str.format(myfig=myfig, **default_kwargs)", "def format_subrange(start, end, step):\n \n if start == end:\n return str(start)\n elif step == 1:\n return \"%d-%d\" % (start, end)\n else:\n return \"%d-%dx%d\" % (start, end, step)", "def format_sub_part(cls, field, length):\n try:\n if not length:\n raise ValueError\n\n length = int(length)\n return \"`%s`(%d)\" % (field, length)\n\n except ValueError:\n return \"`%s`\" % (field,)", "def sub(abbreviation: str, alias: str) -> str:\n return f'<sub alias=\"{alias}\">{abbreviation}</sub>'", "def format_substitutions(subs: Union[SubstituteTerm, List[SubstituteTerm]]):\n text = \"\"\n if isinstance(subs, SubstituteTerm):\n term_str = str(subs)\n for line in term_str.split('\\n'):\n text += Markup.escape(line) + Markup('<br />')\n text += Markup('<br />')\n return text\n for term in subs:\n term_str = str(term)\n for line in term_str.split('\\n'):\n text += Markup.escape(line) + Markup('<br />')\n text += Markup('<br />')\n return text", "def write_subtitle(self, subtitle: str, break_page: bool, class_txt: str) -> str:\n if break_page:\n str_title = \"\"\"<h2 class=\"break-before\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n else:\n str_title = \"\"\"<h2 class=\\\"\"\"\" + class_txt + \"\"\"\\\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n self.html_doc = self.html_doc + str_title\n return self.html_doc", "def _Subscript(self, t):\n self.dispatch(t.value)\n self.write(\"[\")\n self.dispatch(t.slice)\n self.write(\"]\")", "def sub(proto, *args):\n try:\n text = proto.format(*args)\n except:\n text = \"--\"\n #print sub(\"WARNING: Couldn't sub {} with {}\", proto, args)\n return text", "def get_as_subtext_field(field, field_title=None) -> str:\n s = \"\"\n if field:\n s = f\"{field} | \"\n else:\n return \"\"\n\n if field_title:\n s = f\"{field_title} :\" + s\n\n return s", "def Subtlety(self):\n s = self.subtlety\n assert s in range(1,6), \"Subtlety score out of bounds.\"\n return _char_to_word_[::-1][ s-1 ] + ' Subtlety'", "def _var_quote_sub(self, text, VARS):\n ## No need to live on class. Can be moved to tools. 
- Add assert test.\n qvars = map(lambda x: \"\\{ \" + x + \" \\}\", VARS)\n return text % tuple(qvars)", "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "def test__render_inline_section_into(field_added, section_title, section_content):\n into, field_added = render_inline_section_into([], field_added, section_title, section_content)\n return ''.join(into), field_added", "def format_sections(self, sections: SectionDict) -> str:" ]
[ "0.65773475", "0.63128084", "0.62126887", "0.6208371", "0.61632997", "0.6140767", "0.6039124", "0.6012975", "0.5978812", "0.5934507", "0.58059335", "0.5795126", "0.569796", "0.5690162", "0.5618066", "0.5618066", "0.5604984", "0.5596953", "0.5590398", "0.55688125", "0.55626035", "0.5555129", "0.5532533", "0.55151886", "0.55123144", "0.5450941", "0.54002756", "0.5374349", "0.532257", "0.5321723" ]
0.75560915
0
Creates a TeX formatted string for a course
def latex_course(course):
    basic_info_list = [
        'session', 'school', 'credits', 'level', 'offered',
        'visiting_students', 'erasmus_students'
    ]
    generic_subsection_list = [
        'description', 'timetable', 'requirements_of_entry',
        'excluded_courses', 'co_requisites', 'assessment_weighting'
    ]
    string = '\\subsection{' + course["title"] + '}\n'
    for info in basic_info_list:
        string += latex_info(course[info])
    for subsection in generic_subsection_list:
        string += latex_subsection(course[subsection])
    string += '\\break \\textbf{' + course['assessment_date'][
        'heading'] + '}' + course['assessment_date']['value'] + '\n'
    string += latex_subsection(course['aims'])
    string += '\\subsubsection*{' + \
        course['learning_outcomes']['heading'] + '}\n'
    outcome_list = re.split(
        '\d+\. ', course['learning_outcomes']['value'])
    string += outcome_list[0] + '\n'
    string += '\\begin{enumerate}\n'
    for i in outcome_list[1:-1]:
        string += '\\item ' + i + '\n'
    string += '\\end{enumerate}\n'
    return string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_tex(unwanted_courses, wanted_courses=None):\n page = requests.get(\n 'http://gla.ac.uk/coursecatalogue/courselist/' +\n '?code=REG30200000&name=School+of+Computing+Science')\n tree = html.fromstring(page.content)\n spans = tree.xpath('//span/text()')\n codes = []\n if wanted_courses is None:\n for s in spans:\n if s[0:4] == \"COMP\" and s[7] == '4' and s not in unwanted_courses:\n codes.append(s)\n else:\n for s in wanted_courses:\n codes.append(s)\n write_to_latex(codes, unwanted_courses)\n return None", "def format_course_output(self, course, schedule):\n\t\t\n\t\t# handle schedule\n\t\tstr_list = schedule.schedule.strip().split(' ')\n\t\tsch_res = ''\n\t\tif (len(str_list) >= 6):\n\t\t\tval_list = str_list[:6]\n\t\t\tval_list[5] = val_list[5][:3]\n\t\t\t\n\t\t\t# time\n\t\t\ttime = val_list[0] + ' ' + val_list[1] + ' ' + val_list[2] + ' ' + val_list[3]\n\t\t\tlocation = val_list[4] + ' ' + val_list[5]\n\t\t\tsch_res = str(time + '\\t' + location)\n\t\t\n\t\tres = \"\"\n\t\tres += 'Course code: ' + course.course_code + '\\n'\n\t\tres += 'Title: ' + course.title + '\\n'\n\t\tres += 'Department: ' + course.department + '\\n'\n\t\tres += 'Credit Hours: ' + course.credit_hours + '\\n'\n\t\tres += 'Description: ' + course.description + '\\n'\n\t\tif (sch_res != ''):\n\t\t\tres += 'Schedule: ' + sch_res + '\\n'\n\t\tres += 'Instructor: ' + schedule.instructor + '\\n'\n\t\treturn res", "def completed_course()->str:\r\n try:\r\n db: sqlite3.Connection = sqlite3.connect(DB_FILE)\r\n query: str = \"SELECT s.Name, s.CWID, g.Course, g.Grade, i.Name AS 'Instructor' \" \\\r\n \"FROM grades2 g JOIN students2 s ON g.StudentCWID = s.CWID \" \\\r\n \"JOIN instructors2 i ON g.InstructorCWID = i.CWID ORDER BY s.Name\" \r\n except sqlite3.OperationalError as e:\r\n print(e)\r\n\r\n data: Dict[str,str]=\\\r\n [{\"name\": name,\"cwid\": cwid,\"course\": course , \"grade\":grade, \"instructor\": instructor}\r\n for name, cwid, course, grade, instructor in db.execute(query)]\r\n db.close()\r\n \"\"\"Render the template from templates folder\"\"\"\r\n return render_template(\"student_summary.html\",\r\n title=\"Stevens Repository\",\r\n table_title=\"Student, Course, Grade and Instructor\",\r\n students=data )", "def __str__(self):\r\n return \"<CourseFixture: org='{org}', number='{number}', run='{run}'>\".format(**self._course_dict)", "def to_string(self):\n return \"{base_msg} Courses: {courses}\".format(\n base_msg=super().to_string(),\n courses=self.courses_string()\n )", "def get_text(self, course):\r\n return views.render_accordion(\r\n self.request, course, course.get_children()[0].scope_ids.usage_id.to_deprecated_string(), None, None\r\n )", "def get_text(self, course): # pylint: disable=unused-argument\r\n raise NotImplementedError", "def render_plaintext(self, plaintext, context):\r\n return CourseEmailTemplate._render(self.plain_template, plaintext, context)", "def format_course_for_view(course):\r\n return (\r\n course.display_name,\r\n reverse_course_url('course_handler', course.id),\r\n get_lms_link_for_item(course.location),\r\n course.display_org_with_default,\r\n course.display_number_with_default,\r\n course.location.name\r\n )", "def courses_string(self):\n course_string = \"\"\n for c in self.courses[0]:\n course_string += c + \", \"\n course_string = course_string[:-2]\n return course_string", "def write_to_latex(codelist, unwanted_courses):\n # TODO: investigate a way to add large amounts of text outside of the\n # function\n abstract01 = \"I created this document to practice 
parsing html and using\\\n tools like Beautiful Soup which I've previously had little experience\\\n in. As a result, it's not perfect.\\\\newline\\\n It is also a slightly condensed all-in-one-place look at a selection\\\n of courses that are available for fourth year computer science\\\n students at the University of Glasgow. For the purposes of clarity I\\\n have removed several courses from this selection. The following\\\n courses have been omitted:\"\n abstract02 = \"For more insight into the project, to report issues or to\\\n inspect the code, have a look at the GitHub:\\\n \\\\url{https://github.com/IS0metric/course-ripper}\"\n unincluded = create_not_included_list(unwanted_courses)\n with open('courses.tex', 'w') as f:\n # TODO Try and move all this to a separate function?\n # TODO: Check if it's more efficient to write a single, massive string\n # to file\n f.write('\\\\documentclass{hitec}\\n')\n f.write('\\\\usepackage[document]{ragged2e}\\n')\n f.write('\\\\usepackage{url}\\n')\n f.write('\\\\usepackage{hyperref}\\n')\n f.write('\\\\setcounter{tocdepth}{4}\\n')\n f.write('\\\\begin{document}\\n')\n f.write('\\\\title{Fourth Year (2016-17) Courses}\\n')\n f.write('\\\\author{Jack Parkinson}\\n')\n f.write('\\\\date{August 2016}\\n')\n f.write('\\\\maketitle\\n')\n f.write('\\\\abstract{' + abstract01 + unincluded + abstract02 + '}\\n')\n f.write('\\\\newpage\\n\\n')\n f.write('\\\\tableofcontents\\n')\n f.write('\\\\newpage\\n\\n')\n # TODO: Look into alternatives to the three lists\n all_courses = []\n sem1_courses = []\n sem2_courses = []\n for code in codelist:\n course = bsoup(get_coursepage(code))\n if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':\n all_courses.append(course)\n elif \"1\" in course['offered']['value']:\n sem1_courses.append(course)\n elif \"2\" in course['offered']['value']:\n sem2_courses.append(course)\n f.write('\\\\section{Semester 1 and 2 Courses}\\n\\n')\n for course in all_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 1 Only Courses}\\n\\n')\n for course in sem1_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 2 Only Courses}\\n\\n')\n for course in sem2_courses:\n f.write(latex_course(course))\n f.write('\\\\end{document}')\n return None", "def get_about_text(self, course_key):\r\n text = views.course_about(self.request, course_key.to_deprecated_string()).content\r\n return text", "def __str__(self):\n # Use 'Unknown' if the course instance does not have a term\n if self.course_instance.term:\n term = self.course_instance.term.verbose_name()\n else:\n term = 'Unknown'\n\n exam_unicode = '{term} {number} {type} for {course}'.format(\n term=term,\n number=self.get_exam_number_display(),\n type=self.get_exam_type_display(),\n course=self.course_instance.course)\n if self.instructors:\n instructors = ', '.join([i.last_name for i in self.instructors])\n return '{}, taught by {}'.format(exam_unicode, instructors)\n else:\n return '{} (Instructors Unknown)'.format(exam_unicode)", "def course_info(self):\n print(\"Course name: {}\".format(self._course_name))\n print(\"Lead teacher: {}\".format(self._teacher))\n\n if len(self._students) == 0:\n print(\"Course does not enrolled by any student\")\n else:\n print(\"Enrolled: {}/{}\".format(len(self._students), self._total_place))", "def getFullCourseTitle(self, brain):\n full_title = ''\n\n id = brain.getCourseId\n if id:\n full_title = '%s - ' %id\n full_title += brain.Title\n term = brain.getTerm\n if term:\n full_title += ', %s' 
%term\n\n return full_title", "def _create_formatted_string(self):\n string = NALSyntax.StatementSyntax.Start.value + \\\n self.get_subject_term().get_formatted_string()\n\n string += \" \" + self.get_copula_string() + \" \"\n\n string += self.get_predicate_term().get_formatted_string() + \\\n NALSyntax.StatementSyntax.End.value\n\n return string", "def help_text(command):\n\n courses_list = ('ENPM611', 'ENPM613', 'ENPM631', 'ENPM687',\\\n 'ENPM691', 'ENPM693', 'ENPM694', 'ENPM696',\\\n 'ENPM809J','ENPM809R', 'ENPM809W')\n\n response = 'I have course descriptions for: '\n for course_name in courses_list:\n response = response + course_name + ' '\n\n response = response + '\\nTo get the course description, execute command: about ENPM<course_number>'\n\n return response", "def render_htmltext(self, htmltext, context):\r\n return CourseEmailTemplate._render(self.html_template, htmltext, context)", "def MakeTexStr(self):\n if self._latex_str == '':\n self._latex_str += bmr_hdr.format(\n self._filename.replace('.tex', ''))\n else:\n # Get rid of ftr so new images maybe added\n self._latex_str = self._latex_str.split(bmr_ftr)[0]\n self.AddImageFrames()\n # print self._latex_str\n self._latex_str += bmr_ftr", "def display_courses(courses):\n\n _print('You can access %d courses' % len(courses))\n for i, course in enumerate(courses, 1):\n _print('%d - [%s] - %s' % (i, course.state, course.name))", "def pdflatex(unwanted_courses):\n create_tex(unwanted_courses)\n cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']\n proc = subprocess.Popen(cmd)\n proc.communicate()\n return None", "def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text", "def _get_course_email_context(course):\r\n course_id = course.id.to_deprecated_string()\r\n course_title = course.display_name\r\n course_url = 'https://{}{}'.format(\r\n settings.SITE_NAME,\r\n reverse('course_root', kwargs={'course_id': course_id})\r\n )\r\n image_url = 'https://{}{}'.format(settings.SITE_NAME, course_image_url(course))\r\n email_context = {\r\n 'course_title': course_title,\r\n 'course_url': course_url,\r\n 'course_image_url': image_url,\r\n 'account_settings_url': 'https://{}{}'.format(settings.SITE_NAME, reverse('dashboard')),\r\n 'platform_name': settings.PLATFORM_NAME,\r\n }\r\n return email_context", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. 
\n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def toStudentString(self):\r\n return \"{0}th year, section {1}, {2} {3}\".format(self.batch, self.batch_id, self.batch, self.batch_id)", "def create_course_with_unit():\r\n world.clear_courses()\r\n course = world.CourseFactory.create()\r\n world.scenario_dict['COURSE'] = course\r\n section = world.ItemFactory.create(parent_location=course.location)\r\n world.ItemFactory.create(\r\n parent_location=section.location,\r\n category='sequential',\r\n display_name='Subsection One',\r\n )\r\n user = create_studio_user(is_staff=False)\r\n add_course_author(user, course)\r\n\r\n log_into_studio()\r\n world.css_click('a.course-link')\r\n\r\n world.wait_for_js_to_load()\r\n css_selectors = [\r\n 'div.section-item a.expand-collapse', 'a.new-unit-item'\r\n ]\r\n for selector in css_selectors:\r\n world.css_click(selector)\r\n\r\n world.wait_for_mathjax()\r\n world.wait_for_xmodule()\r\n\r\n assert world.is_css_present('ul.new-component-type')", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def add_s_courses(student_courses, resume_output):\n # creates a list to represent each line of the html code for courses section\n html_courses_section = []\n\n # opens the html section with a div tag\n html_courses_section.append(\"<div>\")\n\n # adds a courses section with header3 format to html section\n html_courses_section.append(surround_block(\"h3\", \"Courses\"))\n\n # makes a string of comma-separated (\", \") student courses\n student_courses_str = \"\" # creates a list of student courses, and assigns the first course\n for course 
in student_courses: # goes second course to last course... for each course\n\n # debugging\n # print(\"idl\" + course)\n\n # adds the student course to the string of courses\n student_courses_str += course\n\n # adds a comma after the course, unless it is the last course in the list\n if course is not student_courses[-1]:\n student_courses_str += \", \"\n\n # print(\"student_courses_str:\" + student_courses_str)\n\n # adds a line with str list of courses in span tag\n html_courses_section.append(surround_block(\"span\", student_courses_str))\n\n # closes the courses section\n html_courses_section.append(\"</div>\")\n # print(\"html courses section\", html_courses_section)\n\n # adds the courses section to the html code to be printed\n resume_output.extend(html_courses_section)\n # print(resume_output)\n\n # returns output code\n return resume_output", "def createCourse():\n\tif request.method == 'POST':\n\t\tcname = request.form['cname']\n\t\tcourseterm = request.form['courseterm']\n\t\tcoursepoint = request.form['coursepoint']\n\t\tcoursetype = request.form['coursetype']\t\n\t\tcourseyear = request.form['courseyear']\t\n\t\ttname = request.form['tname']\t\n\t\terror = None\n\n\t\tif not cname:\n\t\t\terror = 'Course name is required.'\n\t\telif not courseterm:\n\t\t\terror = 'Course term is required'\n\t\telif not courseterm:\n\t\t\terror = 'Course point is required'\n\n\t\tif error is not None:\n\t\t\tflash(error)\n\t\telse:\n\t\t\tdb = get_db()\n\t\t\tcur = db.cursor()\n\t\t\tcur.execute(\n\t\t\t\t'INSERT INTO course (cname, courseyear, coursetype, courseterm, coursepoint, tname)'\n\t\t\t\t' VALUES (%s, %s, %s, %s, %s, %s)',\n\t\t\t\t(cname, courseyear, coursetype, courseterm, coursepoint, tname)\n\t\t\t)\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('info.index'))\n\n\treturn render_template('info/createCourse.html')", "def render_preview_certificate(request, course_id):\n return render_html_view(request, str(course_id))" ]
[ "0.6946371", "0.6540668", "0.63720375", "0.62898386", "0.6107234", "0.6071788", "0.5923579", "0.585324", "0.5844796", "0.58082366", "0.5749922", "0.5556818", "0.54912657", "0.53892386", "0.5379833", "0.53704685", "0.53665537", "0.5356205", "0.53560764", "0.5349088", "0.5335669", "0.53247696", "0.5314801", "0.5314547", "0.5305893", "0.52961653", "0.5255212", "0.52499", "0.52436703", "0.5241185" ]
0.7629145
0
Creates the TeX document from the Computer Science Course Catalog
def create_tex(unwanted_courses, wanted_courses=None):
    page = requests.get(
        'http://gla.ac.uk/coursecatalogue/courselist/' +
        '?code=REG30200000&name=School+of+Computing+Science')
    tree = html.fromstring(page.content)
    spans = tree.xpath('//span/text()')
    codes = []
    if wanted_courses is None:
        for s in spans:
            if s[0:4] == "COMP" and s[7] == '4' and s not in unwanted_courses:
                codes.append(s)
    else:
        for s in wanted_courses:
            codes.append(s)
    write_to_latex(codes, unwanted_courses)
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeCourse( xmlFile, genPath, importPaths, commonFiles, rendererContent=True):\n\ttry:\n\n\t\t# parse the command line\n\t\tConfig.add_option('--verbose', help='Set verbosity to maximum', dest='verbosity', default=0, action='store_const', const=2)\n\t\tConfig.add_option('-v', '--verbosity', help='Set the verbosity level (0: quiet, 1: display the command lines, 2: display command lines and their outputs', dest='verbosity', default=0, type=int)\n\t\tConfig.add_option('-d', '--debug', help='Create the files in the debug/ folder, instead of in a temporary one', dest='debug', action='store_true', default=False)\n\t\tConfig.add_option('-f', '--force', help='Force the generation of the documents, even if nothing changes from last run', dest='force', action='store_true', default=False)\n\t\tConfig.add_option('-q', '--quick', help='Quick pdf generation (do not compile twice the latex, do not produce handout, etc.)', dest='quick', action='store_true', default=False)\n\t\tConfig.add_option('-w', '--wordpress', help='Publish to wordpress', dest='wordpress', default=False, action='store_true')\n\t\tConfig.add_option('-c', '--HTMLcorrection', help='Display an HTML correction', dest='HTMLcorrection', default=False, action='store_true')\n\t\tConfig.add_option('-s', '--shared', help='Copy the required files to the <shared> path (via ssh)', default=False,\taction='store_true')\n\t\tConfig.parse()\n\t\targs = Config.args\n\t\toptions = Config.options\n\t\tConfig.importPaths = importPaths \n\t\tConfig.commonFiles = commonFiles\n\t\tConfig.allSessions = { x.__name__:x for x in Session.__subclasses__()}\t# list of the created session classes\n\t\tConfig.rendererContent = rendererContent\n\t\t\n\t\t# clean the debug directory in debug mode\n\t\tbasePath = os.path.abspath('.')+'/'\t\t\t# base path (from where the script is run, because the path are relative)\n\t\tif options.debug:\n\t\t\tif os.path.exists('debug/'):\n\t\t\t\trunCommand(['rm','-rf','debug/'])\n\n\t\t# open and parse the course file\n\t\twith codecs.open(xmlFile, encoding='utf-8') as f:\n\t\t\tbs = BeautifulSoup(f, features=\"xml\")\n\n\n\t\t# build the recursively the sessions\n\t\ttop = createTagSession( bs, father=None )\t\t# bs.contents[0]\n\t\tsessionsToBuild = Session.sessionsToBuild\t\t# get the list of the sessions object\n\t\t\n\n\t\t\"\"\"\n\t\timportFiles( bs.contents[0], importPaths)\n\n\t\t# get the list of sessions we can build (with a 'make' method)\n\t\tbuildableSessions = { x.__name__:x for x in Session.__subclasses__() if 'make' in x.__dict__ }\n\n\t\t#This set the PATH for PyDev only...\n\t\tos.environ['PATH'] = os.environ['PATH']+':'+os.getenv('PATH')\n\n\n\t\t# build the list of Sessions to build\n\t\tsessionsToBuild = []\n\t\tfor name,session in buildableSessions.items():\n\t\t\tsessionsToBuild.extend( session(tag, commonFiles) for tag in bs(name) )\n\t\t\"\"\"\n\t\t\n\t\t\n\n\t\t# if possible, load the previous xml file, and look for the differences\n\t\tdirName,baseName = split(xmlFile) \n\t\ttry:\n\t\t\twith open(dirName+\"/.\"+baseName+\".makeCourse\", \"rb\") as f:\n\t\t\t\tdata = load( f )\n\t\t\t\tfor s in sessionsToBuild:\n\t\t\t\t\tif s.name in data:\n\t\t\t\t\t\ts.checkDifferences( data[s.name] )\n\t\texcept IOError:\n\t\t\tpass\n\n\n\t\t# build every argument in the command line arguments\n\t\tsomethingHasBeDone = False\n\t\tfor s in sessionsToBuild:\n\t\t\tif (not args) or (\"all\" in args) or (s.name in args) or (s.type in args):\n\t\t\t\t\n\t\t\t\tcd( basePath)\n\t\t\t\t\n\t\t\t\t# check if something has 
to be done\n\t\t\t\tif s.shouldBeMake(basePath+'/'+genPath, options) or options.force:\n\t\t\t\t\tsomethingHasBeDone = True\n\n\t\t\t\t\t#Make one build (TP, course, etc.)\n\t\t\t\t\tprint ( Fore.BLUE+\"*) Make \"+Style.BRIGHT+s.name+Fore.RESET+Style.NORMAL)\n\n\t\t\t\t\t# make temp directory and copy all the file in resources dir\n\t\t\t\t\tif options.debug:\n\t\t\t\t\t\ttmp = \"debug/\"+s.name+'/'\n\t\t\t\t\t\tcreateDirectory(tmp)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttmp = mkdtemp()\n\n\t\t\t\t\ts.prepareResources(tmp )\n\t\t\t\t\tcd( tmp)\n\n\t\t\t\t\t# call the custom function associated with the type, to produce the documents\n\t\t\t\t\ts.make(options)\n\n\t\t\t\t\t# then move the files in the right place\n\t\t\t\t\tfor f in s.files(options):\n\t\t\t\t\t\tcreateDirectory( basePath+'/'+genPath.format( **s.dict ) )\n\t\t\t\t\t\tnewFile = basePath+'/'+genPath.format( **s.dict )+f\n\t\t\t\t\t\tif not os.path.exists(f):\n\t\t\t\t\t\t\tprint( Fore.YELLOW+'The file '+f+' has not been created by '+s.type+' function !'+Fore.RESET)\n\t\t\t\t\t\trunCommand( ['cp', f, newFile])\n\n\t\t\t\t\t# del the temporary directory or clean debug directory\n\t\t\t\t\tif not options.debug:\n\t\t\t\t\t\trunCommand( ['rm', '-rf', tmp])\n\t\t\t\telse:\n\t\t\t\t\tif options.verbosity>0:\n\t\t\t\t\t\tprint( Fore.BLUE + \"*) Nothing changed for \"+Style.BRIGHT+s.name+Style.NORMAL+\", skipped\"+Fore.RESET)\n\n\n\n\t\tif not somethingHasBeDone:\n\t\t\tprint( Fore.BLUE + \"Nothing has changed, nothing to do, so nothing has been done...\" + Fore.RESET)\n\n\n\t\t# save the data file\n\t\tdata = {L.name: {key:md5(str(val).encode('utf-8')).hexdigest() for key,val in L.dict.items()} for L in sessionsToBuild }\n\t\tcd( basePath)\n\t\twith open(dirName+\"/.\"+baseName+\".makeCourse\", 'wb') as f:\n\t\t\tdump( data, f)\n\n\n\n\n\n\n\n\texcept mkcException as err:\n\t\tprint( err )", "def parse_create_course(xml_course):\n attrs = [\n \"term-code\",\n \"term-description\",\n 'subject',\n \"course-number\",\n \"school\",\n \"department\",\n \"title\",\n \"description\",\n \"credit-hours\",\n \"distribution-group\"\n ]\n course = pull_attributes_from_xml(xml_course, attrs)\n course[\"sections\"] = []\n\n return course", "def latex_course(course):\n basic_info_list = [\n 'session', 'school', 'credits', 'level', 'offered',\n 'visiting_students', 'erasmus_students'\n ]\n generic_subsection_list = [\n 'description', 'timetable', 'requirements_of_entry',\n 'excluded_courses', 'co_requisites', 'assessment_weighting'\n ]\n string = '\\\\subsection{' + course[\"title\"] + '}\\n'\n for info in basic_info_list:\n string += latex_info(course[info])\n for subsection in generic_subsection_list:\n string += latex_subsection(course[subsection])\n string += '\\\\break \\\\textbf{' + course['assessment_date'][\n 'heading'] + '}' + course['assessment_date']['value'] + '\\n'\n string += latex_subsection(course['aims'])\n string += '\\\\subsubsection*{' + \\\n course['learning_outcomes']['heading'] + '}\\n'\n outcome_list = re.split(\n '\\d+\\. 
', course['learning_outcomes']['value'])\n string += outcome_list[0] + '\\n'\n string += '\\\\begin{enumerate}\\n'\n for i in outcome_list[1:-1]:\n string += '\\\\item ' + i + '\\n'\n string += '\\\\end{enumerate}\\n'\n return string", "def write_to_latex(codelist, unwanted_courses):\n # TODO: investigate a way to add large amounts of text outside of the\n # function\n abstract01 = \"I created this document to practice parsing html and using\\\n tools like Beautiful Soup which I've previously had little experience\\\n in. As a result, it's not perfect.\\\\newline\\\n It is also a slightly condensed all-in-one-place look at a selection\\\n of courses that are available for fourth year computer science\\\n students at the University of Glasgow. For the purposes of clarity I\\\n have removed several courses from this selection. The following\\\n courses have been omitted:\"\n abstract02 = \"For more insight into the project, to report issues or to\\\n inspect the code, have a look at the GitHub:\\\n \\\\url{https://github.com/IS0metric/course-ripper}\"\n unincluded = create_not_included_list(unwanted_courses)\n with open('courses.tex', 'w') as f:\n # TODO Try and move all this to a separate function?\n # TODO: Check if it's more efficient to write a single, massive string\n # to file\n f.write('\\\\documentclass{hitec}\\n')\n f.write('\\\\usepackage[document]{ragged2e}\\n')\n f.write('\\\\usepackage{url}\\n')\n f.write('\\\\usepackage{hyperref}\\n')\n f.write('\\\\setcounter{tocdepth}{4}\\n')\n f.write('\\\\begin{document}\\n')\n f.write('\\\\title{Fourth Year (2016-17) Courses}\\n')\n f.write('\\\\author{Jack Parkinson}\\n')\n f.write('\\\\date{August 2016}\\n')\n f.write('\\\\maketitle\\n')\n f.write('\\\\abstract{' + abstract01 + unincluded + abstract02 + '}\\n')\n f.write('\\\\newpage\\n\\n')\n f.write('\\\\tableofcontents\\n')\n f.write('\\\\newpage\\n\\n')\n # TODO: Look into alternatives to the three lists\n all_courses = []\n sem1_courses = []\n sem2_courses = []\n for code in codelist:\n course = bsoup(get_coursepage(code))\n if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':\n all_courses.append(course)\n elif \"1\" in course['offered']['value']:\n sem1_courses.append(course)\n elif \"2\" in course['offered']['value']:\n sem2_courses.append(course)\n f.write('\\\\section{Semester 1 and 2 Courses}\\n\\n')\n for course in all_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 1 Only Courses}\\n\\n')\n for course in sem1_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 2 Only Courses}\\n\\n')\n for course in sem2_courses:\n f.write(latex_course(course))\n f.write('\\\\end{document}')\n return None", "def createCourse():\n\tif request.method == 'POST':\n\t\tcname = request.form['cname']\n\t\tcourseterm = request.form['courseterm']\n\t\tcoursepoint = request.form['coursepoint']\n\t\tcoursetype = request.form['coursetype']\t\n\t\tcourseyear = request.form['courseyear']\t\n\t\ttname = request.form['tname']\t\n\t\terror = None\n\n\t\tif not cname:\n\t\t\terror = 'Course name is required.'\n\t\telif not courseterm:\n\t\t\terror = 'Course term is required'\n\t\telif not courseterm:\n\t\t\terror = 'Course point is required'\n\n\t\tif error is not None:\n\t\t\tflash(error)\n\t\telse:\n\t\t\tdb = get_db()\n\t\t\tcur = db.cursor()\n\t\t\tcur.execute(\n\t\t\t\t'INSERT INTO course (cname, courseyear, coursetype, courseterm, coursepoint, tname)'\n\t\t\t\t' VALUES (%s, %s, %s, %s, %s, %s)',\n\t\t\t\t(cname, courseyear, coursetype, 
courseterm, coursepoint, tname)\n\t\t\t)\n\t\t\tdb.commit()\n\t\t\treturn redirect(url_for('info.index'))\n\n\treturn render_template('info/createCourse.html')", "def _create_course(self):\r\n super(TestOrphan, self)._create_course()\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid')\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid')\r\n self._create_item('chapter', 'OrphanChapter', {}, {'display_name': 'Orphan Chapter'}, None, None)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1')\r\n self._create_item('vertical', 'OrphanVert', {}, {'display_name': 'Orphan Vertical'}, None, None)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1')\r\n self._create_item('html', 'OrphanHtml', \"<p>Hello</p>\", {'display_name': 'Orphan html'}, None, None)\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None)", "def _createCoursesTable(self):\n\t\tcommand = \"\"\"CREATE TABLE courses (ID INTEGER PRIMARY KEY,\n\t\t\tname TEXT,\n\t\t\tauthor_id INTEGER,\n\t\t\tdescription TEXT\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)", "def pdflatex(unwanted_courses):\n create_tex(unwanted_courses)\n cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex']\n proc = subprocess.Popen(cmd)\n proc.communicate()\n return None", "def create_courses():\n\n\t# create list for courses\n\tallcourses = []\n\n\t# load courses as classes in allcourses-list\n\twith open(\"../data/vakken.csv\", \"rt\") as coursefile:\n\n\t\t# clean text\n\t\tcourses = csv.reader(coursefile)\n\t\tfor row in courses:\n\t\t\tfor text in row:\n\t\t\t\tcourse_info = text.split(\";\")\n\n\t\t\t\t# add course name\n\t\t\t\tcourse_name = course_info[0]\n\n\t\t\t\t# add amount of lectures\n\t\t\t\tcourse_lectures = course_info[1]\n\n\t\t\t\t# add amount of seminars\n\t\t\t\tcourse_seminars = course_info[2]\n\n\t\t\t\t# add max amount seminars\n\t\t\t\tcourse_max_sem = course_info[3]\n\t\t\t\tif course_max_sem == \"nvt\":\n\t\t\t\t\tcourse_max_sem = 0\n\n\t\t\t\t# add amount of practicals\n\t\t\t\tcourse_practicals = course_info[4]\n\n\t\t\t\t# add max amount practicals\n\t\t\t\tcourse_max_prac = course_info[5]\n\t\t\t\tif course_max_prac == \"nvt\":\n\t\t\t\t\tcourse_max_prac = 0\n\n\t\t\t\t# add course to list\n\t\t\t\tallcourses.append(Course(course_name, course_lectures, course_seminars, course_max_sem, course_practicals, course_max_prac))\n\n\treturn allcourses", "def _course_factory_create_course():\r\n return CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')", "def main():\n\n #Courses\n years = [2016, 2017, 2018, 2019, 2020]\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE \"course\" (\n \"course_number\"\tint NOT NULL,\n \"dept_id\"\tvarchar(4) NOT NULL,\n \"title\"\tvarchar(100) NOT NULL,\n \"instructor_fname\"\tvarchar(35) DEFAULT NULL,\n \"instructor_lname\"\tvarchar(35) DEFAULT NULL,\n \"student_work_products\"\tjson DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n PRIMARY KEY(\"course_number\", \"term\", \"year\")) \n \"\"\"\n )\n conn.commit()\n courses = [\n (1370, 
\"CPSC\", \"Computer Literacy\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (1375, \"CPSC\", \"Programming I\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2376, \"CPSC\", \"Intro to Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2380, \"CPSC\", \"Algorithms\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\"),\n (2482, \"CPSC\", \"Computer Organization\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3377, \"CPSC\", \"Advanced Game Programming\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3380, \"CPSC\", \"Operating Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3383, \"CPSC\", \"Programming Languages\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\"),\n (3384, \"CPSC\", \"Computer Networks\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\"),\n (4360, \"CPSC\", \"Computer Security\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\")\n ]\n #Adding years\n upload_courses = []\n for year in years:\n upload_courses += [x + (year,) for x in courses]\n #Making a few instructors teach multiple course\n new_courses = [\n (4557, \"CPSC\", \"Natural Language Processing\", ),\n (2375, \"CPSC\", \"Programming II\",),\n (2776, \"CPSC\", \"Data Structures and Algorithms\",),\n (4862, \"CPSC\", \"Image Recognition\", ),\n ]\n for i in range(0,len(new_courses)):\n year = choice(years)\n for y in range(0,2): #Number of times new course is taught\n c = upload_courses[i]\n new_data = (c[3], c[4], c[5], choice([\"Fall\", \"Spring\", \"Summer\"]), year+y)\n data = new_courses[i] + new_data\n upload_courses.append(data)\n #Adding solo instructors and solo courses\n upload_courses += [\n (4672, \"CPSC\", \"Programming Memes\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Spring\", choice(years)),\n (1872, \"CPSC\", \"Information Systems\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Summer\", choice(years)),\n (1123, \"CPSC\", \"Microsoft Office\", names.get_first_name(), names.get_last_name(), json.dumps({\"swp\": [\"Midterm\", \"Final Exam\", \"Project 1\"]}), \"Fall\", choice(years))\n ]\n\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.executemany('''INSERT INTO course (course_number, dept_id, title, instructor_fname, instructor_lname, student_work_products, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?)''', upload_courses)\n conn.commit()\n\n #SWP\n with sqlite3.connect(\"determined.db\") as conn:\n c = conn.cursor()\n c.execute(\n \"\"\"\n CREATE TABLE `student_work_product` (\n `id` INTEGER PRIMARY KEY,\n `product` varchar(250) NOT NULL,\n `course_id` int NOT NULL,\n `dept_id` int NOT 
NULL,\n `student_fname` varchar(35) NOT NULL,\n `student_lname` varchar(35) NOT NULL,\n `student_outcome` int DEFAULT NULL,\n `score` int DEFAULT NULL,\n `term` varchar(7) NOT NULL,\n `year` int NOT NULL,\n CONSTRAINT `course` FOREIGN KEY (`course_id`) REFERENCES `course` (`course_number`)\n CONSTRAINT `course` FOREIGN KEY (`dept_id`) REFERENCES `course` (`dept_id`)\n )\n \"\"\"\n )\n conn.commit()\n \n swps = []\n with sqlite3.connect(\"determined.db\") as conn:\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute (\"Select * from course\")\n records = [dict(x) for x in c.fetchall()]\n #Generating 20 student records for each swp in each course\n for i, course in enumerate(records):\n student_names = []\n for _ in range(20):\n student_names.append({'fname': names.get_first_name(),\n 'lname': names.get_last_name()})\n for product in json.loads(course['student_work_products'])['swp']:\n for student in student_names:\n if i%7 == 0:\n score = int(triangular(50, 85))\n else:\n score = int(triangular(50, 100))\n if score >= 90: outcome = 4\n elif score >= 80: outcome = 3\n elif score >= 70: outcome = 2\n elif score >= 60: outcome = 1\n else: outcome = 0 \n swps.append((\n product,\n course['course_number'],\n \"CPSC\",\n student['fname'],\n student['lname'],\n outcome,\n score, \n course['term'], \n course['year']\n ))\n \n c.executemany('''INSERT INTO student_work_product (product, course_id, dept_id, student_fname, student_lname, student_outcome, score, term, year)\n VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)''', swps)\n conn.commit()", "def preprocess_courses_corpus():\n soup = None\n with open('courses_corpus.html', 'r') as infile:\n content = infile.read()\n\n soup = BeautifulSoup(content, 'html.parser')\n\n docid = 0\n data = {}\n data['documents'] = []\n\n main_table = soup.find_all(\"div\", attrs={'class': 'courseblock'})\n for course in main_table:\n docid += 1\n title = course.find_all('p', attrs={'class':'courseblocktitle noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblocktitle noindent'}))!=0 else ''\n description = (course.find_all('p', attrs={'class':'courseblockdesc noindent'})[0].text.lstrip('\\n') if len(course.find_all('p', attrs={'class':'courseblockdesc noindent'}))!=0 else '') + ' ' + (course.find_all('p', attrs={'class':'courseblockextra noindent'})[0].text if len(course.find_all('p', attrs={'class':'courseblockextra noindent'}))!=0 else '')\n\n data['documents'].append({\n 'docId' : docid,\n 'title' : title.strip(),\n 'description' : description.strip()\n })\n\n with open('courses_data.json', 'w') as outfile:\n json.dump(data, outfile)", "def print_catalog(self):\n # first download the json for the catalog\n self.download_json()\n\n # open the saved json file and load the json\n with self.file.open(\"r\") as catalog_file:\n pages = json.load(catalog_file)\n\n # the catalog json is just a list of pages\n # so we begin by iterating through the pages\n for page_num in range(len(pages)):\n # get each page\n page = pages[page_num]\n\n # get the threads on each page\n threads = page[\"threads\"]\n\n # print the page heading\n print(\"*** PAGE \", page_num + 1, \"***\")\n\n # iterate through the threads on each page\n for thread_num in range(len(threads)):\n # get each thread\n thread = threads[thread_num]\n\n # print the thread number\n num = thread[\"no\"]\n print(\"---\", \"Thread:\", num, \"---\")\n\n # not all threads have a subject or comment\n try:\n subject = thread[\"sub\"]\n comment = thread[\"com\"]\n\n print(\"Sub:\", 
subject)\n print(\"Comment:\", comment)\n except KeyError:\n print(\"N/A\")", "def _create_course(self):\r\n super(TestPublish, self)._create_course(split=False)\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid', split=False)\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid', split=False)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('vertical', 'Vert2', {}, {'display_name': 'Vertical 2'}, 'chapter', 'Chapter1', split=False)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion1',\r\n \"discussion discussion_category=\\\"Lecture 1\\\" discussion_id=\\\"a08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 1\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 1\",\r\n \"discussion_target\": \"Lecture 1\",\r\n \"display_name\": \"Lecture 1 Discussion\",\r\n \"discussion_id\": \"a08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert1',\r\n split=False\r\n )\r\n self._create_item('html', 'Html2', \"<p>Hellow</p>\", {'display_name': 'Hollow Html'}, 'vertical', 'Vert1', split=False)\r\n self._create_item(\r\n 'discussion', 'Discussion2',\r\n \"discussion discussion_category=\\\"Lecture 2\\\" discussion_id=\\\"b08bfd89b2aa40fa81f2c650a9332846\\\" discussion_target=\\\"Lecture 2\\\"/>\\n\",\r\n {\r\n \"discussion_category\": \"Lecture 2\",\r\n \"discussion_target\": \"Lecture 2\",\r\n \"display_name\": \"Lecture 2 Discussion\",\r\n \"discussion_id\": \"b08bfd89b2aa40fa81f2c650a9332846\"\r\n },\r\n 'vertical', 'Vert2',\r\n split=False\r\n )\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None, split=False)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None, split=False)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None, split=False)", "def generate_courses():\r\n for category in CourseCategory.objects.all():\r\n Course.objects.create(name=category.name, category=category, is_active=True,\r\n is_featured=True)", "def testCosmologyCatalog(self):\n dbObj = myTestGals(database=self.dbName)\n cat = cosmologicalGalaxyCatalog(dbObj)\n cat.write_catalog(self.catName)", "def createStructuredTranscript_Non_Core_Doc():\n\n #create a temporary folder that will hold the data transformed from doc to docx\n os.system('mkdir ' + INPUT_FOLDER+'temp')\n\n core_doc_asset = []\n missing_count = 0\n missing_files=[]\n # get all the docx files that are part of the core asset\n for file in glob.glob(INPUT_FOLDER+\"*.doc\"):\n\n # RG numbers for the core asset\n if (\"RG-50.030\" not in file and\n \"RG-50.106\" not in file and\n \"RG-50.549\" not in file):\n \n\n \n # convert file to docx, storing it in an untracked folder called temp\n file_docx = file + 'x'\n command = 'textutil -convert docx ' + file + ' -output ' + INPUT_FOLDER+'temp/'+ file_docx.split('/')[-1]\n call(command, shell=True)\n\n # append to the array\n core_doc_asset.append(file_docx)\n \n\n \n\n # get the units for each file, store them and update tracker\n core_doc_asset=create_dictionary_of_file_list(core_doc_asset)\n \n not_processed=0\n processed_doc=0\n \n # get the units for each file, store them and update tracker \n for mongo_rg in core_doc_asset:\n # get text units 
for this entry\n processed=[]\n result=[]\n \n for file in core_doc_asset[mongo_rg]:\n \n \n \n units = getTextUnits(INPUT_FOLDER+'temp/'+file.split('/')[-1])\n \n if units:\n #replace white spaces\n for i,element in enumerate(units):\n units[i]['unit']=' '.join(element['unit'].split())\n result.extend(units)\n \n processed.append(True)\n else:\n #check if processed\n processed.append(False)\n\n #set the method used to transform the transcript\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"method\", \"transcribe_non_core_doc\")\n\n not_processed=not_processed+1\n\n if False in processed:\n\n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Unprocessed\")\n not_processed=not_processed+1\n missing_files.append(' '.join(core_doc_asset[mongo_rg]))\n else:\n # insert units on the output collection\n h.update_field(DB, OUTPUT, \"shelfmark\", 'USHMM '+mongo_rg, \"structured_transcript\", result)\n\n \n # update status on the stracker\n \n h.update_field(DB, TRACKER, \"rg_number\", mongo_rg, \"status\", \"Processed\")\n processed_doc=processed_doc+1\n \n\n #delete the temporary folder\n os.system('rm -r ' + INPUT_FOLDER+'temp')\n\n \n #write the missing files to text file\n file = open(OUTPUT_FOLDER_USHMM_PROCESSING_LOGS+'transcribe_non_core_doc_failed.txt','w')\n file.write('\\n'.join(missing_files))\n\n \n # success\n pprint.pprint(\"Non-core doc files were successfully processed, but there are \" + str(missing_count) + \" missing\")", "def process_course(xml_course):\n\n parse_course = parse_get_course(xml_course)\n if not parse_course: # Does not already exist, create\n parse_course = parse_create_course(xml_course)\n else:\n # TODO: Update existing attributes of the course here and upload the changes\n None\n parse_course_id = upload_course(parse_course)\n\n print (\"Processed Course: {0} {1}\"\n .format(parse_course[\"subject\"], parse_course[\"courseNumber\"]))\n\n # TODO: Implement the functions for this stuff\n parse_section = parse_get_section(xml_course)\n if not parse_section: # Does not already exist, create\n parse_section = parse_create_section(xml_course)\n else:\n # TODO: Update existing attributes of the section here and upload the changes\n None\n parse_section_id = upload_section(parse_section)\n\n put_child(parse_course_id, parse_section_id)", "def setup_xml_course(self):\r\n course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')\r\n location = course_key.make_usage_key('chapter', 'Overview')\r\n descriptor = modulestore().get_item(location)\r\n\r\n self.module = self._get_module(course_key, descriptor, location)", "def create_course( # lint-amnesty, pylint: disable=arguments-differ\n self, org, course, run, user_id, master_branch=None, fields=None,\n versions_dict=None, search_targets=None, root_category='course',\n root_block_id=None, **kwargs\n ):\n # either need to assert this or have a default\n assert master_branch is not None\n # check course and run's uniqueness\n locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)\n return self._create_courselike(\n locator, user_id, master_branch, fields, versions_dict,\n search_targets, root_category, root_block_id, **kwargs\n )", "def _create_course(self):\r\n # If the course already exists, this will respond\r\n # with a 200 and an error message, which we ignore.\r\n response = self.session.post(\r\n STUDIO_BASE_URL + '/course/',\r\n data=self._encode_post_dict(self._course_dict),\r\n headers=self.headers\r\n )\r\n\r\n try:\r\n err = 
response.json().get('ErrMsg')\r\n\r\n except ValueError:\r\n raise CourseFixtureError(\r\n \"Could not parse response from course request as JSON: '{0}'\".format(\r\n response.content))\r\n\r\n # This will occur if the course identifier is not unique\r\n if err is not None:\r\n raise CourseFixtureError(\"Could not create course {0}. Error message: '{1}'\".format(self, err))\r\n\r\n if not response.ok:\r\n raise CourseFixtureError(\r\n \"Could not create course {0}. Status was {1}\".format(\r\n self._course_dict, response.status_code))", "def start(self) -> None:\n\n self.doc = self.doc + r'''\n \\documentclass[\n 10pt, % Main document font size\n a4paper, % Paper type, use 'letterpaper' for US Letter paper\n ]{scrartcl}\n\n \\usepackage{graphicx}\n \\usepackage{epstopdf}\n \\usepackage{float}\n \\usepackage[scale=0.75]{geometry} % Reduce document margins\n \\usepackage{hyperref}\n \\usepackage{longtable}\n\n \\begin{document}\n\n \\title{Automatic Exploratory Data Analysis} % The article title\n\n \\subtitle{Study Case} % Uncomment to display a subtitle\n\n \\author{Jacob} % The article author(s) - author affiliations need to be specified in the AUTHOR AFFILIATIONS block\\\n\n \\maketitle % Print the title/author/date block\n\n \\newpage\n \\tableofcontents % Print the table of contents\n\n \\newpage\n \\listoffigures % Print the list of figures\n\n \\newpage\n \\listoftables % Print the list of tables\n '''", "def newCatalog():\n catalog = {'videosContext': None,\n 'caraContenido': None,\n 'musicalGenero': None,\n 'fechaMusica': None}\n\n catalog['videosContext'] = lt.newList('ARRAY_LIST')\n catalog['caraContenido'] = mp.newMap(30,\n maptype='PROBING',\n loadfactor=0.4)\n catalog['musicaGenero'] = mp.newMap(30,\n maptype='PROBING',\n loadfactor=0.4)\n catalog['fechaMusica'] = om.newMap('RBT')\n\n return catalog", "def create_course(self, org, offering, user_id=None, fields=None, **kwargs):\r\n\r\n course, _, run = offering.partition('/')\r\n course_id = SlashSeparatedCourseKey(org, course, run)\r\n\r\n # Check if a course with this org/course has been defined before (case-insensitive)\r\n course_search_location = SON([\r\n ('_id.tag', 'i4x'),\r\n ('_id.org', re.compile(u'^{}$'.format(course_id.org), re.IGNORECASE)),\r\n ('_id.course', re.compile(u'^{}$'.format(course_id.course), re.IGNORECASE)),\r\n ('_id.category', 'course'),\r\n ])\r\n courses = self.collection.find(course_search_location, fields=('_id'))\r\n if courses.count() > 0:\r\n raise InvalidLocationError(\r\n \"There are already courses with the given org and course id: {}\".format([\r\n course['_id'] for course in courses\r\n ]))\r\n\r\n location = course_id.make_usage_key('course', course_id.run)\r\n course = self.create_and_save_xmodule(location, fields=fields, **kwargs)\r\n\r\n # clone a default 'about' overview module as well\r\n about_location = location.replace(\r\n category='about',\r\n name='overview'\r\n )\r\n overview_template = AboutDescriptor.get_template('overview.yaml')\r\n self.create_and_save_xmodule(\r\n about_location,\r\n system=course.system,\r\n definition_data=overview_template.get('data')\r\n )\r\n\r\n return course", "def _create_course(self, split=True):\r\n metadata = {\r\n 'start': datetime.datetime(2000, 3, 13, 4),\r\n 'display_name': 'Migration test course',\r\n }\r\n data = {\r\n 'wiki_slug': 'test_course_slug'\r\n }\r\n fields = metadata.copy()\r\n fields.update(data)\r\n if split:\r\n # split requires the course to be created separately from creating items\r\n self.split_mongo.create_course(\r\n 
self.split_course_key.org, self.split_course_key.offering, self.userid, fields=fields, root_block_id='runid'\r\n )\r\n old_course = self.old_mongo.create_course(self.split_course_key.org, 'test_course/runid', fields=fields)\r\n self.old_course_key = old_course.id\r\n self.runtime = old_course.runtime", "def create_document(self, data):\n command = CreateDocumentFromOneOffixxTemplateCommand(self.context, data['title'], data['template'])\n return command.execute()", "def insert_course(dept, num, text):\n\n # Course Title \n m = re.search(\"[\\d\\w]{5} - ([\\w ]*)\", text)\n title = m.group(1) if m else \"nomatch\"\n\n # Course Description\n m = re.search(\"\\.\\s(.*)\\sTypically\",text)\n des = m.group(1) if m else \"nomatch\"\n\n # Credit hours aren't fixed for every course\n # Credit Hours: 2.00\n # Credit Hours: 2.00 or 3.00. \n # Credit Hours: 1.00 to 18.00. \n m = re.search(\"Credit Hours: (\\d+\\.\\d+)\",text, flags=re.IGNORECASE)\n m = re.search(\"(\\d+\\.\\d+)(.*?)Credit hours\",text, flags=re.IGNORECASE) if not m else m\n cr = m.group(1) if m else \"-1\"\n\n # Semesters Offered\n m = re.search(\"Typically offered (.*?)\\.\", text)\n sem = m.group(1).split() if m else [\"nomatch\"]\n\n # Course Type: Lecture, Recitation, Lab, Seminar, etc.\n m = re.search(\"Schedule Types:\\s((?:[\\w ]+)(?:,[\\w ]+)*) \\s+\", text)\n form = m.group(1).split(\", \") if m else [\"nomatch\"]\n\n # Learning objectives will not necessarily follow campuses\n m = re.search(\"campuses:(\\s+([\\w\\s])+\\n)\", text)\n campus = m.group(1).strip().split(\"\\n\\n\") if m else [\"nomatch\"]\n campus = [camp.strip() for camp in campus]\n\n # prereq regex and decomosition of prereqs into lists of AND conditions (works for most classes, not 477 and similar)\n # re.DOTALL matches all characters, including \"\\n\"\n idx = text.find(\"campuses:\")\n m = re.search(\"Prerequisites:(.*)\",text[idx:],flags=re.DOTALL)\n if m:\n allReqs = []\n prereqText = m.group(1).strip()\n prereqText = prereqText.encode('ascii', 'ignore') \n for i in PrereqParser.parseprereq(prereqText):\n reqArr = []\n for j in i.split():\n if j.find(\"-C\") != -1:\n j = j.replace(\"-C\",\"\")\n reqArr.append(Requisite(course=j,reqType=False))\n else:\n reqArr.append(Requisite(course=j,reqType=True)) \n allReqs.append(RequisiteList(courses=reqArr))\n\n else:\n allReqs = []\n\n # create course entity\n course = Course(number=num, title=title, department=dept, form=form,\n description=des, credits=float(cr), semesters=sem,\n campuses=campus,requisites=allReqs, id=dept + num)\n # store course \n course.put()", "def load2TexAS(data):\n # State global variable\n global cache_stanza, cache_spacy, cache_udpipe, cache_trankit\n\n # Collect the data\n string = data['text']\n lang = data['lang']\n packages = data['packages']\n\n hash_string = hashlib.sha1(string.encode()).hexdigest()\n\n final_HTML = \"\"\n message_HTML = \"<div class=\\'message\\'>\"\n isMessage = False\n header_input = []\n log_row = [datetime.now().strftime('%Y-%m-%d %H:%M:%S'), lang]\n\n if \"stanza\" in packages:\n # Initialize the TexAS document\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n\n ## If cache is full, reload the cache.\n if cache.count(cache_stanza) > 100:\n cache.write(cache_stanza, \"stanza\")\n cache_stanza = cache.load(\"stanza\")\n \n ## Check text whether is already in cache\n if hash_string in cache_stanza[lang].keys():\n tokens, end_pos, lemma, pos, nlpWordsList, 
hasCompoundWords, cache_stanza = cache.read(\"stanza\", cache_stanza, lang, string) #The output cache_stanza has 'count' been updated.\n else:\n tokens, end_pos, lemma, pos, nlpWordsList, hasCompoundWords, cache_stanza = cache.add(\"stanza\", cache_stanza, lang, string, get_services_stanza)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"stanza\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"stanza\" + \"-\" + lang)\n mydoc.setSentenceList(end_pos)\n\n if hasCompoundWords:\n mydoc.addTokenView( \"WORDS\", nlpWordsList )\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n if hasCompoundWords:\n myTabView.showView(\"WORDS\")\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"Stanza\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>Stanza</div> <br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"stanza\")\n \n else:\n log_row.append(\"\")\n\n if \"spacy\" in packages:\n # SpaCy does not support Arabic and Russian\n if lang == 'ara' or lang == 'rus':\n message_HTML += \"SpaCy does not support Arabic or Russian. <br>\"\n isMessage = True\n\n else:\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n \n ## If cache is full, reload the cache.\n if cache.count(cache_spacy) > 100:\n cache.write(cache_spacy, \"spacy\")\n cache_spacy = cache.load(\"spacy\")\n \n ## Check text whether is already in cache\n if hash_string in cache_spacy[lang].keys():\n tokens, end_pos, lemma, pos, cache_spacy = cache.read(\"spacy\", cache_spacy, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_spacy = cache.add(\"spacy\", cache_spacy, lang, string, get_services_spacy)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"spacy\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"spacy\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"SpaCy\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>\" + \"SpaCy\" + \"</div><br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"spacy\")\n \n else:\n log_row.append(\"\")\n\n if \"udpipe\" in packages: \n ## If cache is full, reload the cache.\n if cache.count(cache_udpipe) > 100:\n cache.write(cache_udpipe, \"udpipe\")\n cache_udpipe = cache.load(\"udpipe\")\n \n ## Check text whether is already in cache\n if hash_string in cache_udpipe[lang].keys():\n tokens, end_pos, lemma, pos, cache_udpipe = cache.read(\"udpipe\", cache_udpipe, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_udpipe = cache.add(\"udpipe\", cache_udpipe, lang, string, get_services_udpipe)\n \n string_udpipe = \" \".join(tokens)\n\n # Initialize the TexAS document\n mydoc = tx.Document(string_udpipe)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n\n mydoc.setTokenList(tokens, 
indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"udpipe\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"udpipe\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"UDpipe\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>UDpipe</div> <br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"udpipe\")\n \n else:\n log_row.append(\"\")\n \n if \"trankit\" in packages:\n # trankit temporarily only support english\n if lang == 'eng':\n mydoc = tx.Document(string)\n mydoc.meta().set(\"authors\",\"hegler,yiwen,celine,yuqian\")\n mydoc.date().setTimestamp(\"2021-01-19T14:44\")\n \n ## If cache is full, reload the cache.\n if cache.count(cache_trankit) > 100:\n cache.write(cache_trankit, \"trankit\")\n cache_trankit = cache.load(\"trankit\")\n \n ## Check text whether is already in cache\n if hash_string in cache_trankit[lang].keys():\n tokens, end_pos, lemma, pos, cache_trankit = cache.read(\"trankit\", cache_trankit, lang, string)\n else:\n tokens, end_pos, lemma, pos, cache_trankit = cache.add(\"trankit\", cache_trankit, lang, string, get_services_trankit)\n \n mydoc.setTokenList(tokens, indexed=True)\n mydoc.views().get(\"TOKENS\").meta().set(\"generator\", \"spacy\")\n mydoc.views().get(\"TOKENS\").meta().set(\"model\", \"spacy\" + \"-\" + lang )\n mydoc.setSentenceList(end_pos)\n mydoc.addTokenView(\"LEMMA\", lemma)\n mydoc.addTokenView(\"POS\", pos)\n \n # Extract HTML View\n myTabView = tx.UITabularView(mydoc)\n myTabView.showView(\"LEMMA\", labelCSS=False)\n myTabView.showView(\"POS\")\n\n # concatenate the myTabView.HTML()\n header_input.append((\"Trankit\", str(len(end_pos)) , str(len(tokens)), str(get_tokens_per_sents(end_pos))))\n final_HTML += \"<div class='subtitle'>\" + \"Trankit\" + \"</div><br>\" + myTabView.HTML().replace(\"\\n\", \"\") + \"<br>\"\n log_row.append(\"trankit\")\n\n else:\n message_HTML += \"Trankit temporarily only supports English. 
<br>\"\n isMessage = True \n \n else:\n log_row.append(\"\")\n\n message_HTML += \"</div>\"\n if isMessage:\n return message_HTML + get_header_table(header_input) + \"<br><br>\" + final_HTML\n\n writeLog(log_row)\n return get_header_table(header_input) + \"<br><br>\" + final_HTML", "def test_get_toc_text(self) -> None:\n extraction = docx2python(RESOURCES / \"zen_of_python.docx\")\n assert extraction.document_runs == [\n [\n [[[\"Contents\"], [\"\\t\", \"Beautiful is better than ugly.\\t1\"], []]],\n [\n [\n [],\n [],\n [\"Beautiful is better than ugly.\"],\n [\"Explicit is better than implicit.\"],\n [\"Simple is better than complex.\"],\n [\"Complex is better than complicated.\"],\n [\"Flat is better than nested.\"],\n [\"Sparse is better than dense.\"],\n [\"Readability counts.\"],\n [\"Special cases aren't special enough to break the rules.\"],\n [\"Although practicality beats purity.\"],\n [\"Errors should never pass silently.\"],\n [\"Unless explicitly silenced.\"],\n [\"In the face of ambiguity, refuse the temptation to guess.\"],\n [\n \"There should be one-- and preferably only one --obvious way to do it.\"\n ],\n [\n \"Although that way may not be obvious at first unless you're Dutch.\"\n ],\n [\"Now is better than never.\"],\n [\"Although never is often better than *right* now.\"],\n [\"If the implementation is hard to explain, it's a bad idea.\"],\n [\n \"If the implementation is easy to explain, it may be a good idea.\"\n ],\n [\n \"Namespaces are one honking great idea -- let's do more of those!\"\n ],\n ]\n ],\n ]\n ]\n extraction.close()", "def test_course_index_view_with_course(self):\r\n CourseFactory.create(display_name='Robot Super Educational Course')\r\n resp = self.client.get_html('/course/')\r\n self.assertContains(\r\n resp,\r\n '<h3 class=\"course-title\">Robot Super Educational Course</h3>',\r\n status_code=200,\r\n html=True\r\n )\r\n _test_no_locations(self, resp)" ]
[ "0.6260808", "0.6079598", "0.5850458", "0.5665877", "0.55810404", "0.55767274", "0.5510529", "0.55077094", "0.550131", "0.5462771", "0.53828937", "0.537524", "0.53713846", "0.5369341", "0.533082", "0.52978146", "0.52650625", "0.52579224", "0.5252961", "0.52399004", "0.5219007", "0.5206727", "0.5205716", "0.5194098", "0.5182053", "0.51784045", "0.5156206", "0.51533103", "0.51482713", "0.5148134" ]
0.64500177
0
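The negatives for the row that ends here include a course-to-LaTeX build pipeline that skips unchanged sessions by md5-hashing their metadata and pickling the hashes to a dotfile. The sketch below restates that content-hash "rebuild only if changed" idea in isolation; it is illustrative only, and the function names and cache-file layout are hypothetical rather than taken from the snippet.

```python
# Illustrative sketch (not from the dataset) of the md5 "skip if unchanged" pattern
# used in the course-build snippet above: hash the inputs, compare against a pickled
# cache file, and rebuild only when something differs.
import hashlib
import pickle
from pathlib import Path


def _hashes(params: dict) -> dict:
    # One md5 hex digest per input value, keyed like the original dict.
    return {k: hashlib.md5(str(v).encode("utf-8")).hexdigest() for k, v in params.items()}


def needs_rebuild(params: dict, cache_file: Path) -> bool:
    """True if params differ from the hashes recorded in cache_file (or no cache yet)."""
    if not cache_file.exists():
        return True
    with cache_file.open("rb") as f:
        return pickle.load(f) != _hashes(params)


def save_build_state(params: dict, cache_file: Path) -> None:
    # Persist the current hashes so the next run can skip unchanged work.
    with cache_file.open("wb") as f:
        pickle.dump(_hashes(params), f)
```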
Generates a TeX document and then runs the pdflatex command to create a PDF from the TeX
def pdflatex(unwanted_courses): create_tex(unwanted_courses) cmd = ['pdflatex', '-interaction', 'nonstopmode', 'courses.tex'] proc = subprocess.Popen(cmd) proc.communicate() return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))", "def compile_latex(filepath_to_latex, workdir):\n proc = subprocess.Popen(['pdflatex', filepath_to_latex], cwd=workdir)\n proc.communicate()", "def tex():\n env.file_ext = \".tex\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))", "def createPDF(self, tex):\n for b in [TIMEOUT_BINARY, RUBBER_BINARY]:\n if not os.path.isfile(b):\n raise ValueError(\"Binary %s not available\" % b)\n\n self.explodeTeX(tex)\n\n p = subprocess.Popen(\n [\n TIMEOUT_BINARY, TIMEOUT,\n RUBBER_BINARY, '--inplace', '--ps', '-ops2pdf', '-Wall', '--maxerr=100', '--force',\n os.path.join(self.dir, 'exploded.tex'),\n ],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n (processOut, processErr) = p.communicate(\"\")\n exitCode = p.wait()\n if exitCode != 0:\n return processOut + processErr + \"\\nCompilation of document failed!\"\n\n self._outputFile = os.path.join(self.dir, 'exploded.pdf')\n return processOut + processErr", "def createPDFDoc(self, filepath):\n print(\"Starting pdf creation\")\n strMD=\"\"\n for fileMD,data in self.graph.nodes(data=True):\n if not os.path.isfile(fileMD):\n sys.exit(\"Error: \" + fileMD + \" does not exist\")\n if not fileMD.endswith(\"md\" or \"markdown\"):\n sys.exit(fileMD + \" is not a markdown file\");\n print(\"Found file: \" + fileMD)\n strMD = strMD + \" \" + fileMD\n cmd = \"pandoc --latex-engine=xelatex -s -o \" + filepath + strMD\t\n print(\"Starting file conversion.\")\n if subprocess.call(cmd) != 0:\n print(\"Conversion failed\")\n else:\n print(\"Saving pdf file to: \" + filepath)\n print(\"Conversion successfull\")", "def compile_latex_str(latex: str, filename: str):\n document = pylatex.Document()\n document.packages.append(pylatex.Package('amsfonts'))\n document.packages.append(pylatex.Package('amsmath'))\n document.append(pylatex.NoEscape(r'\\thispagestyle{empty}'))\n document.append(pylatex.NoEscape(' $' + latex + '$ '))\n document.generate_pdf(filename, compiler='pdflatex')", "def build(self) -> None:\n\n print(\"Genereting files..\")\n self.doc = self.doc + r'\\end{document}'\n\n f = open(\"latex\\\\\" + self.report_name + '.tex', 'w')\n f.write(self.doc)\n f.close()\n\n os.chdir('latex')\n\n cmd = ['pdflatex', '-interaction', 'nonstopmode', self.report_name + '.tex']\n #cmd = ['pdflatex', '-interaction', self.report_name + '.tex']\n\n for i in range(2):\n proc = subprocess.Popen(cmd)\n proc.communicate()\n retcode = proc.returncode\n if not retcode == 0:\n os.chdir('..')\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n os.unlink(self.report_name + '.aux')\n os.unlink(self.report_name + '.lof')\n os.unlink(self.report_name + '.log')\n os.unlink(self.report_name + '.lot')\n os.unlink(self.report_name + '.out')\n os.unlink(self.report_name + '.toc')\n\n os.chdir('..')", "def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n 
stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())", "def compile_pdf_from_template(template, insert_variables, out_path, pdflatex_options=\"-quiet\"):\n\n rendered_template = template.render(**insert_variables)\n build_d = os.path.join(os.path.dirname(os.path.realpath(out_path)), '.build')\n\n if not os.path.exists(build_d): # create the build directory if not exisiting\n os.makedirs(build_d)\n\n temp_out = os.path.join(build_d, \"tmp\")\n print(\"Writing output to {}.pdf\".format(temp_out))\n with open(temp_out + '.tex', \"w\") as f: # saves tex_code to output file\n f.write(rendered_template)\n\n os.system('pdflatex {} -output-directory {} {}'.format(pdflatex_options, build_d, temp_out + '.tex'))\n # shutil.copy2(temp_out+\".pdf\", os.path.relpath(out_path))", "def generate_pdf (tmp_folder, filename, pdf_page_size):\n\n shell_cmd = PDF_CONVERT_CMD.substitute(wkhtmltox_path=WKHTMLTOX_PATH, folder=tmp_folder, article_id=filename, page_size=pdf_page_size)\n proc = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout_value, stderr_value = proc.communicate()\n\n print u'\\n'.join(filter(None, [shell_cmd, stdout_value, stderr_value]))", "def render_latex(self, latex_snippet, output_png_file, output_pdf_file):\n\t\ttmp_dir = tempfile.mkdtemp(prefix = \"mdx_latex_\")\n\t\ttex_file = os.path.join(tmp_dir, \"file.tex\")\n\t\tpdf_file = os.path.join(tmp_dir, \"file.pdf\")\n\t\t\n\t\ttry:\n\t\t\t# Generate the tex file\n\t\t\ttex = LaTeXBlockProcessor.LATEX_TEMPLATE.render(\n\t\t\t\tpreamble = self.preamble,\n\t\t\t\tdocument = latex_snippet,\n\t\t\t)\n\t\t\ttex_hash = hashlib.sha1(tex).hexdigest()\n\t\t\t\n\t\t\t# Check if anything needs to be done\n\t\t\ttry:\n\t\t\t\tpng_meta = self.get_png_metadata(output_png_file)\n\t\t\t\tif tex_hash == png_meta.get(\"tex_hash\", None):\n\t\t\t\t\t# Already got the latest version in the PNG, do nothing!\n\t\t\t\t\treturn\n\t\t\texcept IOError:\n\t\t\t\t# No file exists yet so we definately should build it\n\t\t\t\tpass\n\t\t\t\n\t\t\t\n\t\t\t# Store the tex in a file to be compiled\n\t\t\twith open(tex_file, \"w\") as f:\n\t\t\t\tf.write(tex)\n\t\t\t\n\t\t\t# Try and build the file\n\t\t\tfor build_num in range(LaTeXBlockProcessor.LATEX_BUILDS):\n\t\t\t\tp = Popen( [ \"pdflatex\"\n\t\t\t\t , \"-shell-escape\"\n\t\t\t\t , \"-halt-on-error\"\n\t\t\t\t , \"-output-directory\", tmp_dir\n\t\t\t\t , tex_file]\n\t\t\t\t , cwd = os.path.realpath(self.configs.get(\"input_path\", \"./\"))\n\t\t\t\t , stdin = None\n\t\t\t\t , stdout = sys.stderr\n\t\t\t\t , stderr = sys.stderr\n\t\t\t\t )\n\t\t\t\tif p.wait() != 0:\n\t\t\t\t\traise Exception(\"LaTeX Compilation Failed for:\\n%s\"%latex_snippet)\n\t\t\t\n\t\t\t# Make a copy of required\n\t\t\tif output_pdf_file is not None:\n\t\t\t\tshutil.copy(pdf_file, output_pdf_file)\n\t\t\t\n\t\t\t# Convert to PNG\n\t\t\tp = Popen( [\"convert\", \"-density\", \"440\", pdf_file, \"-resize\", \"25%\", output_png_file]\n\t\t\t , stdin = None\n\t\t\t , stdout = sys.stderr\n\t\t\t , stderr = sys.stderr\n\t\t\t )\n\t\t\tif p.wait() != 0:\n\t\t\t\traise Exception(\"Converting PDF to PNG failed for:\\n%s\"%latex_snippet)\n\t\t\t\n\t\t\t# Add the hash of the source to the metadata \n\t\t\tself.set_png_metadata(output_png_file, {\"tex_hash\":tex_hash})\n\t\t\t\n\t\tfinally:\n\t\t\tshutil.rmtree(tmp_dir)", "def latex(name,\n latex_program='pdflatex', # or 'latex'\n options='--latex_code_style=vrb',\n version='paper', # or 'screen', '2up', 'A4', 
'A4-2up'\n postfix='', # or 'auto'\n ptex2tex=None, # only for ptex2tex step\n ):\n if name.endswith('.do.txt'):\n name = name.replace('.do.txt', '')\n system('rm -f %(name)s.aux' % vars())\n\n if version in ('paper', 'A4', '2up', 'A4-2up'):\n if not '--device=paper' in options:\n options += ' --device=paper'\n elif version == 'screen' and '--device=paper' in options:\n options = options.replace('--device=paper', '')\n if version in ('A4', 'A4-2up'):\n if not '--latex_papersize=a4' in options:\n options += ' --latex_papersize=a4'\n if postfix == 'auto':\n if version == 'paper':\n postfix = '4print'\n elif version == 'screen':\n postfix = '4screen'\n else:\n postfix = version\n\n # Compile source\n cmd = 'doconce format %(latex_program)s %(name)s %(options)s ' % vars()\n system(cmd)\n\n cmd = r\"doconce replace '%% insert custom LaTeX commands...' '\\usepackage[russian]{babel} \\usepackage{titlesec} \\titleformat{\\subsubsection}[runin] {\\normalfont\\normalsize\\bfseries}{\\thesubsubsection.}{1em}{} \\let\\paragraph=\\subsubsection' %(name)s.tex\" % vars()\n system(cmd)\n\n cmd = r\"doconce replace '\\usepackage{lmodern}' '%%\\usepackage{lmodern}' %(name)s.tex\" % vars()\n system(cmd)\n # Transform .p.tex to .tex?\n if ptex2tex is not None:\n cmd = ptex2tex\n system(cmd)\n\n # Load latex file into string for examination\n dofile = open(name + '.tex', 'r')\n text = dofile.read()\n dofile.close()\n\n latex_options = ''\n if latex_program == 'pdflatex':\n latex_options = '-file-line-error -interaction nonstopmode -halt-on-error'\n\n # Run latex\n shell_escape = ' -shell-escape' if 'begin{minted}' in text else ''\n cmd_latex = '%(latex_program)s%(shell_escape)s %(latex_options)s %(name)s' % vars()\n system(cmd_latex)\n\n if 'idx{' in text:\n cmd = 'makeindex %(name)s' % vars()\n system(cmd)\n\n if 'BIBFILE:' in text:\n cmd = 'bibtex %(name)s' % vars()\n system(cmd)\n system(cmd_latex)\n system(cmd_latex)\n if latex_program == 'latex':\n cmd = 'dvipdf %(name)s' % vars()\n system(cmd)\n # Could instead of dvipdf run the old-fashioned dvips and ps2pdf\n\n if version in ('2up', 'A4-2up'):\n # Use pdfnup to make two pages per sheet\n cmd = 'pdfnup --frame true --outfile %(name)s.pdf %(name)s.pdf' % vars()\n system(cmd)\n if postfix:\n shutil.copy(name + '.pdf', name + '-' + postfix + '.pdf')", "def generate_problems_pdf(args):\n contents = generate_table(start_int=args.start_int, end_int=args.end_int, table_type=args.table_type)\n convert_latex_to_pdf(args.filename, contents=contents, view=True)\n remove_temporary_files(args.filename)", "def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)", "def compile_latex(filename):\n working_directory, texfile = os.path.split(filename)\n try:\n run_command([\"pdflatex\", \"-draftmode\", \"-interaction=batchmode\", texfile], working_directory)\n run_command([\"pdflatex\", \"-interaction=batchmode\", texfile], working_directory)\n return True\n except subprocess.TimeoutExpired:\n return False", "def main():\n init_latex()", "def compile_pdf(directory, filename, move_result_to):\n cmd = ['latexmk', '-pdf', '-dvi-', '-interaction=nonstopmode', filename]\n proc = subprocess.Popen(cmd, cwd=directory)\n proc.communicate()\n\n retcode = proc.returncode\n if not retcode == 0:\n raise ValueError('Error {} executing command: {}'.format(retcode, ' '.join(cmd)))\n\n pdf_name = filename.replace('.tex', '.pdf')\n from_file = os.path.join(directory, pdf_name)\n to_file 
= os.path.join(move_result_to, pdf_name)\n \n if not os.path.exists(move_result_to):\n os.makedirs(move_result_to)\n \n os.rename(from_file, to_file)", "def compile(lines, name):\n with open(\"{}.tex\".format(name), \"w\") as f:\n for line in lines:\n f.write(line+\"\\n\")\n os.system(\"pdflatex {}.tex\".format(name))\n os.system(\"xdg-open {}.pdf\".format(name))\n os.system(\"rm {}.log\".format(name))\n os.system(\"rm {}.aux\".format(name))", "def generate_pdf(self):\n x = 100\n y = 100\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=\"A4\")\n p.drawString(x, y, \"TO DO\")\n p.showPage()\n p.save()\n pdf = buffer.getvalue()\n buffer.close()\n return pdf", "def build(self,\n save_to_disk=True,\n compile_to_pdf=True,\n show_pdf=True,\n delete_files=list(),\n build_from_dir='cwd'):\n tex = super().build()\n\n tex = build(self.doc_class) + '\\n' + self.build_preamble() + '\\n' + tex\n if save_to_disk:\n self.file.save(tex)\n\n if compile_to_pdf:\n self.file.compile_to_pdf(build_from_dir=build_from_dir)\n\n if show_pdf:\n open_file_with_default_program(self.filename, self.filepath)\n\n if isinstance(delete_files, str):\n if delete_files == 'all':\n delete_files = ['tex', 'aux', 'log']\n else:\n delete_files = [delete_files]\n\n for ext in delete_files:\n if ext in ['tex', 'aux', 'log', 'pdf']:\n os.remove(f'{self.filepath}/{self.filename}.{ext}')\n\n return tex", "def build_pdf(branch):\n os.chdir(os.path.join(gitdname,'statsmodels','docs'))\n sphinx_dir = os.path.join(virtual_dir,'bin')\n retcode = subprocess.call(\" \".join(['make','latexpdf',\n 'SPHINXBUILD='+sphinx_dir+'/sphinx-build']), shell=True)\n if retcode != 0:\n os.chdir(old_cwd)\n msg = \"\"\"Could not build the pdf docs for branch %s\"\"\" % branch\n raise Exception(msg)\n os.chdir(dname)", "def test_buildPDF(self):\n bookPath = self._setupTeXFiles()\n outputPath = FilePath(self.mktemp())\n\n builder = BookBuilder()\n builder.buildPDF(bookPath, self.howtoDir, outputPath)\n\n self.assertTrue(outputPath.exists())", "def generate_envelope_pdf(document):\n compiler = LatexCompiler()\n\n return compiler.compile(document)", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def convert_to_latex(self, builder, filename, latex_metadata):\n relative_path = ''\n tex_data = ''\n tex_build_path = self.texdir + relative_path\n pdf_build_path = self.pdfdir + relative_path\n template_folder = builder.config['jupyter_template_path']\n\n\n ensuredir(tex_build_path)\n ensuredir(pdf_build_path)\n\n ## setting the working directory\n os.chdir(self.texdir)\n\n ## copies all theme folder images to static folder\n if os.path.exists(builder.confdir + \"/theme/static/img\"):\n copy_tree(builder.confdir + \"/theme/static/img\", self.texdir + \"/_static/img/\", preserve_symlinks=1)\n else:\n self.logger.warning(\"Image folder not present inside the theme folder\")\n\n fl_ipynb = self.texdir + \"/\" + \"{}.ipynb\".format(filename)\n fl_tex = self.texdir + \"/\" + \"{}.tex\".format(filename)\n fl_tex_template = builder.confdir + \"/\" + template_folder + \"/\" + builder.config['jupyter_latex_template']\n\n ## do not convert excluded patterns to latex\n excluded_files = [x in filename for x in builder.config['jupyter_pdf_excludepatterns']]\n\n if not True in excluded_files: \n ## --output-dir - forms a directory in the same path as fl_ipynb - need a way to specify properly?\n ### converting to pdf using xelatex subprocess\n if sys.version_info[0] < 3:\n subprocess.call([\"jupyter\", 
\"nbconvert\",\"--to\",\"latex\",\"--template\",fl_tex_template,\"from\", fl_ipynb])\n else:\n subprocess.run([\"jupyter\", \"nbconvert\",\"--to\",\"latex\",\"--template\",fl_tex_template,\"from\", fl_ipynb])\n\n ### check if subdirectory\n subdirectory = \"\"\n index = filename.rfind('/')\n if index > 0:\n subdirectory = filename[0:index]\n filename = filename[index + 1:]\n\n ### set working directory for xelatex processing\n os.chdir(self.texdir + \"/\" + subdirectory)\n\n try:\n self.subprocess_xelatex(fl_tex, filename)\n if 'bib_include' in latex_metadata:\n self.subprocess_bibtex(filename)\n self.subprocess_xelatex(fl_tex, filename)\n self.subprocess_xelatex(fl_tex, filename)\n except OSError as e:\n print(e)\n except AssertionError as e:\n pass\n # exit() - to be used when we want the execution to stop on error", "def main(\n files: List[Path] = typer.Argument(default=None, dir_okay=False, exists=True),\n template: Optional[str] = typer.Option(\n None, '--template', help='Name of template file'\n ),\n logo: Optional[str] = typer.Option(None, '--logo', help='Name of logo file'),\n logo_width: Optional[str] = typer.Option(\n None, '--logo-width', help='Logo width (default 35mm)'\n ),\n highlight_style: Optional[str] = typer.Option(None, '--highlight-style',\n help='Specify coloring style to be used in highlighting source code'),\n syntax_definition: Optional[str] = typer.Option(None, '--syntax-definition',\n help='Specify a directory which contains syntax definition files'),\n no_toc: bool = typer.Option(\n False, '--no-toc', help='table of contents in PDF document'\n ),\n no_number_sections: bool = typer.Option(False, '--no-number-sections', help='no section numbering'),\n\n no_titlepage: bool = typer.Option(False, '--no-titlepage', help='title in PDF document'),\n tex_file: bool = typer.Option(\n False, '--tex', help='create TeX file instead of PDF document'\n ),\n email: Optional[str] = typer.Option(None, '--email', help='Author email'),\n company: Optional[str] = typer.Option(None, '--company', help='Name of company'),\n department: Optional[str] = typer.Option(\n None, '--department', help='Name of department'\n ),\n confidential: bool = typer.Option(\n False, '--confidential', help='indicate confidential'\n ),\n debug: bool = typer.Option(False, '--debug', help='turns debugging on'),\n pdf_engine: str = typer.Option(\n 'xelatex',\n '--pdf-engine',\n help='Specify pdf engine, one of lualatex, xelatex or tectonic ',\n ),\n _version: bool = typer.Option(\n None, '-V', '--version', callback=version_callback, help='Show version and exit'\n ),\n):\n\n if not files:\n typer.echo('Error: Must specify at least one .md file.')\n raise typer.Abort()\n\n mdfiles: List[str] = [str(md) for md in files]\n\n template = template or os.environ.get('MD2PDF_TEMPLATE')\n if template is None:\n print('No template specified')\n sys.exit(1)\n\n email = email or os.environ.get('MD2PDF_AUTHOR_EMAIL')\n footer_center = ''\n\n # command line overwrites `MD2PDF_PDF_ENGINE`. 
if both are not given\n # then `xelatex` is the default\n pdf_engine = pdf_engine or os.environ.get('MD2PDF_PDF_ENGINE') or 'xelatex'\n # check that pdf-engine is one of the following\n if pdf_engine not in ['xelatex', 'lualatex', 'tectonic']:\n print('--pdf-engine must be one of \"xelatex\", \"lualatex\", \"tectonic\"')\n sys.exit(1)\n\n ext = '.pdf'\n if tex_file:\n ext = '.tex'\n\n if len(mdfiles) == 1:\n toml_file = os.path.splitext(mdfiles[0])[0] + '.toml'\n\n if os.path.exists(toml_file):\n print(f'TOML file {toml_file} found')\n parsed_toml = toml.load(toml_file)\n default_val = parsed_toml.get('default')\n if default_val is None:\n print(f'No file names found in {toml_file}')\n else:\n mdfiles = default_val.get('files')\n\n for mdf in mdfiles:\n print(f'Compiling {mdf}')\n\n main_mdfile = os.path.realpath(mdfiles[0])\n\n outfile = Path(main_mdfile).stem + ext\n\n year = date.today().year\n\n company = company or os.environ.get('MD2PDF_COMPANY')\n department = department or os.environ.get('MD2PDF_DEPARTMENT')\n\n if company:\n if confidential:\n footer_center = f'© Copyright {year} {company}'\n else:\n footer_center = f'{year} {company}'\n\n pdcmd = PandocCmd(outfile)\n pdcmd.append(f'--template={template}')\n pdcmd.append(f'--pdf-engine={pdf_engine}')\n\n pdcmd.set_v('footer-center', footer_center)\n pdcmd.set_v('company', company)\n pdcmd.set_v('department', department)\n\n syntax_definition = syntax_definition or os.environ.get('MD2PDF_SYNTAX_DEFINITION_DIR')\n if syntax_definition is not None:\n add_syntax_definition(pdcmd, syntax_definition)\n\n pdcmd.append('--highlight-style')\n highlight_style = highlight_style or os.environ.get('MD2PDF_HIGHLIGHT_STYLE')\n if highlight_style is None:\n pdcmd.append('pygments')\n else:\n check_highlight_style(highlight_style)\n pdcmd.append(highlight_style)\n\n if not no_number_sections:\n pdcmd.append('--number-sections')\n\n if no_titlepage:\n pdcmd.set_m('titlepage', 'false')\n\n logo = logo or os.environ.get('MD2PDF_LOGO')\n pdcmd.set_v('logo', logo)\n\n logo_width = logo_width or os.environ.get('MD2PDF_LOGO_WIDTH')\n pdcmd.set_v('logo-width', logo_width)\n\n pdcmd.set_m('email', email)\n\n if not no_toc:\n pdcmd.append('--toc')\n\n pdcmd.extend(mdfiles)\n\n if debug:\n print(' '.join(pdcmd.pandoc))\n\n\n pdcmd.run()", "def render_to_pdf(template_src, context_dict={}):\n template = get_template(template_src)\n html = template.render(context_dict)\n result = BytesIO()\n pdf = pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")), result)\n if not pdf.err:\n return HttpResponse(result.getvalue(), content_type='application/pdf')\n return None", "def convert_to_latex(filename):\n basename, ext = os.path.splitext(filename)\n print(\"Converting '{0:s}' to LaTeX...\".format(filename))\n filename1 = basename + \".tex\"\n call_pandoc(filename, filename1, \"markdown\", \"latex\")\n print(\"Done!\")", "def generate_command(ctx, template):\n config_extension = '.py'\n template_extension = '.html'\n output_extension = '.pdf'\n\n context = Context(\n config=path.join(\n ctx.obj.get(\"config_path\"),\n template + config_extension\n ),\n template_path=path.join(\n ctx.obj.get(\"template_path\"),\n template + template_extension\n ),\n variables=ctx.obj.get(\"varibales\"),\n output_path=path.join(\n ctx.obj.get(\"output_path\"),\n template, output_extension),\n )\n\n generator = Generator(context)\n generator.execute()\n exit(0)", "def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = 
C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)" ]
[ "0.7378825", "0.71747476", "0.7139049", "0.70846206", "0.6956496", "0.6915404", "0.69058365", "0.68944186", "0.67462385", "0.66899025", "0.66854495", "0.66612506", "0.65569717", "0.6552778", "0.65360767", "0.64799696", "0.6461581", "0.6451979", "0.6432288", "0.63957024", "0.6367583", "0.63053197", "0.628626", "0.6249333", "0.62437063", "0.61355364", "0.612611", "0.6104166", "0.6089054", "0.6032495" ]
0.7367564
1
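The row above pairs the pdflatex query with a helper that writes courses.tex via create_tex and then shells out to the pdflatex binary. Below is a minimal, self-contained sketch of that compile step, assuming pdflatex is installed and on PATH; the create_tex/courses.tex specifics of the original snippet are not reproduced, and the function name here is hypothetical.

```python
# Minimal sketch of the compile step from the row above: write a .tex file and run
# the pdflatex binary on it. Assumes `pdflatex` is available on PATH.
import subprocess
from pathlib import Path


def compile_tex_to_pdf(tex_source: str, name: str = "out", workdir: str = ".") -> Path:
    tex_path = Path(workdir) / f"{name}.tex"
    tex_path.write_text(tex_source, encoding="utf-8")
    # -interaction nonstopmode keeps pdflatex from stopping at the first error prompt.
    cmd = ["pdflatex", "-interaction", "nonstopmode", tex_path.name]
    subprocess.run(cmd, cwd=workdir, check=True)
    return Path(workdir) / f"{name}.pdf"


if __name__ == "__main__":
    doc = r"\documentclass{article}\begin{document}Hello\end{document}"
    print(compile_tex_to_pdf(doc))
```

A second pdflatex pass is often needed to resolve cross-references and tables of contents; the sketch omits that.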
Trim an upper triangle sparse matrix so that only the first n diagonals are kept.
def diag_trim(mat, n): if sp.issparse(mat): if mat.format != "csr": raise ValueError("input type must be scipy.sparse.csr_matrix") # Trim diagonals by removing all elements further than n in the # upper triangle trimmed = sp.tril(mat, n, format="csr") trimmed = sp.triu(trimmed, format="csr") else: trimmed = mat.copy() n_diags = trimmed.shape[0] for diag in range(n, n_diags): set_mat_diag(trimmed, diag, 0) return trimmed return trimmed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim_whitespace(matrix, details, min_gap):\r\n if details == -1:\r\n row = matrix[0, ]\r\n else:\r\n row = matrix[matrix.shape[0] - 1, ]\r\n\r\n min_left = np.argmin(row)\r\n min_right = np.argmin(row[::-1])\r\n\r\n if min_left > min_gap:\r\n matrix = matrix[:, min_left - min_gap:]\r\n\r\n if min_right > min_gap:\r\n matrix = matrix[:, 0:len(row) - (min_right - min_gap)]\r\n\r\n return matrix", "def _elimination_matrix(dim):\n n = dimension_to_number_of_triangular_elements(dim)\n\n counter = np.zeros((dim, dim), int) - 1\n counter[np.tril_indices(dim)] = np.arange(n, dtype=int)\n\n columns = [_unit_vector_or_zeros(i, n) for i in counter.ravel(\"F\")]\n\n eliminator = np.column_stack(columns)\n return eliminator", "def _trim_margins(self, img):\n oldsize = (0, 0)\n while oldsize != img.shape: # while the size is changing\n oldsize = img.shape\n for i in range(4): # 4 times\n img = num.rot90(img) # rotate 90\n if num.std(img[0, :]) < self.trim_std: # if low std\n img = img[1:, :] # trim edge\n\n return img", "def trim(x):\n # make sure we get a 3D stack not 2D slice\n assert (x.shape) != 3\n if x.shape[-1] > 576:\n newx = x[:,32:-32, 32:-32]\n else:\n newx = x\n return newx[np.newaxis,...]", "def trim_dataset(mat, batch_size):\n no_of_rows_drop = mat.shape[0]%batch_size\n print(\"number of rows dropped\", no_of_rows_drop)\n if(no_of_rows_drop > 0):\n return mat[:-no_of_rows_drop]\n else:\n return mat", "def remove_blanks(mat):\n ids = []\n for idx, row in enumerate(mat):\n if not 1 in row:\n ids.append(idx)\n mat = np.delete(mat, ids, 0)\n mat = np.delete(mat, ids, 1)\n return mat", "def _drop_empty_rows_and_cols(confmat: Tensor) ->Tensor:\n confmat = confmat[confmat.sum(1) != 0]\n confmat = confmat[:, confmat.sum(0) != 0]\n return confmat", "def remove_border(src): #---- remove blank border\r\n rows = src.shape[0]; VMIN= 0; VMAX= rows; \r\n cols = src.shape[0]; UMIN= 0; UMAX= cols;\r\n for ky in range(1,rows):\r\n sum0 = np.sum(src[ky,:,:]);\r\n sum1 = np.sum(src[rows-ky-1,:,:]);\r\n if sum0== 0 and VMIN== ky-1: VMIN= ky;\r\n if sum1== 0 and VMAX== rows-ky+1: VMAX= rows-ky;\r\n for kx in range(1,cols):\r\n sum0 = np.sum(src[:,kx,:]);\r\n sum1 = np.sum(src[:,cols-kx-1,:]);\r\n if sum0== 0 and UMIN== kx-1: UMIN= kx;\r\n if sum1== 0 and UMAX== cols-kx+1: UMAX= cols-kx;\r\n #--- --- \r\n DV = np.minimum(VMIN, rows-VMAX);\r\n DU = np.minimum(UMIN, cols-UMAX);\r\n return src[DV:(rows-DV), DU:(cols-DU), :];", "def unscrew( S ):\n S = asarray(S)\n assert allclose(S[...,:3,:3].transpose(0,1),-S[...,:3,:3]),\"S[...,:3,:3] is skew\"\n assert allclose(S[...,3,:],0),\"Bottom row is 0\"\n return unscrew_UNSAFE(S)", "def trim_axs(axs, N):\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]", "def trim_axs(axs, N):\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]", "def trim_axs(axs, N):\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]", "def trim_axs(axs, n):\n axs = axs.flat\n for ax in axs[n:]:\n ax.remove()\n return axs[:n]", "def trim_zeros(x):\n assert len(x.shape) == 2\n return x[~np.all(x == 0, axis=1)]", "def _reduce_triangle(to_reduce):\n last_row = to_reduce[-1]\n for index in xrange(len(to_reduce) - 1):\n to_reduce[-2][index] += max(last_row[index:index + 2])\n del to_reduce[-1]", "def trim_zeros(array):\n multislice = []\n for i in range(array.ndim):\n sum_axes = tuple(j for j in range(array.ndim) if j is not i)\n edges = np.where(np.sum(array, axis=sum_axes) > 0)\n if edges[0].size == 0:\n return np.array([], dtype=array.dtype)\n 
low = edges[0][0]\n high = edges[0][-1]\n multislice.append(slice(low, high+1, 1))\n return array[tuple(multislice)]", "def eliminate(self, A):\r\n length = len(A)\r\n width = len(A[0])\r\n for i in range(length):\r\n if A[i][i] != 1:\r\n # if current entry on the diagonal is 0 (off), proceed row interchange. \r\n for k in range(i + 1, length):\r\n if A[k][i] == 1:\r\n A[i], A[k] = A[k], A[i]\r\n self.step += 1\r\n break\r\n for j in range(i + 1, length):\r\n # eliminate the entries under the diagonal\r\n if A[j][i] == 1:\r\n A[j] = [(A[i][m] + A[j][m]) % 2 for m in range(width)]\r\n self.step += width\r\n\r\n for i in range(length - 1, -1, -1):\r\n # eliminate the entries above the diagonal\r\n for j in range(i - 1, -1, -1):\r\n if A[j][i] != 0:\r\n A[j] = [(A[i][k] + A[j][k]) % 2 for k in range(width)]\r\n self.step += width\r\n # Now, only the entries on the diagonal are 1, the rest are 0.\r", "def upper(mat):\n idx = np.triu_indices_from(mat, k=1)\n return mat[idx]", "def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)", "def triu(m, k=0):\r\n m = asanyarray(m)\r\n out = multiply((1-tri(m.shape[0], m.shape[1], k-1, int)),m)\r\n return out", "def cut_array_border(array): \n array[:, [0, array.shape[1]-1]]=0\n array[[0, array.shape[0]-1], :]=0\n \n \n return array", "def _flatten_lower_triangle(matrix):\r\n matrix = asarray(matrix)\r\n flattened = []\r\n for col_num in range(matrix.shape[1]):\r\n for row_num in range(matrix.shape[0]):\r\n if col_num < row_num:\r\n flattened.append(matrix[row_num][col_num])\r\n return flattened", "def reduce_triangle(to_reduce):\r\n last_row = to_reduce[-1]\r\n for index in xrange(len(to_reduce)-1):\r\n to_reduce[-2][index] += max(last_row[index:index+2])\r\n del to_reduce[-1]\r\n return to_reduce", "def offDiagPairs(self):\n return np.transpose(np.nonzero(np.triu(self.LaplacianMatrix,k=2)))", "def upper_triangular_matrix_to_full_matrix(arr, n):\n triu0 = np.triu_indices(n, 0)\n triu1 = np.triu_indices(n, 1)\n tril1 = np.tril_indices(n, -1)\n\n mat = np.zeros((n, n), dtype=float)\n mat[triu0] = arr\n mat[tril1] = mat[triu1]\n\n return mat", "def Problem2(n):\n diag_entries = np.empty((3,n))\n diag_entries[0] = np.ones(n)*(-1)\n diag_entries[1] = np.ones(n)*2\n diag_entries[2] = np.ones(n)*(-1)\n A = sparse.spdiags(diag_entries, [-1,0,1],n,n,format=\"csr\")\n return A", "def get_off_diagonal(matrix):\n\toff_diag = scipy.array(matrix, dtype=matrix.dtype)\n\toff_diag[scipy.diag_indices_from(matrix)] = 0\n\treturn off_diag", "def reduce_size(data):\r\n data_ = pd.DataFrame(data)\r\n nonzero_idxs = np.flatnonzero(np.diag(data_))\r\n cov_reduced = data_.iloc[nonzero_idxs, nonzero_idxs]\r\n return nonzero_idxs, cov_reduced", "def delete_all_gap(self):\n # pdb.set_trace()\n\n rem = set(self.get_all_gap_cols())\n subset = [x for x in range(0, self.get_length()) if x not in rem]\n self.remove_columns(set(rem))\n #_LOG.debug(\"Alignment length reduced to %d\" % len(subset))\n return subset", "def EliminateRowsCols(self, *args):\n return _hypre.HypreParMatrix_EliminateRowsCols(self, *args)" ]
[ "0.64127666", "0.58649224", "0.57881296", "0.5781573", "0.5728801", "0.57002896", "0.5696073", "0.568997", "0.5671774", "0.566362", "0.566362", "0.566362", "0.56233585", "0.5622041", "0.5614353", "0.55710584", "0.55435014", "0.5482823", "0.5455417", "0.54416484", "0.54353005", "0.54291195", "0.5388212", "0.5386462", "0.53654027", "0.5352046", "0.53428036", "0.53383636", "0.5330673", "0.53039026" ]
0.7814169
0
Computes genomic distance law by averaging over each diagonal in the upper triangle matrix. If a list of detectable bins is provided, pixels in missing bins will be excluded from the averages. A maximum distance can be specified to define how many diagonals should be computed.
def distance_law( matrix, detectable_bins=None, max_dist=None, smooth=True, fun=np.nanmean ): mat_n = matrix.shape[0] if max_dist is None: max_dist = mat_n n_diags = min(mat_n, max_dist + 1) dist = np.zeros(mat_n) if detectable_bins is None: detectable_bins = np.array(range(mat_n)) for diag in range(n_diags): # Find detectable which fall in diagonal detect_mask = np.zeros(mat_n, dtype=bool) detect_mask[detectable_bins] = 1 # Find bins which are detectable in the diagonal (intersect of # hori and verti) detect_mask_h = detect_mask[: (mat_n - diag)] detect_mask_v = detect_mask[mat_n - (mat_n - diag) :] detect_mask_diag = detect_mask_h & detect_mask_v detect_diag = matrix.diagonal(diag)[detect_mask_diag] dist[diag] = fun(detect_diag[detect_diag > 0]) # Smooth the curve using isotonic regression: Find closest approximation # with the condition that point n+1 cannot be higher than point n. # (i.e. contacts can only decrease when increasing distance) if smooth and mat_n > 2: ir = IsotonicRegression(increasing=False) dist[~np.isfinite(dist)] = 0 dist = ir.fit_transform(range(len(dist)), dist) return dist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum_diag(max_lines):\r\n dsum = 1 # sum of diagonals\r\n cpt = 1 # number of lines processed\r\n val = 1 # value of the current place in the square\r\n inc = 0 # the increment between number for one line\r\n \r\n while cpt < max_lines:\r\n cpt += 2\r\n inc += 2\r\n \r\n for corner in range(4):\r\n val += inc\r\n dsum += val\r\n\r\n return dsum", "def _calculate_mi(self, array, corr_range, bins, tau_max, lag_mode):\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n # Initialize\n hist2D = numpy.zeros((bins, bins), dtype=\"int32\")\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # Precalculation of the log\n gfunc = numpy.zeros(corr_range+1)\n for t in range(1, corr_range + 1):\n gfunc[t] = t*numpy.log(t)\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus from -tau_max to tau_max INCLUDING the last\n # tau value\n for t in range(2*tau_max + 1):\n tau = t - tau_max\n\n # here the joint probability distribution is calculated\n for k in range(corr_range):\n indexi = array[tau_max, i, k]\n indexj = array[t, j, k]\n hist2D[indexi, indexj] += 1\n\n # here the joint entropy is calculated by summing over all\n # pattern combinations\n jointent = 0.0\n for l in range(bins):\n for m in range(bins):\n jointent -= gfunc[hist2D[l, m]]\n hist2D[l, m] = 0\n\n jointent /= float(corr_range)\n jointent += numpy.log(float(corr_range))\n\n # Mutual Information is...\n mi = 0.0\n mi = 2. * numpy.log(bins) - jointent\n\n # norm the mi\n mi /= numpy.log(bins)\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = mi\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += mi\n if t >= tau_max:\n corrmat[0, i, j] += mi\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if mi > maxcross:\n maxcross = mi\n argmax = tau\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n if lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat", "def endtoend(d):\n dists = distance_matrix(d)\n avgdists = np.array([np.diag(dists, i).mean() for i in range(dists.shape[0])])\n return avgdists", "def _CalculateMaxDistanceBetweenBins(self):\n total_ops = len(self._mix_bins) * len(self._mix_bins)\n print 'Calculating max distance between bins. 
Total calculations: %s' % (\n total_ops)\n self._max_distance_between_bins = 0\n \n for bin_i in self._mix_bins:\n for bin_j in self._mix_bins:\n d = _Dist(bin_i.GetFixedMean(), bin_j.GetFixedMean())\n if self._max_distance_between_bins < d:\n self._max_distance_between_bins = d\n print 'Max distance is calculated'", "def _compute_hist_distances(\n self,\n all_histograms: Dict,\n n_attr: int\n ) -> np.ndarray:\n all_distances = np.empty((self.n_keep_nuclei, self.n_class_pairs, n_attr))\n for k_id , k in enumerate(self.keep_nuclei_list):\n omega = 0\n for tx in range(self.n_tumors):\n for ty in range(self.n_tumors):\n if tx < ty:\n for attr_id in range(n_attr):\n all_distances[k_id, omega, attr_id] = wasserstein_distance(\n all_histograms[k][tx][attr_id],\n all_histograms[k][ty][attr_id]\n )\n omega += 1\n return all_distances", "def ratio_cut_laplacian(affinity_mat):\n D_mat = np.zeros(affinity_mat.shape)\n d_trace = np.sum(affinity_mat, axis=0)\n D_mat.flat[::D_mat.shape[0]+1] = d_trace\n return D_mat - affinity_mat", "def thin_edges(magnitude, angle, low):\n # define footprints for the angle cases (1, 2, 3 and 4)\n t1 = time.time()\n f1 = np.array([[1, 0, 1]])\n f2 = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 1]]) # hier had ik f4 verwacht, ma de hoeken kloppen niet\n f3 = np.array([[1], [0], [1]])\n f4 = np.array([[0, 0, 1], [0, 0, 0], [1, 0, 0]]) # hier had ik f2 verwacht\n # define conditions\n t2 = time.time()\n cond1 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f1, mode='constant', cval=-np.inf) # True/False matrix\n cond2 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f2, mode='constant', cval=-np.inf) # True/False matrix\n cond3 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f3, mode='constant', cval=-np.inf) # True/False matrix\n cond4 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f4, mode='constant', cval=-np.inf) # True/False matrixcond1 = magnitude >= ndimage.maximum_filter(magnitude, footprint=f1, mode='constant', cval=-np.inf) # True/False matrix\n t3 = time.time()\n pos_ang = np.where(angle < 0, angle + 180, angle) # make the negative angles positive, works for this application\n t4 = time.time()\n # transform the angle matrix to a matrix of 1/0, indicating wether the element is the highest along its gradient\n ang_to_bool = np.where(pos_ang <= 22.5, cond1, np.where(pos_ang <= 67.5, cond2, np.where(pos_ang <= 112.5, cond3, np.where(\n pos_ang <= 157.5, cond4, np.where(pos_ang > 157.5, cond1, pos_ang)))))\n t5 = time.time()\n filtered = np.where(magnitude > low, ang_to_bool, 0) # keep only the bools of the values higher than the low th\n t6 = time.time()\n remasked = np.where(filtered, magnitude, 0) # og waardes er weer over trekken\n t7 = time.time()\n\n global timed\n if timed:\n print(\"EDGE THINNING:\")\n print(f\"Defining footprints: {t2-t1}s\")\n print(f\"Defining conditions: {t3-t2}s\")\n print(f\"Calculating pos_ang: {t4-t3}s\")\n print(f\"Calculating ang_to_bool: {t5-t4}s\")\n print(f\"Filtering: {t6-t5}s\")\n print(f\"Remasking: {t7-t6}s\")\n print(\"-----------------------------------\")\n print(f\"TOTAL TIME: {t7-t1}s\\n\")\n\n return remasked", "def compute_density(\n traj,\n area,\n surface_normal_dim=2,\n pore_center=0.0,\n max_distance = 1.0,\n bin_width = 0.01\n ):\n distances = traj.xyz[:,:,surface_normal_dim] - pore_center\n bin_centers = []\n density = []\n for bin_center in np.arange(-max_distance, max_distance, bin_width):\n mask = np.logical_and(\n distances > bin_center - 0.5 * bin_width,\n distances < 
bin_center + 0.5 * bin_width\n )\n bin_centers.append(bin_center)\n #changed the density from original below: added the conversion factor nm^2 to A^2 to mult by 10**2\n #density.append(mask.sum() / (area * bin_width * traj.n_frames))\n density.append(10**2 * mask.sum() / (area * bin_width * traj.n_frames))\n return bin_centers, density", "def freedman_diaconis_bins(self, arr):\n # From https://stats.stackexchange.com/questions/798/\n if len(arr) < 2:\n return 1\n # Calculate the iqr ranges.\n self.iqr(arr)\n # Calculate the h\n h = 2 * (self.q3 - self.q1) / (len(arr) ** (1 / 3))\n # fall back to sqrt(a) bins if iqr is 0\n if h == 0:\n return int(np.sqrt(arr.size))\n else:\n return int(np.ceil((arr.max() - arr.min()) / h))", "def metricize(dist):\n dist = np.sqrt(dist)\n olddist = dist + 1\n d_ij = dist\n different = (olddist == dist).all()\n while(not different):\n # rint 'in loop'\n olddist = dist\n for i in range(len(dist)):\n for j in range(len(dist)):\n for k in range(len(dist)):\n dijk = dist[i, k] + dist[k, j]\n d_ij[i, j] = np.amin([d_ij[i, j], dijk])\n dist[i, j] = d_ij[i, j]\n different = (olddist == dist).all()\n return dist ** 2", "def avg_dists(self):\n \n d = self.descriptors\n # make an empty array to fill b/c it is a touch faster\n averages = np.empty([1, self.d_length])\n for i, u in enumerate(d):\n s = 0\n for j, v in enumerate(d):\n if i != j:\n s += self.jaccard(u, v)\n averages[0, i] = (s / (self.d_length-1))\n return averages[0]", "def define_areas(\n pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]", "def CL_histogram_MMD(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10):\n ## 0) Parsing the inputs\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n \n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n m = sketch.size\n # 1) Construct the A matrix\n A = 1j*np.zeros((m,bins)) # Pre-allocation\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n box = domain.copy()\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n 
A[:,p] = fourierSketchOfBox(box,Phi,nb_cat_per_dim) \n \n # 1.b) cast to real \n Ari = np.r_[A.real, A.imag]\n \n # 2) create b vector\n b = np.r_[sketch.real, sketch.imag]\n \n # 3) solve the optimization problem\n def _f_grad(x):\n r = Ari@x-b\n f = 0.5*np.linalg.norm(r)**2\n grad = Ari.T@r\n return (f,grad)\n \n # Starting point\n x0 = np.ones(bins)/bins\n # Linear constraints\n A_constr = np.zeros((bins,bins))\n l_constr = 0*np.ones(bins) # Positive constraints\n A_constr[:bins,:bins] = np.eye(bins)\n upper_bound = 5 # weird that it must be large\n u_constr = upper_bound*np.ones(bins) # Sum-to one constraints\n constr = LinearConstraint(A_constr,l_constr,u_constr)\n\n # Solve\n sol = minimize(_f_grad, x0, method='trust-constr', bounds=None, constraints=constr, jac=True, options={'verbose': 0})\n\n return project_probabilitySimplex(sol.x)", "def hyperdiagonal(coords):\n \n mini = coords.min(axis=0)\n maxi = coords.max(axis=0)\n dist = (maxi - mini)**2\n dist = np.sqrt(dist.sum())\n return dist", "def diagonalizing_gates(self):\n raise NotImplementedError", "def adist(field, bins=(40,80), weights=1.0, \\\n meridian=0, equator= 0, rrange=[0, None]):\n pos = field['locations']\n boxsize = field.boxsize\n center = field.origin + field.boxsize / 2\n x = pos[:, 0] - center[0]\n y = pos[:, 1] - center[1]\n z = pos[:, 2] - center[2]\n \n if isscalar(rrange): \n rmin = 0\n rmax = rrange\n else :\n rmin = rrange[0]\n rmax = rrange[1]\n\n if rmax == None : rmax = boxsize.min()/2\n\n r = sqrt(x**2 + y ** 2 + z**2)\n mask = (rmin < r) & (r < rmax)\n\n x = x[mask]\n y = y[mask]\n z = z[mask]\n r = r[mask]\n\n xx,yy = sinusoid(x,y,z, meridian=meridian, equator=equator)\n\n weights = field[weights][mask]\n\n print xx.max(), xx.min(), weights,yy.max(), yy.min()\n N, xe, ye = histogram2d(xx, yy, bins=bins, weights=weights)\n print xe, ye\n field = field['default'][mask]\n\n h, xe, ye = histogram2d(xx, yy, bins=(xe,ye), weights=weights * field)\n\n h[N == 0] = NaN\n\n h /= N\n\n return h, N, xe, ye", "def adaptiveCentroid(data=None,sigma=None):\n nrow,ncol=data.shape\n Isum = data.sum()\n Icol = data.sum(axis=0) # sum over all rows\n Irow = data.sum(axis=1) # sum over all cols\n colgrid = np.arange(ncol)\n rowgrid = np.arange(nrow)\n rowmean=np.sum(rowgrid*Irow)/Isum\n colmean=np.sum(colgrid*Icol)/Isum\n ROW,COL=np.indices((nrow,ncol))\n maxItr = 50\n EP = 0.0001\n for i in range(maxItr):\n wrmat = wr(ROW,COL,rowmean,colmean,sigma)\n IWmat = data*wrmat\n IWcol = IWmat.sum(axis=0)\n IWrow = IWmat.sum(axis=1)\n drowmean = np.sum((rowgrid-rowmean)*IWrow)/np.sum(IWrow)\n dcolmean = np.sum((colgrid-colmean)*IWcol)/np.sum(IWcol)\n rowmean = rowmean+2.*drowmean\n colmean = colmean+2.*dcolmean\n if drowmean**2+dcolmean**2 <= EP:\n break\n\n return rowmean,colmean", "def distance_metric(seg_A, seg_B, dx):\n table_md = []\n table_hd = []\n X, Y, Z = seg_A.shape\n for z in range(Z):\n # Binary mask at this slice\n slice_A = seg_A[:, :, z].astype(np.uint8)\n slice_B = seg_B[:, :, z].astype(np.uint8)\n\n # The distance is defined only when both contours exist on this slice\n if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:\n # Find contours and retrieve all the points\n _, contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_A = contours[0]\n for i in range(1, len(contours)):\n pts_A = np.vstack((pts_A, contours[i]))\n\n _, contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_B = contours[0]\n for i in 
range(1, len(contours)):\n pts_B = np.vstack((pts_B, contours[i]))\n\n # Distance matrix between point sets\n M = np.zeros((len(pts_A), len(pts_B)))\n for i in range(len(pts_A)):\n for j in range(len(pts_B)):\n M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])\n\n # Mean distance and hausdorff distance\n md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx\n hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx\n table_md += [md]\n table_hd += [hd]\n\n # Return the mean distance and Hausdorff distance across 2D slices\n mean_md = np.mean(table_md) if table_md else None\n mean_hd = np.mean(table_hd) if table_hd else None\n return mean_md, mean_hd", "def compute_gradient_mean(chol, y_unbiased, n):\n solve = cho_solve(chol, -1.0 * np.ones(n))\n return -1.0 * np.dot(y_unbiased, solve)", "def demo_diag21():\n n_dim = 2\n A = np.diag([2, 1])\n covar = np.eye(n_dim)\n mean = np.zeros(n_dim)\n approx = approx_quad_form(mean, covar, A)\n\n # Sample from true dist\n n_sample = 10000\n x = np.random.multivariate_normal(mean, covar, n_sample)\n q_samples = np.zeros(n_sample)\n for i in range(n_sample):\n q_samples[i] = x[i] @ A @ x[i]\n\n q = np.linspace(0, 10)\n\n plt.plot(\n q, approx(q), label='Approx.',\n color='tab:blue', linestyle='--')\n bins = np.linspace(0, 10, 51)\n bins[-1] = np.inf\n plt.hist(\n q_samples, density=True, histtype='stepfilled',\n bins=bins,\n alpha=0.5, color='black', label='Samples')\n plt.xlabel('q')\n plt.ylabel('pdf(q) [-]')\n plt.legend()", "def diag_multidim_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin):\n return np.sum(diag_gaussian_log_likelihood(z_u, mean_u, logvar_u, varmin), axis=0)", "def get_detectable_bins(mat, n_mads=3, inter=False):\n matrix = mat.copy()\n matrix.eliminate_zeros()\n\n def mad(x): return ss.median_abs_deviation(x, nan_policy=\"omit\")\n\n if not inter:\n if matrix.shape[0] != matrix.shape[1]:\n raise ValueError(\"Intrachromosomal matrices must be symmetric.\")\n # Replace nonzero pixels by ones to work on prop. 
of nonzero pixels\n matrix.data = np.ones(matrix.data.shape)\n # Compute number of nonzero values in each bin\n sum_bins = sum_mat_bins(matrix)\n # Compute variation in the number of nonzero pixels\n sum_mad = mad(sum_bins)\n # Find poor interacting rows and columns\n sum_med = np.median(sum_bins)\n detect_threshold = max(1, sum_med - sum_mad * n_mads)\n\n # Removal of poor interacting rows and columns\n good_bins = np.flatnonzero(sum_bins >= detect_threshold)\n good_bins = (good_bins, good_bins)\n else:\n # Adapted for asymetric matrices (need to compute rows and columns)\n sum_rows, sum_cols = matrix.sum(axis=1).A1, matrix.sum(axis=0).A1\n mad_rows, mad_cols = mad(sum_rows), mad(sum_cols)\n med_rows, med_cols = np.median(sum_rows), np.median(sum_cols)\n detect_threshold_rows = max(1, med_rows - mad_rows * n_mads)\n detect_threshold_cols = max(1, med_cols - mad_cols * n_mads)\n good_rows = np.flatnonzero(sum_rows > detect_threshold_rows)\n good_cols = np.flatnonzero(sum_cols > detect_threshold_cols)\n good_bins = (good_rows, good_cols)\n return good_bins", "def bin_discretize(self, variables=[], bins=3,\n min_const_samples_bin_size=1.0/3):\n self.edges=np.zeros((self.arity.size,bins+1))\n for i in variables:\n un_cnt=np.unique(self.data[:,i],return_counts=True)\n constvals=un_cnt[0][un_cnt[1]>self.data.shape[0]*min_const_samples_bin_size]\n mask=np.ones(self.data.shape[0],dtype=bool)\n if constvals.size>0:\n for j,cv in enumerate(constvals):\n mask*=(self.data[:,i]!=cv)\n self.data[self.data[:,i]==cv,i]=j\n\n size=np.sum(mask)/bins\n sorted_i=np.argsort(self.data[mask,i])\n edges=[self.data[mask,i][sorted_i[int(size*num)-1]] for num in range(1,bins)]\n self.edges[i]=[self.data[mask,i][sorted_i[0]]]+edges+[self.data[mask,i][sorted_i[-1]]]\n self.data[mask,i]=np.searchsorted(edges,self.data[mask,i])+constvals.size\n self.arity[i]=len(edges)+1+constvals.size", "def correlation_d(mat):\n\n print(\"DO NOT USE. 
BROKEN?\")\n\n if mat.ndim != 2:\n raise ValueError(\"mat must be a 2d matrix\")\n if np.any(mat > 1) or np.any(mat < 0):\n raise ValueError(\"mat must be binary\")\n\n N = mat.size\n g = np.diagonal(mat)\n # g = np.tril(mat, -1) # g is the sum over the heavside used in Grassberger\n # g = g[g.nonzero()]\n g = g.sum()\n\n return (2.0 / N * (N - 1)) * g", "def bin_width_doane(a):\n bad = np.isnan(a) | np.isinf(a)\n data = a[~bad]\n n = data.size\n g1 = skew(data)\n sigma_g1 = np.sqrt(6 * (n - 2) / ((n + 1) * (n + 3)))\n k = 1 + np.log2(n) + np.log2(1 + np.abs(g1) / sigma_g1)\n acc = (data.max() - data.min()) / k\n return acc", "def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full", "def calculate_sdm_min_diagonal(sdm, window_size = 48, is_partial = True):\n length = len(sdm)\n dig_mean = np.zeros(length)\n for i in range(length):\n diag = np.diag(sdm, -i)\n if is_partial and len(diag) > window_size:\n window = np.ones(window_size) / window_size\n dig_mean[i] = np.min(np.convolve(diag, window, mode = 'valid'))\n else:\n dig_mean[i] = np.sum(np.diag(sdm, -i)) / (length - i)\n\n return dig_mean", "def image_quad_norm(inarray):\n # If there is an hermitian symmetry\n if inarray.shape[-1] != inarray.shape[-2]:\n return 2 * np.sum(np.sum(np.abs(inarray)**2, axis=-1), axis=-1) - \\\n np.sum(np.abs(inarray[..., 0])**2, axis=-1)\n else:\n return np.sum(np.sum(np.abs(inarray)**2, axis=-1), axis=-1)", "def calculate_sdm(feature_matrix, is_normalization = False):\n length = len(feature_matrix)\n self_distance_matrix = np.zeros((length, length))\n for i in range(length):\n row1 = feature_matrix[i, :]\n for j in range(length):\n row2 = feature_matrix[j, :]\n self_distance_matrix[i, j] = np.sqrt(np.sum(np.square(row1 - row2)))\n\n if is_normalization:\n minima = np.min(self_distance_matrix)\n maxima = np.max(self_distance_matrix)\n self_distance_matrix = (self_distance_matrix - minima) / (maxima - minima)\n return self_distance_matrix", "def guess_2D_gauss(data):\n total = data.sum()\n Y, X = np.indices(data.shape)\n yCenter = (Y*data).sum()/total\n xCenter = (X*data).sum()/total\n col = data[int(yCenter),:]\n xWidth = np.sqrt(((X[0]-xCenter)**2*col).sum()/col.sum())\n row = data[:,int(xCenter)]\n yWidth = np.sqrt(((Y[:,0]-yCenter)**2*row).sum()/row.sum())\n\n offset = np.min(data)\n amplitude = np.max(data)-offset\n\n return [amplitude, xCenter, yCenter, xWidth, yWidth, offset, 0.]" ]
[ "0.5350398", "0.52518773", "0.5155675", "0.5135908", "0.50542927", "0.502655", "0.5009027", "0.5004774", "0.4977624", "0.49264264", "0.48782477", "0.48724344", "0.4847657", "0.4825151", "0.48010293", "0.4734856", "0.47299528", "0.47192916", "0.47074535", "0.46959183", "0.46754423", "0.46545872", "0.46416897", "0.4636898", "0.46058112", "0.45979014", "0.45874825", "0.456804", "0.45655823", "0.45610264" ]
0.6471979
0
Compute the sum of matrix bins (i.e. rows or columns) using only the upper triangle, assuming symmetrical matrices.
def sum_mat_bins(mat): # Equivalent to row or col sum on a full matrix # Note: mat.sum returns a 'matrix' object. A1 extracts the 1D flat array # from the matrix return mat.sum(axis=0).A1 + mat.sum(axis=1).A1 - mat.diagonal(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def row_col_sums(i, b_j, bins, C, n_u):\n s= C[i][i]*n_u[i]*n_u[i]\n for j in range(bins[b_j], bins[b_j+1]):\n if i != j:\n s+= (C[i][j] + C[j][i])*n_u[i]*n_u[j]\n return s", "def block_sum(i, bins, C, n_u):\n s= 0.0\n for j in range(bins[i], bins[i+1]):\n for k in range(bins[i], bins[i+1]):\n s+= C[j][k]*n_u[j]*n_u[k]\n return s", "def _get_integrals_fast(bins):\n\n if min(bins.shape) == 1: \n flatsum = np.cumsum(bins.flat[::-1])[::-1]\n sums = np.subtract(*np.meshgrid(flatsum,flatsum))\n assert sums.shape[0] == sums.shape[1]\n return sums\n\n else: \n y_sums = np.cumsum(bins[:,::-1], axis = 1)[:,::-1]\n sums = np.cumsum(y_sums[::-1,:], axis = 0)[::-1,:]\n return sums", "def _sum_on_axis(self, M, undirected=True):\n\n if undirected:\n M = (M + M.T).astype('bool')\n colsum = M.sum(axis=0) # , dtype=np.int64)\n rowsum = M.sum(axis=1) # , dtype=np.int64) # already np.int64\n return rowsum.T, colsum", "def column_sums(square):\n total = 0", "def cartesian_sum(matrix, rows, cols):\n\n res = 0 \n for row in rows:\n for col in cols:\n res += matrix[row, col]\n return res", "def Sum2d(a):\n return(np.sum(np.sum(a,-1),-1))", "def bin_input(neu, bin_size, overlap = 0):\n win_d = bin_size - overlap\n n_bins = int(((neu.shape[0]-bin_size)/win_d)+1)\n FRmat = np.empty([n_bins, neu.shape[1]])\n for i in range(n_bins):\n FRmat[i,:] = np.sum(neu[i*win_d:i*win_d+bin_size,:], axis = 0)\n \n return FRmat", "def correlation_bins(shred):\n return 0", "def sum_diag(max_lines):\r\n dsum = 1 # sum of diagonals\r\n cpt = 1 # number of lines processed\r\n val = 1 # value of the current place in the square\r\n inc = 0 # the increment between number for one line\r\n \r\n while cpt < max_lines:\r\n cpt += 2\r\n inc += 2\r\n \r\n for corner in range(4):\r\n val += inc\r\n dsum += val\r\n\r\n return dsum", "def sum(matrix):\n\n return float(sum([sum(row) for row in matrix]))", "def create(matrix):\n limit_y = len(matrix)\n limit_x = len(matrix[0])\n\n for y in range(1, limit_y):\n bit.create(matrix[y])\n\n for x in range(1, limit_x):\n for y in range(1, limit_y):\n k = y + (y & -y)\n if k < limit_y:\n matrix[k][x] += matrix[y][x]", "def rowsums (self):\n return self.values.sum (axis=0)", "def binned_matrix(in_tensor, method='max'):\n print(in_tensor[0])\n in_tensor = nn.Softmax(dim=2)(torch.einsum('cij -> ijc', in_tensor))\n if method == 'max':\n # Predict the bins with the highest probability\n return in_tensor.max(2)[1]\n elif method == 'avg':\n # Predict the bin that is closest to the average of the probability dist\n # predicted_bins[i][j] = round(sum(bin_index * P(bin_index at i,j)))\n bin_indices = torch.arange(in_tensor.shape[-1]).float()\n predicted_bins = torch.round(torch.sum(in_tensor.mul(bin_indices),\n dim=len(in_tensor.shape)-1))\n return predicted_bins\n else:\n raise ValueError('method must be in {\\'avg\\',\\'max\\'}')", "def sum_numba(A):\n N = A.shape\n B = np.zeros((N[0], N[2]))\n for i in range(N[0]):\n for j in range(N[2]):\n for k in range(N[1]):\n B[i, j] += A[i, k, j]\n return B", "def bin_binarise(self):\n pass", "def area(self):\n return numpy.prod(\n numpy.meshgrid(*self.binwidths, indexing='ij'), axis=0)", "def amount_of_stairs(n):\n\n matrix = [[0] * n for i in range(n)]\n\n for i in range(0, n):\n for j in range(1, i):\n matrix[i][j] = sum(matrix[i - j - 1][:j])\n matrix[i][i] = 1\n\n # print_matrix(matrix)\n return sum(matrix[n-1])", "def total2d(arr: List[List[int]]) -> int: # _8 [✅]\n # ** try to solve this in one line using a list comprehension\n return sum( [sum(sub_arr) for 
sub_arr in arr ] )", "def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total", "def _gu_sum(a, **kwds):\n return np.sum(np.ascontiguousarray(a), axis=-1, **kwds)", "def minesweeper(matrix):\n \n num_rows = len(matrix)\n num_cols = len(matrix[0])\n \n adj_mines = []\n \n adj_row = [0]*num_cols\n \n for i in range(num_rows):\n adj_mines.append(adj_row[:])\n \n for r in range(num_rows):\n for c in range(num_cols):\n if matrix[r][c] == True:\n if (r-1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r-1][c-1] += 1\n if (r-1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r-1][c] += 1\n if (r-1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r-1][c+1] += 1\n if (r) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r][c-1] += 1\n if (r) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r][c+1] += 1\n if (r+1) in range(num_rows) and (c-1) in range(num_cols):\n adj_mines[r+1][c-1] += 1\n if (r+1) in range(num_rows) and (c) in range(num_cols):\n adj_mines[r+1][c] += 1\n if (r+1) in range(num_rows) and (c+1) in range(num_cols): \n adj_mines[r+1][c+1] += 1\n\n \n return adj_mines", "def matrixScore(self, A):\n\n def firstCol(matrix):\n \tfor row in matrix:\n \t\tif row[0] == 0:\n \t\t\tfor ind in range(len(row)):\n \t\t\t\trow[ind] ^= 1\n \treturn\n\n def 
nextCol(matrix):\n \tcolNum = len(matrix[0])\n \tfor col in range(1, colNum):\n \t\tcolSum = 0\n \t\tfor row in matrix:\n \t\t\tcolSum += row[col]\n \t\tif colSum < len(matrix)/2:\n \t\t\tfor row in matrix:\n \t\t\t\trow[col] ^= 1\n \treturn\n\n def binSum(matrix):\n \ttotalSum = 0\n \tfor row in matrix:\n \t\tbinStr = \"\"\n \t\tfor elem in row:\n \t\t\tbinStr += str(elem)\n \t\ttotalSum += int(binStr, 2)\n \treturn totalSum\n\n\n firstCol(A)\n nextCol(A)\n return binSum(A)", "def rowSum(mtx):\n try:\n for i in range(0, len(mtx)):\n assert len(mtx[i]) == len(mtx[i-1]) # check whether each list has the same length.\n \n res = list()\n for j in range(0, len(mtx[0])): \n tmp = 0\n for i in range(0, len(mtx)): \n tmp = tmp + mtx[i][j]\n res.append(tmp)\n return(res)\n \n except AssertionError as detail:\n return ('Length of lists is irregular or input format is wrong.')\n except TypeError as detail:\n return ('Undefined operand type')", "def compute_barycentrics_native(coords, mesh):\n triangle_indices = mesh.find_simplex(coords)\n X = mesh.transform[triangle_indices, :2]\n Y = coords - mesh.transform[triangle_indices, 2]\n b = np.einsum('ijk,ik->ij', X, Y)\n bcoords = np.c_[b, 1 - b.sum(axis=1)]\n return bcoords, triangle_indices", "def colintegrals (self):\n dy = self.ybins[1] - self.ybins[0]\n return self.colsums * dy", "def rowintegrals (self):\n dx = self.xbins[1] - self.xbins[0]\n return self.rowsums * dx", "def bin_centers(radial_bins):\n\n outer = radial_bins[1:]\n inner = radial_bins[:-1]\n return 0.5 * (outer + inner)", "def bin_reduce(data, axes_s):\n d1 = data\n \n # sort axes by stride distance to optimize for locality\n # doesn't seem to make much difference on modern systems...\n axes = [ (axis, d1.strides[axis]) for axis in range(d1.ndim) ]\n axes.sort(key=lambda p: p[1])\n assert len(axes_s) == data.ndim\n\n # reduce one axis at a time to shrink work for subsequent axes\n for axis in [p[0] for p in axes]:\n s = axes_s[axis]\n\n if s == 1:\n # skip useless copying for non-reducing axis\n continue\n\n # accumulate s-strided subsets that belong to each bin\n a = d1[ \n tuple(\n [ slice(None) for i in range(axis) ]\n + [ slice(0, 1-s, s) ]\n + [ slice(None) for i in range(d1.ndim - axis - 1) ]\n )\n ].astype(np.float32, copy=True)\n \n for step in range(1, s):\n a += d1[ \n tuple(\n [ slice(None) for i in range(axis) ]\n + [ slice(step, step < s and 1-s+step or None, s) ]\n + [ slice(None) for i in range(d1.ndim - axis - 1) ]\n )\n ]\n\n # compute single-axis bin averages from accumulation\n d1 = a * (1./s)\n\n return d1", "def colsums (self):\n return self.values.sum (axis=1)" ]
[ "0.7041019", "0.6156732", "0.6129798", "0.5976102", "0.59643245", "0.5771837", "0.57445884", "0.5654173", "0.5501301", "0.5317589", "0.5288595", "0.52820784", "0.5261982", "0.5255658", "0.52305037", "0.5229471", "0.52255195", "0.5207664", "0.5206341", "0.5196019", "0.5180939", "0.5179308", "0.51581776", "0.5140066", "0.5124696", "0.5121876", "0.5114722", "0.511348", "0.50980747", "0.50930995" ]
0.7172675
0
Bootstrap sampling of contacts in a sparse HiC map.
def subsample_contacts(M, n_contacts): S = M.data.copy() # Match cell idx to cumulative number of contacts cum_counts = np.cumsum(S) # Total number of contacts to sample tot_contacts = int(cum_counts[-1]) # Sample desired number of contacts from the range(0, n_contacts) array sampled_contacts = np.random.choice( int(tot_contacts), size=(n_contacts), replace=False ) # Get indices of sampled contacts in the cum_counts array idx = np.searchsorted(cum_counts, sampled_contacts, side="right") # Bin those indices to the same dimensions as matrix data to get counts sampled_counts = np.bincount(idx, minlength=S.shape[0]) # Get nonzero values to build new sparse matrix nnz_mask = sampled_counts > 0 sampled_counts = sampled_counts[nnz_mask].astype(np.float64) sampled_rows = M.row[nnz_mask] sampled_cols = M.col[nnz_mask] return sp.coo_matrix( (sampled_counts, (sampled_rows, sampled_cols)), shape=(M.shape[0], M.shape[1]), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bootstrap(X):\n return X[np.random.choice(list(range(X.shape[0])), size=X.shape[0]), :]", "def bootstrapping(datasample):\r\n \r\n datasample=df_to_array(datasample)\r\n \r\n boots_indexs=np.random.randint(len(datasample),size=(1,len(datasample)))\r\n \r\n whole_indexs=list(range(len(datasample)))\r\n \r\n missing_indexs=np.array(list(set(whole_indexs).difference(set(list(boots_indexs[0])))))\r\n \r\n \r\n return boots_indexs,missing_indexs", "def bootstrap_resample(labels):\n idxs = np.arange(len(labels))\n num_labels = max(labels) + 1\n bootstrap_idxs = np.zeros_like(idxs)\n ptr = 0\n for i in range(num_labels):\n strat = idxs[labels == i]\n bootstrap_idxs[ptr:ptr + len(strat)] = np.random.choice(strat, len(strat), replace=True)\n ptr += len(strat)\n return bootstrap_idxs", "def standard_bootstrap(dataset):\n randseed=np.random.randint(0,10000)\n np.random.seed(randseed)\n \n n = dataset.shape[0]\n b = np.random.randint(0, high=n-1, size=n)\n return dataset[b]", "def bootstrap_sample(data):\n return [random.choice(data) for _ in data]", "def bootstrap(data):\r\n size = int(len(data))\r\n train = resample(data, n_samples=size, replace=True)\r\n test = data.drop(train.index) \r\n return train[encoded_features], train[target], test[encoded_features], test[target]", "def bootstrap_data(self):\n for i in range(self.bootstraps):\n df_i = self.training_df.groupby(\n self.random_effect, group_keys=False\n ).apply(\n lambda x: x.sample(len(x), replace=True)\n )\n self.models.append(self.convert(df=df_i))", "def bootstrap_sample_generator_1D(samples: Union[NumpyFloatArray, NumpyIntArray]):\n n_samples = samples.shape[0]\n\n while True:\n _indices = np.random.randint(0, high=n_samples, size=n_samples)\n\n yield samples[_indices]", "def sample_from_ibp(K, alpha, sigma, c):\n pp = poissonparams(K, alpha, sigma, c)\n new_nodes = np.random.poisson(pp)\n Ncols = new_nodes.sum()\n node_count = np.zeros(Ncols)\n\n # used to build sparse matrix, entries of each Zij=1\n colidx = [] \n rowidx = []\n rightmost_node = 0\n\n # for each clique\n for n in range(K):\n # revisit each previously seen node\n for k in range(rightmost_node):\n prob_repeat = (node_count[k] - sigma) / (n + c)\n r = np.random.rand()\n if r < prob_repeat:\n rowidx.append(n)\n colidx.append(k)\n node_count[k] += 1\n\n for k in range(rightmost_node, rightmost_node + new_nodes[n]):\n rowidx.append(n)\n colidx.append(k)\n node_count[k] += 1\n \n rightmost_node += new_nodes[n]\n\n # build sparse matrix\n data = np.ones(len(rowidx), int)\n shape = (K, Ncols)\n Z = csr_matrix((data, (rowidx, colidx)), shape)\n\n return Z", "def sample_indices_from_coupling(c, num_samples=None, return_all = False, thr = 10**(-6)):\n\n if return_all:\n return [[i, j] for i in range(c.shape[0]) for j in range(c.shape[1]) if c[i,j] > thr]\n else:\n linear_samples = np.random.choice(range(c.size), size = num_samples, p = c.flat)\n \n if num_samples in [1, None]:\n linear_samples = [linear_samples]\n \n n_cols = c.shape[1]\n return np.array([[s//n_cols , s%n_cols ] for s in linear_samples])", "def bootstrap_sample_generator_2D(samples: Union[NumpyFloatArray, NumpyIntArray]):\n n_samples = samples.shape[0]\n\n num_cols = samples.shape[1]\n cols = np.arange(num_cols)\n\n while True:\n # generate indices to pick N values from f_A, f_B and f_C_i\n _indices = np.random.randint(0, high=n_samples, size=samples.shape)\n\n yield samples[_indices, cols]", "def bootstrap_sample_generator_3D(samples: Union[NumpyFloatArray, NumpyIntArray]):\n n_samples = samples.shape[1]\n 
array_shape = samples.shape[1:]\n num_cols = samples.shape[2]\n cols = np.arange(num_cols)\n\n while True:\n _indices = np.random.randint(0, high=n_samples, size=array_shape)\n\n yield samples[:, _indices, cols]", "def sparse(n, k):\n z = np.zeros(n)\n for i in np.random.choice( np.arange(n), k, replace=None ): # supports of nonzero entries\n z[i] = np.random.randn()\n return z", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def _sample_from_zeros(n: int, sparse: sp.csr_matrix) -> List[List[int]]:\n zeros = np.argwhere(np.logical_not(sparse.todense()))\n ids = np.random.choice(range(len(zeros)), size=(n,))\n return zeros[ids].tolist()", "def cbindMatrices(hm, args):\n hm2 = heatmapper.heatmapper()\n\n # Make a dict of region name:row associations\n hm.read_matrix_file(args.matrixFile[0])\n d = dict({x: dict() for x in hm.parameters[\"group_labels\"]})\n for idx, group in enumerate(hm.parameters[\"group_labels\"]):\n s = hm.parameters[\"group_boundaries\"][idx]\n e = hm.parameters[\"group_boundaries\"][idx + 1]\n for idx2, reg in enumerate(hm.matrix.regions[s:e]):\n d[group][reg[2]] = idx2 + s\n\n # Iterate through the other matrices\n for idx in range(1, len(args.matrixFile)):\n hm2.read_matrix_file(args.matrixFile[idx])\n # Add the sample labels\n hm.parameters['sample_labels'].extend(hm2.parameters['sample_labels'])\n # Add the sample boundaries\n lens = [x + hm.parameters['sample_boundaries'][-1] for x in hm2.parameters['sample_boundaries']][1:]\n hm.parameters['sample_boundaries'].extend(lens)\n\n # Add on additional NA initialized columns\n ncol = hm.matrix.matrix.shape[1]\n hm.matrix.matrix = np.hstack((hm.matrix.matrix, np.empty(hm2.matrix.matrix.shape)))\n hm.matrix.matrix[:, ncol:] = np.NAN\n\n # Update the values\n for idx2, group in enumerate(hm2.parameters[\"group_labels\"]):\n if group not in d:\n continue\n s = hm2.parameters[\"group_boundaries\"][idx2]\n e = hm2.parameters[\"group_boundaries\"][idx2 + 1]\n for idx3, reg in enumerate(hm2.matrix.regions[s:e]):\n if reg[2] not in d[group]:\n continue\n hm.matrix.matrix[d[group][reg[2]], ncol:] = hm2.matrix.matrix[s + idx3, :]\n\n # Append the special params\n for s in hm.special_params:\n hm.parameters[s].extend(hm2.parameters[s])\n\n # Update the sample parameters\n hm.matrix.sample_labels = hm.parameters['sample_labels']\n hm.matrix.sample_boundaries = hm.parameters['sample_boundaries']", "def boot_matrix(z, B):\n\n n = len(z) # sample size\n idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples\n return z[idz]", "def fetchCooler(c, regions, coolerFetch = lambda coo, ext:coo.matrix(balance=True, sparse=True).fetch(ext),\n mask=True, force=False, ):\n regions = [list(i) for i in regions]\n resolution = c.binsize\n\n for i in regions:\n if i[1] == None:\n i[1] = 0 \n if i[2] == None:\n i[2] = c.chromsizes[i[0]]\n\n \n for a in regions: \n if str(a[0]) not in c.chromnames:\n raise ValueError(\"Chromosome {0} from regions not found in cooler\".format(a))\n if (a[1] % resolution) != 0:\n raise ValueError(\"Start of an region should be a multiple fo resolution\")\n \n# bins = c.bins()[:]\n \n# # managing masks \n# if mask is False: \n# bins[\"mask\"] = 1 \n# elif mask is None:\n# assert \"mask\" in bins.columns\n# elif mask is True: \n# pass \n# elif callable(mask):\n# pass \n# else:\n# bins[\"mask\"] = mask \n \n \n for region in regions:\n matrix = coolerFetch(c, region)\n try: # setting matrix nans to zeros.\n matrix.data = 
np.nan_to_num(matrix.data, copy=False)\n except TypeError: #workaround for old numpy versions\n matrix.data = np.nan_to_num(matrix.data)\n# st,end = c.extent(region)\n# subbins = bins[st:end].copy()\n if mask is True: \n newmask = np.array((matrix.sum(axis=0) > 0 ))[0]\n# if callable(mask):\n# new_mask = mask(matrix)\n# subbins[\"mask\"] = newmask \n\n assert len(newmask) == matrix.shape[0]\n\n yield matrix, newmask", "def boot_matrix(z, B):\n z = np.array(z).flatten()\n n = len(z) # sample size\n idz = np.random.randint(0, n, size=(B, n)) # indices to pick for all boostrap samples\n return z[idz]", "def bootstrap_idxs(n, rng: np.random.Generator = None):\n if rng is None or type(rng) is not np.random.Generator:\n rng = np.random.default_rng(rng)\n in_bag = rng.integers(low=0, high=n, size=n)\n out_bag = np.array(list(set(range(n)) - set(in_bag)))\n return in_bag, out_bag", "def init_centroids(X,K):\n c = random.sample(list(X),K)\n return c", "def _SampleInputMatrix(nrows, bl, bu, distname='randomUniform'):\n npars = len(bl)\n x = np.zeros((nrows,npars))\n bound = bu-bl\n for i in range(nrows):\n # x[i,:]= bl + DistSelector([0.0,1.0,npars],distname='randomUniform')*bound # only used in full Vhoeys-framework\n x[i,:]= bl + np.random.rand(1,npars)*bound\n return x", "def bootstrap_sample_from_data(data, weights=None, seed=0):\n # Set up the random number generator\n RNG = np.random.default_rng(seed)\n N = data.shape[0]\n\n # Set up weights\n if weights is not None:\n cutoffs = np.cumsum(weights)\n else:\n cutoffs = np.linspace(0, 1, N)\n\n # Draw random indices\n indices = np.searchsorted(cutoffs, RNG.uniform(size=N))\n\n # Create a bootstrapped sample\n new_data = deepcopy(data[indices,])\n return new_data", "def sample_mapped_keys(mapping, min_coverage=50):\r\n if min_coverage == 0:\r\n return {}\r\n sample_keys = {}\r\n for key in mapping.keys():\r\n if (min_coverage > 1):\r\n sample_keys[key] = sample(mapping[key],\r\n min(min_coverage - 1, len(mapping[key])))\r\n else:\r\n sample_keys[key] = []\r\n sample_keys[key].append(key) # always include the centroid\r\n return sample_keys", "def test_sparse():\n\n rng = np.random.RandomState(0)\n\n X = rng.rand(20, 2)\n X[X < 0.8] = 0\n X_csr = sp.csr_matrix(X)\n\n bisect_means = BisectingKMeans(n_clusters=3, random_state=0)\n\n bisect_means.fit(X_csr)\n sparse_centers = bisect_means.cluster_centers_\n\n bisect_means.fit(X)\n normal_centers = bisect_means.cluster_centers_\n\n # Check if results is the same for dense and sparse data\n assert_allclose(normal_centers, sparse_centers, atol=1e-8)", "def simulate_sparse(n_rows, n_columns, row_density=0.001):\n # X = sparse.lil_matrix((n_rows, n_columns))\n X = []\n counter = 0\n for row in range(n_rows):\n counter += 1\n if (counter % 1000) == 0:\n print(counter)\n\n # X[row] = sparse.random(1, n_columns, density=row_density)\n X.append(sparse.random(1, n_columns, density=row_density, format='lil'))\n\n return sparse.vstack(X).tocsr()", "def subsampling(dataset, class_column_index, class_max_count, class_dict):\n out = []\n for row in dataset:\n cls = row[class_column_index]\n rInt = np.random.randint(0, class_dict[cls])\n if rInt <= class_max_count:\n out.append(row)\n ss_data = np.array(out)\n\n return ss_data", "def initialize_chromosomes(self):\n # Generate random chromosomes.\n for i in range(self.n_chromosomes):\n random_chromosome = self.generate_random_chromosome()\n self.chromosomes[0][i, :] = random_chromosome[0]\n self.chromosomes[1][i, :] = random_chromosome[1]\n\n 
self.repair_chromosomes()", "def ensure_sparse_cols(self,max_density,remove_lowest=True):\n if max_density >= 1:\n max_nnz = int(max_density)\n else:\n max_nnz = int(max_density*self.shape[0])\n for j in range(self.shape[1]):\n col = self.fast_get_col(j)\n excess = col.nnz - max_nnz\n if excess > 0:\n if remove_lowest:\n zero_entries = np.argsort(col.data)[:excess]\n else:\n zero_entries = random.sample(range(col.nnz),excess)\n col.data[zero_entries] = 0\n self.fast_update_col(j,col.data)", "def initialize(img):\n w, h, _ = img.shape\n for c in current_cluster_centers:\n x = np.random.randint(w)\n y = np.random.randint(h)\n c[:] = img[x, y]" ]
[ "0.60307676", "0.5836601", "0.57376236", "0.56982666", "0.5689801", "0.5620543", "0.5541793", "0.5484117", "0.54839253", "0.5435804", "0.54210573", "0.53805405", "0.5368052", "0.5355705", "0.5346038", "0.5340237", "0.5306805", "0.53025925", "0.5296711", "0.5295456", "0.5281978", "0.5273258", "0.526737", "0.52478176", "0.52470666", "0.5246784", "0.52421373", "0.5195706", "0.519006", "0.51863027" ]
0.59620684
1
Adds a frame around the input mask, given a kernel. The goal of this frame is to define margins around the matrix where the kernel will not perform convolution (denoted by 1). If the matrix is upper symmetric, a margin of half the kernel's width is added below the diagonal, and a maximum distance from the diagonal above which margins need not be drawn can be considered. Otherwise, margins are simply added on all 4 sides of the matrix.
def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None): if mask.dtype != bool: raise ValueError("Mask must contain boolean values") if not sp.issparse(mask): raise ValueError("Mask must be a sparse matrix") framed_mask = mask.copy() ms, ns = mask.shape mk, nk = kernel_shape if sym_upper and (max_dist is not None): # Remove diagonals further than scan distance in the input mask framed_mask = diag_trim(framed_mask, max_dist + max(nk, mk)).tocsr() max_m = max_dist + mk max_n = max_dist + nk else: max_m, max_n = ms, ns # Up and down margins initialized with zeros and filled as needed margin_1 = sp.csr_matrix((mk - 1, ns), dtype=bool) margin_2 = sp.csr_matrix((mk - 1, ns), dtype=bool) if sym_upper and (max_dist is not None): # Margin 1 (top) is in upper triangle -> fill missing up to scan dist margin_1[:, :max_n] = 1 else: margin_1[:, :] = 1 margin_2[:, :] = 1 framed_mask = sp.vstack([margin_1, framed_mask, margin_2], format="csr") # Left and right margin_1 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool) margin_2 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool) if sym_upper and (max_dist is not None): # Margin 2 (right) is in upper triangle-> fill missing up to scan dist margin_2[-(max_m + 1) :, :] = 1 # Fill only the start of left margin for the top-left corner margin_1[: mk - 1, :] = 1 else: margin_1[:, :] = 1 margin_2[:, :] = 1 framed_mask = sp.hstack([margin_1, framed_mask, margin_2], format="csr") if sym_upper: # LIL format is much faster when changing sparsity framed_mask = framed_mask.tolil() # Add margin below diagonal big_k = max(nk, mk) dia_margins = np.ones(big_k) dia_offsets = np.arange(-1, -big_k-1, -1) framed_mask += sp.diags( dia_margins, dia_offsets, shape=framed_mask.shape, format="lil", dtype=bool, ) framed_mask = framed_mask.tocsr() return framed_mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slidekernelthroughdiagonal(kernel, matrix):\n size_kernel = kernel.shape[0]\n size_matrix = matrix.shape[0]\n result = np.zeros([size_matrix])\n for i in range(size_matrix):\n # Calculate zero padding needed\n padding_b = -min(i - int(size_kernel/2), 0)\n padding_a = -min(size_matrix - int(i + size_kernel/2), 0)\n matrix_selection = matrix[max(0, i-int(size_kernel/2)):min(size_matrix, i+int(size_kernel/2)),max(0, i-int(size_kernel/2)):min(size_matrix, i+int(size_kernel/2))]\n matrix_padded = np.pad(matrix_selection, [(padding_b, padding_a), (padding_b, padding_a)])\n result[i] = np.sum(matrix_padded*kernel)\n return result", "def add_context_margin(image, margin_size, **pad_kwargs):\n return np.pad(image,\n ((margin_size, margin_size),\n (margin_size, margin_size),\n (0, 0)), **pad_kwargs)", "def add_kernel(org: np.ndarray, kernel: np.ndarray, center):\n # place filter at the array, and exclude any locations that are out of bounds\n c_r, c_c = center.row, center.column\n k_r, k_c = kernel.shape\n row_start = max(0, k_r/2 - c_r)\n pass", "def applyKernelToPoints(image,pts,kernel,border_type='BLACK'):\n \n \n pts=np.asarray(pts)\n image=np.asarray(image)\n image.shape\n if len(image.shape)>2:\n grayscale=False\n shaperesult=(len(pts),image.shape[2])\n elif len(image.shape)==1:\n image=image.reshape(1,image.shape[0])\n shaperesult=len(pts)\n grayscale=True\n\n else:\n grayscale=True\n\n # Kernel dimensions - they are integers\n krows=kernel.shape[0] \n kcols=kernel.shape[1]\n\n if krows%2==0:\n # Is even\n ldrows=(krows/2)-1\n udrows=krows/2\n \n else:\n # Is odd\n ldrows=krows/2\n udrows=krows/2\n\n if kcols%2==0:\n # Is even\n ldcols=(kcols/2)-1\n udcols=kcols/2\n else:\n # Is odd\n ldcols=kcols/2\n udcols=kcols/2\n\n #------------------------------------\n # ADD FRAME TO THE ORIGINAL IMAGE\n #------------------------------------\n\n dummyM=image.shape[0]+krows-1\n dummyN=image.shape[1]+kcols-1\n \n if grayscale==True:\n dummyimage=np.asarray(np.zeros((dummyM,dummyN)))\n \n else:\n dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2])))\n\n if border_type==\"WHITE\":\n dummyimage=dummyimage+255\n\n elif border_type==\"ANTIALIAS\":\n # Fills top border\n dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:]\n\n # Fills bottom border\n dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:]\n \n # Fills left border\n dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:]\n\n # Fills right border\n dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols]\n \n # Fills top, left corner\n dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols]\n\n # Fills bottom, left corner\n dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):]\n \n # Fills top, right corner\n dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols]\n \n # Fills bottom, right corner\n dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols]\n \n dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image \n \n result=np.asarray(np.zeros(shaperesult))\n \n pts[:,0]=pts[:,0]+ldrows\n pts[:,1]=pts[:,1]+ldcols\n \n for k in range(len(pts)):\n total=0\n \n for i in range(-ldrows,udrows+1):\n for j in range(-ldcols,udcols+1):\n total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols]\n \n \n result[k]=total\n \n \n return result", "def 
adjust_border(input_masks, device, margin=3):\n # Convert the mask to numpy array\n dtype = input_masks.dtype\n input_masks = np.squeeze(input_masks.cpu().numpy(), axis=1)\n\n erosion_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,\n (margin*2, margin*2))\n batch_size = input_masks.shape[0]\n \n output_mask_lst = []\n # Erode all the masks\n for i in range(batch_size):\n output_mask = cv2.erode(input_masks[i, ...], erosion_kernel)\n\n output_mask_lst.append(\n torch.tensor(output_mask, dtype=dtype, device=device)[None])\n \n # Concat back along the batch dimension.\n output_masks = torch.cat(output_mask_lst, dim=0)\n return output_masks.unsqueeze(dim=1)", "def cs4243_filter(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n\n # multiply recep_area with kernel\n conv_sum = 0.0\n for y in range(Hk):\n for x in range(Wk): \n conv_sum += kernel[y][x] * recep_area[y][x]\n filtered_image[i, j] = conv_sum\n ###\n\n return filtered_image", "def shift_kernel(kernel, shape, centre):\n h, w = kernel.shape\n assert(h % 2 == 1)\n assert(w % 2 == 1)\n half_h = np.floor(h/2)\n half_w = np.floor(w/2)\n \n result = np.zeros((shape[0]+2*half_h, shape[1]+2*half_w)) #zero pad to simplify edge handling \n\n ind_h = centre[0] + np.arange(0, 2*half_h+1, dtype='int') \n ind_w = centre[1] + np.arange(0, 2*half_w+1, dtype='int')\n result[ind_h[:,np.newaxis], ind_w] = kernel\n result = result[half_h:-half_h,half_w:-half_w]\n return result", "def convolution(matrix, kernel):\n assert assert_odd(kernel.shape[0])\n \n # Padded matrix (0s on the outsides)\n N = kernel.shape[0] # Get the dim for the kernel\n I = np.pad(matrix, int(N/2), \"constant\")\n \n # Now do the convolution\n C = np.zeros(matrix.shape) # This is the convolved image\n h, w = C.shape # Get width and height\n s = int(N/2) # Spacing of the matrix\n positions = [(i,j) for i in range(h) for j in range(w)]\n for (i,j) in positions:\n y, x = i+s,j+s # Shift the center to the right position\n \n # Calc the convolution at each pixel\n C[i,j] = np.sum(np.multiply(kernel, I[y-s:y+s+1,x-s:x+s+1]))\n \n # Return the clipped array as uint8\n return C", "def _pad_with_zeros(self, X, margin):\n newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))\n x_offset = margin\n y_offset = margin\n newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X\n return newX", "def gaussian_filter(img,f=5,K=1,var=1):\n i_x, i_y = np.shape(img) # image size\n radi = f//2 # window radius\n\n # create gaussian kernel\n def gaussian_kernel(f,K,var):\n \n # create coordinate information \n if f//2 == 0:\n x = np.linspace(-radi,radi,f+1)\n y = np.linspace(-radi,radi,f+1)\n x = np.delete(x, radi)\n y = np.delete(y, radi)\n else:\n x = np.linspace(-radi,radi,f)\n y = np.linspace(-radi,radi,f)\n\n m_x, m_y = np.meshgrid(x,y) # create 
coordinate\n r_gauss = m_x**2 + m_y**2 # distance to origin\n gauss = K*(np.exp(-r_gauss/(2*(var**2)))) # create kernel\n return gauss/gauss.sum()\n \n #mirror padding\n def mir_padding(img,f):\n img_p = np.zeros((i_x+2*radi,i_y+2*radi)) #create padding image\n img_p[radi:i_x+radi,radi:i_y+radi] = img #throw original image to padding image\n img_p[0:radi,radi:i_y+radi] = img[radi-1::-1,:] # padding top rows\n img_p[-radi::1,radi:i_y+radi] = img[-1:-radi-1:-1,:] # padding bottom rows\n img_p[radi:i_x+radi,0:radi] = img[:,radi-1::-1] # padding left column\n img_p[radi:i_x+radi,-radi::1] = img[:,-1:-radi-1:-1] # padding right column\n for i in range(f):\n img_p[0:radi,i] = img[radi-1-i,radi-1::-1] # padding upper-left corner\n img_p[0:radi,-i] = img[radi-1-i,-radi::1] # padding upper-righ corner\n img_p[-1:-radi-1:-1,i] = img[-radi+i,radi-1::-1] # padding lower-left corner\n img_p[-1:-radi-1:-1,-i] = img[-radi+i,-radi::1] # padding lower-right corner\n return img_p\n\n img_p = mir_padding(img,f) # create padding image\n g_kernel = gaussian_kernel(f,K,var) # create gaussian kernel\n\n #seperate kernel\n E = g_kernel[0,0]\n c = g_kernel[:,0]\n wT = np.reshape(g_kernel[0,:]/E,(f,1))\n\n gauss_image = np.zeros([i_x,i_y]) # create gauss image\n temp_image = np.zeros([i_x,i_y]) # create temp image for two 1D convolution\n old_c_sum = c.sum() # calculate sum of c before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for j in range(i_y):\n y_bound = i_y - j\n mod_c = c.copy()\n if j < radi:\n mod_c[0:radi-j] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n if j > i_y - radi - 1:\n mod_c[-1:-radi+y_bound-1:-1] = 0 \n new_c_sum = mod_c.sum()\n mod_c = mod_c*old_c_sum/new_c_sum \n for i in range(i_x):\n temp_image[i,j] = np.sum(img_p[i+radi,j:j+f]*mod_c)\n\n temp_image = mir_padding(temp_image,f) # create padding temp image for next 1D convolution\n old_wT_sum = wT.sum() # calculate sum of wT before modification\n\n # if elements of kernel are located within area of padding, substitute value with 0\n # calculate new value base on ratio between sum before and after modification\n for i in range(i_x):\n x_bound = i_x - i\n mod_wT = wT.copy()\n if i < radi:\n mod_wT[0:radi-i] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n if i > i_x - radi - 1:\n mod_wT[-1:-radi+x_bound-1:-1] = 0 \n new_wT_sum = mod_wT.sum()\n mod_wT = mod_wT*old_wT_sum/new_wT_sum \n for j in range(i_y):\n gauss_image[i,j] = np.sum(temp_image[i:i+f,j+radi]*mod_wT.T)\n\n return gauss_image", "def _zero_pad(self, kernel, size):\n if len(size) != kernel.ndim:\n size = kernel.shape[:1] + tuple(size) + kernel.shape[-1:]\n padsize = np.array(size) - np.array(kernel.shape)\n paddown = padsize // 2\n padup = padsize - paddown\n padarray = np.concatenate((padup[..., None],\n paddown[..., None]), axis=1)\n pads = tuple([tuple(p) for p in padarray])\n kernel_pad = np.pad(kernel, pads, 'constant', constant_values=0)\n return kernel_pad", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding will make derivatives at the image boundary very big,\n # whereas we want to ignore the edges at the boundary.\n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, 
pad_width, mode='edge')\n\n ### YOUR CODE HERE\n for i in range(Hi):\n for j in range(Wi):\n out[i,j] = np.sum(padded[i : i + Hk, j : j + Wk] * np.flip(kernel))\n ### END YOUR CODE\n\n return out", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding as used in the previous assignment can make\n # derivatives at the image boundary very big.\n \n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge') \n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.flipud(np.fliplr(kernel)) # flip h/v\n for h in range(Hi):\n for w in range(Wi):\n out[h, w] = np.sum(np.multiply(kernel, padded[h : h + Hk, w : w + Wk]))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def frame_fix_badpix_isolated(array, bpm_mask=None, sigma_clip=3, num_neig=5,\n size=5, protect_mask=0, cxy=None, mad=False, \n ignore_nan=True, verbose=True, full_output=False):\n if array.ndim != 2:\n raise TypeError('Array is not a 2d array or single frame')\n if size % 2 == 0:\n raise TypeError('Size of the median blur kernel must be an odd integer')\n\n if bpm_mask is not None:\n bpm_mask = bpm_mask.astype('bool')\n\n if verbose: start = time_ini()\n\n if num_neig > 0:\n neigh = True\n else:\n neigh = False\n\n frame = array.copy()\n if cxy is None:\n cy, cx = frame_center(frame)\n else:\n cx, cy = cxy\n \n if bpm_mask is None:\n ori_nan_mask = np.where(np.isnan(frame))\n ind = clip_array(frame, sigma_clip, sigma_clip, neighbor=neigh,\n num_neighbor=num_neig, mad=mad)\n bpm_mask = np.zeros_like(frame)\n bpm_mask[ind] = 1\n if ignore_nan:\n bpm_mask[ori_nan_mask] = 0\n if protect_mask:\n cir = disk((cy, cx), protect_mask, shape=bpm_mask.shape)\n bpm_mask[cir] = 0\n bpm_mask = bpm_mask.astype('bool')\n\n smoothed = median_filter(frame, size, mode='mirror')\n frame[np.where(bpm_mask)] = smoothed[np.where(bpm_mask)]\n array_out = frame\n count_bp = np.sum(bpm_mask)\n \n if verbose:\n msg = \"/nDone replacing {} bad pixels using the median of neighbors\"\n print(msg.format(count_bp))\n timing(start)\n \n if full_output:\n return array_out, bpm_mask\n else:\n return array_out", "def mask_frame(image, mask):\n # For segmentation mask display\n mask[mask == 1] = 2\n mask[mask == 0] = 1\n\n # segmentation will display final output\n segmentation = image\n segmentation = cv2.cvtColor(segmentation, cv2.COLOR_BGR2RGB)\n\n segmentation[:, :, 0] = segmentation[:, :, 0] * mask\n segmentation[:, :, 1] = segmentation[:, :, 1] * mask\n return segmentation", "def insert_padding(img, pad_h, pad_w):\n global frame_height, frame_width\n padding_3_dims = ((pad_h, pad_h), (pad_w, pad_w), (0, 0))\n # apply padding in the above dimensions with values 0\n padded_img = numpy.pad(img, padding_3_dims, 'constant', constant_values=0)\n return padded_img", "def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + 
(kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def convolution(img, kernel, padding=True):\n result = np.zeros_like(img)\n p_size_i = kernel.shape[0] // 2\n p_size_j = kernel.shape[1] // 2\n\n if padding:\n padded_img = np.zeros((img.shape[0] + 2 * p_size_i, img.shape[1] + 2 * p_size_j))\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n padded_img[i_first: i_last + 1, j_first: j_last + 1] = img\n else:\n padded_img = img.copy()\n i_first = p_size_i\n i_last = padded_img.shape[0] - p_size_i - 1\n j_first = p_size_j\n j_last = padded_img.shape[1] - p_size_j - 1\n \n for i in range(i_first, i_last):\n for j in range(j_first, j_last):\n window = padded_img[i - p_size_i: i + p_size_i + 1, j - p_size_j: j + p_size_j + 1]\n res_pix = np.sum(window * kernel)\n result[i - p_size_i, j - p_size_j] = res_pix\n return result", "def Padding_Signal(signal,M = 10):\t\t\t\t\t\t\t\t\t\t# Function to pad a signal\n\ts = signal.shape[0]\n\tsignal_change = np.zeros(s+2*M)\n\tsignal_change[M:s+M] = signal\n\t\n\treturn signal_change", "def zero_pad_sparse(mat, margin_h, margin_v, fmt=\"coo\"):\n\n sm, sn = mat.shape\n padded_mat = mat.copy()\n # Up and down margins initialized with zeros and filled as needed\n margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype)\n margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype)\n padded_mat = sp.hstack([margin_h_0, padded_mat, margin_h_0], format=\"csr\")\n padded_mat = sp.vstack([margin_v_0, padded_mat, margin_v_0], format=\"csr\")\n\n return padded_mat", "def hook(module, input):\n image_dimensions = input[0].size()[-2:]\n module.padding = _determine_padding_from_tf_same(image_dimensions, kernel_size, stride)", "def convolution(image: np.array, kernel: np.array) -> np.array:\n\n # default condition: apply SAME padding, and keep stride at 1\n stride_x = 1\n stride_y = 1\n padding_y = int(len(kernel - 1) / 2)\n padding_x = int(len((kernel[0]) - 1) / 2)\n # create the return array with with the same dimensions as <image>,\n # and then create a padded image\n convolved_image = np.zeros((len(image), len(image[0])))\n padded_image = np.zeros((len(image) + 2 * padding_y,\n len(image[0]) + 2 * padding_x))\n padded_image[padding_x: -padding_x, padding_y: -padding_y] = image\n\n for py in range(0, len(padded_image) - len(kernel), stride_y):\n for px in range(0, len(padded_image[0]) - len(kernel[0]), stride_x):\n # scan the matrix over columns in image array, then shift the matrix\n # down, and repeat\n padded_image_section = padded_image[py: py + len(kernel[0]),\n px: px + len(kernel)]\n # print(padded_image_section)\n convolved_image[py, px] = int(np.tensordot(padded_image_section,\n kernel))\n\n 
return convolved_image", "def fixed_padding(inputs, kernel_size, data_format='channels_last'):\n pad_total = kernel_size - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n return _padding(inputs, (pad_beg, pad_end), data_format)", "def add_padding(im, pad):\n\n return np.pad(im, pad_width=((pad, pad), (pad, pad), (0, 0)), mode='symmetric')", "def cs4243_filter_faster(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n # extract receptive area into matrix of shape (Hi*Wi, Hk*Wk)\n recep_areas = []\n for i in recep_fields_h:\n for j in recep_fields_w:\n recep_areas.append(image_pad[i: i+Hk, j: j+Wk].reshape(-1))\n out = np.stack(recep_areas)\n \n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel).reshape(Hk*Wk, 1)\n \n # dot product kernel and receptive areas\n filtered_image = np.dot(out, kernel).reshape(Hi, Wi)\n \n ###\n\n return filtered_image", "def pad(self):\n if self._mg_problem.boundaries[0] == 'periodic':\n # left side\n self.left[:] = self.mid[-self.borders[0]:]\n # right side\n self.right[:] = self.mid[:self.borders[1]]\n elif self._mg_problem.boundaries[0] == 'dirichlet':\n\n # left from border\n l_f_b = self.space_tensor[0:self.borders[0]]\n # right_from_border\n r_f_b = self.space_tensor[-self.borders[1]:]\n # left side\n self.left[:] = self.fl(l_f_b)\n # right side\n self.right[:] = self.fr(r_f_b)", "def cs4243_filter_fast(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n filtered_image = np.zeros((Hi, Wi))\n\n ###Your code here####\n \n # pad image to handle border pixels\n pad_height = (int)((Hk - 1)/2)\n pad_width = (int)((Wk - 1)/2)\n image_pad = pad_zeros(image, pad_height, pad_width)\n\n # Flip the kernel horizontal and vertical\n kernel = cs4243_rotate180(kernel)\n \n # compute effective output size, assume stride=1\n out_height = 1 + Hi - Hk + 2*pad_height\n out_width = 1 + Wi - Wk + 2*pad_width\n \n # get initial nodes of receptive fields\n recep_fields_h = [i for i in range(out_height)]\n recep_fields_w = [i for i in range(out_width)]\n \n for i in recep_fields_h:\n for j in recep_fields_w: \n # get receptive area\n recep_area = image_pad[i:i+Hk, j:j+Wk] \n filtered_image[i, j] = np.multiply(kernel, recep_area).sum()\n ###\n\n return filtered_image", "def conv_matrix(matrix, kernel):", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def kernel(self):\n\n # Create a blank kernel the appropriate size\n kernel = np.zeros((self.n_rows, self.n_cols), dtype=np.int)\n\n # Iterate through the offsets, turning on the correct pixels\n for offset in self.offsets:\n row, col = offset\n if np.all(offset == self.index):\n kernel[row, col] = 2\n else:\n kernel[row, col] = 1\n\n # Ensure that the index pixel is not zero for footprints where the\n # index pixel is not part of the footprint\n if kernel[self.index[0], self.index[1]] == 0:\n kernel[self.index[0], self.index[1]] = 3\n return kernel" ]
[ "0.56605875", "0.5587884", "0.5477105", "0.5454366", "0.5391526", "0.5310862", "0.52129424", "0.509257", "0.50285244", "0.5000354", "0.49889517", "0.49485168", "0.49176568", "0.48939496", "0.48860693", "0.4881778", "0.4866566", "0.48574573", "0.4845596", "0.48219603", "0.47991547", "0.4791694", "0.4751322", "0.47490332", "0.46867856", "0.46789053", "0.46648562", "0.46599883", "0.46580628", "0.46489722" ]
0.5740741
0
Ensure all elements defined as missing by the mask are set to zero in the signal. If this is not the case, raises an error.
def check_missing_mask(signal, mask): if sp.issparse(mask): # Check if there are nonzero values in the signal reported as missing # by the mask missing_with_signal = np.nonzero( abs(signal[mask.nonzero()[0], mask.nonzero()[1]]) > 0 )[0] if len(missing_with_signal) > 0: raise ValueError( "There are", len(missing_with_signal), "non-zero elements reported as missing.", ) else: if np.sum(abs(signal[mask > 0])) > 1e-10: raise ValueError( "There are", str(np.sum(abs(signal[mask > 0]))), "non-zero elements reported as missing.", )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applymask(self,mask):\n self.spec[mask==0]=np.nan", "def _check_missing_value_mask(self, missing_mask):\n if not missing_mask.any():\n raise ValueError(\"Input matrix is not missing any values\")\n if missing_mask.all():\n raise ValueError(\"Input matrix must have some non-missing values\")", "def zeros_like(self):\n raise NotImplementedError", "def reset_mask(self):\n\n self.mask = np.ones(self.dispersion.shape, dtype=bool)", "def aerosols(self, mask: Mask):\n raise NotImplementedError(\"Subclass must implement\")", "def getMask(self):\r\n mask = np.array(self.array, dtype=np.float32)\r\n mask[mask == 0] = np.nan\r\n return mask", "def _nodata_mask(self):\n if self.nodata_value is None:\n return np.ones_like(self.array, dtype=np.bool)\n return self.array != self.nodata_value", "def erase_missing(signal, valid_rows, valid_cols, sym_upper=True):\n if sym_upper and sp.issparse(signal):\n if np.any(valid_rows != valid_cols):\n raise ValueError(\n \"Valid rows and columns must be identical with sym_upper=True\"\n )\n if signal.shape[0] != signal.shape[1]:\n raise ValueError(\n \"Input matrix must be square when using sym_upper=True\"\n )\n # Make a boolean mask from good bins\n good_mask = np.isin(range(signal.shape[0]), valid_rows)\n # Set all pixels in a nondetectable bin to 0\n # For faster masking of bins, mask bins using dot product with an\n # identify matrix where bad bins have been masked on the diagonal\n # E.g. if removing the second bin (row and column):\n # 1 0 0 9 6 5 1 0 0 9 0 5\n # 0 0 0 X 6 8 7 X 0 0 0 = 0 0 0\n # 0 0 1 6 7 8 0 0 1 6 0 8\n mask_mat = sp.eye(signal.shape[0])\n mask_mat.data[0][~good_mask] = 0\n erased = mask_mat.dot(signal).dot(mask_mat)\n else:\n # Get a boolean array of missing (1) and valid (0) rows\n missing_rows = valid_to_missing(valid_rows, signal.shape[0])\n missing_cols = valid_to_missing(valid_cols, signal.shape[1])\n erased = signal.copy()\n erased[missing_rows, :] = 0\n erased[:, missing_cols] = 0\n\n return erased", "def unmasked_data(self):\n return numpy.ma.filled(self.data.astype(numpy.float_),\n fill_value=numpy.nan)", "def zero_mask(self):\n accum = 0\n for i in range(self.data.itemsize):\n accum += (0x55 << (i << 3))\n return accum", "def valid_to_missing(valid, size):\n missing = np.ones(size, dtype=bool)\n try:\n missing[valid] = False\n # In case there is no valid index\n except IndexError:\n pass\n missing = np.flatnonzero(missing)\n return missing", "def mask(self, mask, logger=logger):\n if self.nodata is not None:\n da_masked = self._obj.where(mask != 0, self.nodata)\n else:\n logger.warning(\"Nodata value missing, skipping mask\")\n da_masked = self._obj\n return da_masked", "def test_csum_ignore_nans():\n source = [np.zeros((16,), dtype=float) for _ in range(10)]\n source.append(np.full((16,), fill_value=np.nan))\n summed = csum(source, ignore_nan=True)\n assert np.allclose(summed, np.zeros_like(summed))", "def test_check_data_masked_input_data_non_nans(self):\n cube = self.cube.copy()\n cube.data[:, 0, 0] = 1000\n cube.data = np.ma.masked_equal(cube.data, 1000)\n expected_data = np.array(\n [self.percentile_25, self.percentile_50, self.percentile_75]\n )\n expected_data[:, 0, 0] = np.nan\n expected_data = np.ma.masked_invalid(expected_data)\n result = Plugin().process(cube)\n self.assertArrayAlmostEqual(result.data.data, expected_data.data, decimal=5)\n self.assertArrayEqual(result.data.mask, expected_data.mask)", "def setMask(self, mask):\n try:\n self.mask = mask\n self.inds = na.nonzero(self.mask.flat)[0]\n #print \"length 
of self.inds\",len(self.inds)\n #print self.inds\n self.dim = self.mask.shape[::-1]\n #print self.mask.shape\n return True\n except Exception as error:\n print(\"failed in setMask\", error)", "def updateEmptiesSet(self):\n self.emptiesSet = []\n for i in self.Range:\n if self.get_cell(i) == 0:\n self.emptiesSet.append(i)", "def _zero(array):\n a = array.ravel()\n for i in numba.prange(len(a)):\n a[i] = 0\n return True", "def zero_by_mask(mask, vals, replace_with=0.0):\n assert mask.dtype == tf.as_dtype(np.bool)\n ms = mask.get_shape().as_list()\n vs = vals.get_shape().as_list()\n mask = tf.ensure_shape(mask, vs[:-1] + [1])\n vals = tf.ensure_shape(vals, ms[:-1] + [vs[-1]])\n vals = tf.where_v2(mask, vals, replace_with)\n return vals", "def test_offcenter(self):\n actual = cm.ring_mask((5, 5), 1, 2, center=(2, 3))\n expected = np.array([[False, False, False, True, False],\n [False, False, True, False, True],\n [False, True, False, False, False],\n [False, False, True, False, True],\n [False, False, False, True, False]])\n self.assertIsNone(np.testing.assert_array_equal(actual, expected))", "def test_get_mean_mask():\n arr = np.array([-5, 4, 0, 3, -2, 7, 10, -10, 5, 6])\n m_arr = np.ma.masked_where(arr < 0, arr)\n assert_allclose(iqcalc.get_mean(m_arr), 5.0)", "def shrink_mask(self):\n m = self._mask\n if m.ndim and not m.any():\n self._mask = nomask\n return self", "def _initalize_mask(dataSubStack):\n # Initalize an array to store the output mask values\n outMask = np.zeros(dataSubStack.shape, dtype=bool)\n\n # Start by masking out NaNs or Infs\n NaNsOrInfs = np.logical_not(np.isfinite(dataSubStack.data))\n dataSubStack.mask = NaNsOrInfs\n\n return outMask, dataSubStack", "def clean_mask(image, sig=5, iters=3):\n\n mean, median, stddev = sigma_clipped_stats(image[image>0], sigma=sig, iters=iters)\n\n mask_bad = (np.abs(image - median) > sig * stddev) | (image == 0)\n image_ret = np.copy(image)\n image_ret[mask_bad] = 0\n \n return image_ret, mask_bad", "def _mask(self):\n if self.__mask is None:\n # need this to be *exactly* the numpy boolean False\n return nomask\n return self.__mask", "def getSignedMask(self):\r\n signedMask = np.array(self.array * self.sign, dtype=np.float32)\r\n signedMask[signedMask == 0] = np.nan\r\n return signedMask", "def mask_check(mask):\n if isinstance(mask, u.Quantity):\n if not (mask.unit == u.dimensionless_unscaled):\n raise TypeError(\n \"Mask should not be a non-dimensionless and unscaled Quantity!\"\n )\n mask = mask.value\n\n # Check for values of mask\n if np.any(mask > 1) or np.any(mask < 0):\n raise ValueError(\"Mask should be within range from 0 to 1 only.\")", "def test_set_vec_to_zero(self):\n self.init()\n set_to_zero_by_ptr(self.f64_1)\n set_to_zero_by_ref(self.ff64_1)\n set_to_zero_by_ref(self.i32_1)\n set_to_zero_by_ref(self.fi32_1)\n set_to_zero_by_ref(self.i64_1)\n set_to_zero_by_ref(self.fi64_1)\n set_to_zero_by_ref(self.f32_1)\n assert np.all(self.f64_1 == 0.)\n assert np.all(self.ff64_1 == 0.)\n assert np.all(self.i32_1 == 0)\n assert np.all(self.fi32_1 == 0)\n assert np.all(self.i64_1 == 0)\n assert np.all(self.fi64_1 == 0)\n assert np.all(self.f32_1 == 0.)\n assert np.all(self.ff32_1 == 0.)", "def mask(self):\n return np.ones((self.size, self.size))", "def test_nan_inf_mask(value):\n\n data = np.ones((9, 9))\n mask = np.zeros_like(data, dtype=bool)\n data[4, 4] = value\n mask[4, 4] = True\n radius = 2.0\n aper = CircularAperture((4, 4), radius)\n tbl = aperture_photometry(data, aper, mask=mask)\n desired = (np.pi * radius**2) - 1\n 
assert_allclose(tbl['aperture_sum'], desired)", "def nozero(arr):\n vals=sorted(list(set(np.array(arr).flatten())))\n if vals[0]<0:\n print(\"correcting for div/zero by replacing 0 with\",vals[1])\n arr[arr==0]=vals[1]\n return arr" ]
[ "0.7038222", "0.6567302", "0.63071376", "0.6267162", "0.6249988", "0.6000625", "0.5920129", "0.5898875", "0.5888325", "0.5880828", "0.5875885", "0.58599186", "0.5855308", "0.5855247", "0.5853476", "0.5831566", "0.5795374", "0.5774761", "0.5770358", "0.57691586", "0.5761126", "0.57492924", "0.5739421", "0.5736292", "0.5720537", "0.5681831", "0.5681092", "0.56380415", "0.5630747", "0.5626749" ]
0.6792724
1
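Illustrative aside (not part of the dataset row above): a minimal usage sketch for the check_missing_mask function shown in the preceding document field. The toy signal and mask below are invented for illustration, and the function itself is assumed to be in scope, defined exactly as in that field.

import scipy.sparse as sp

n = 10
signal = sp.lil_matrix((n, n))
signal[1, 3] = 2.0
signal[4, 4] = 1.5

mask = sp.lil_matrix((n, n), dtype=bool)
mask[2, :] = True   # pretend bin 2 is an undetectable (missing) bin
mask[:, 2] = True

# Every masked pixel is zero in the signal, so this returns silently.
check_missing_mask(signal.tocsr(), mask.tocsr())

# Putting signal inside the masked area now triggers the ValueError.
signal[2, 5] = 3.0
try:
    check_missing_mask(signal.tocsr(), mask.tocsr())
except ValueError as err:
    print("caught:", err)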
Given lists of valid rows and columns, generate a sparse matrix mask with missing pixels denoted as 1 and valid pixels as 0. If a max_dist is provided, upper symmetric matrices will only be flagged up to max_dist pixels from the diagonal.
def make_missing_mask( shape, valid_rows, valid_cols, max_dist=None, sym_upper=False ): # Error if the matrix upper symmetric but shape is rectangle or missing # rows and cols are different sm, sn = shape if sym_upper and (sm != sn or len(valid_rows) != len(valid_cols)): raise ValueError("Rectangular matrices cannot be upper symmetric") # Get a boolean array of missing (1) and valid (0) rows missing_rows = valid_to_missing(valid_rows, sm) # When matrix is sym., rows and cols are synonym, no need to compute 2x if sym_upper: missing_cols = missing_rows else: missing_cols = valid_to_missing(valid_cols, sn) # If upper sym., fill only upper diag up to max_dist. # E. g. with bins 1 and 3 missing # and a max_dist of 1: # 0 1 0 0 0 # 0 1 1 0 0 # 0 0 0 1 0 # 0 0 0 1 1 # 0 0 0 0 0 # For each missing bin, mask is apply 1 pixel upwards and 1 to the right # to fill only the upper triangle up to max_dist if sym_upper: # If no max dist has been specified, fill the whole upper triangle if max_dist is None: max_dist = min(shape) # Generate matrix of distance shifts by row. # Shape is len(missing_rows) x (max_dist + 1) # e.g.: 2 missing rows and max dist of 1 # 0 0 # 1 1 row_shifts = np.tile( np.array(range(max_dist + 1)), (len(missing_rows), 1) ).T # Compute row positions upwards to diagonal by subtracting missing rows # to the shifts. Following the previous example, if missing rows are # bins 1 and 3: # 1 3 # 0 2 rows_before = (missing_rows - row_shifts).flatten("F") # looking at pixels up from the bins, cols remain the same: # 1 3 # 1 3 cols_before = np.repeat(missing_rows, max_dist+1) # Compute col position to the right until diagonal by adding the shift # Note: upper symmetric, so row_shifts = col_shift_ # 1 3 # 2 4 cols_after = (missing_cols + row_shifts).flatten("F") # This time, rows remain constant since we are computing positions to # the right rows_after = np.repeat(missing_cols, max_dist+1) # Combine positions to the right and upwards rows = np.concatenate([rows_before, rows_after]) cols = np.concatenate([cols_before, cols_after]) data = np.ones(rows.shape, dtype=bool) # Remove entries where rows or cols are negative or larger than shape valid = (cols < sm) & (cols >= 0) & (rows < sm) & (rows >= 0) # Build mask mat with miss bins up to max scan dist in upper triangle mask = sp.coo_matrix( (data[valid], (rows[valid], cols[valid])), shape=shape, dtype=bool ).tocsr() else: mask = sp.csr_matrix(shape, dtype=bool) mask[missing_rows, :] = 1 mask[:, missing_cols] = 1 return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frame_missing_mask(mask, kernel_shape, sym_upper=False, max_dist=None):\n if mask.dtype != bool:\n raise ValueError(\"Mask must contain boolean values\")\n if not sp.issparse(mask):\n raise ValueError(\"Mask must be a sparse matrix\")\n\n framed_mask = mask.copy()\n ms, ns = mask.shape\n mk, nk = kernel_shape\n if sym_upper and (max_dist is not None):\n # Remove diagonals further than scan distance in the input mask\n framed_mask = diag_trim(framed_mask, max_dist + max(nk, mk)).tocsr()\n max_m = max_dist + mk\n max_n = max_dist + nk\n else:\n max_m, max_n = ms, ns\n # Up and down margins initialized with zeros and filled as needed\n margin_1 = sp.csr_matrix((mk - 1, ns), dtype=bool)\n margin_2 = sp.csr_matrix((mk - 1, ns), dtype=bool)\n if sym_upper and (max_dist is not None):\n # Margin 1 (top) is in upper triangle -> fill missing up to scan dist\n margin_1[:, :max_n] = 1\n else:\n margin_1[:, :] = 1\n margin_2[:, :] = 1\n framed_mask = sp.vstack([margin_1, framed_mask, margin_2], format=\"csr\")\n\n # Left and right\n margin_1 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)\n margin_2 = sp.csr_matrix((ms + 2 * (mk - 1), nk - 1), dtype=bool)\n\n if sym_upper and (max_dist is not None):\n # Margin 2 (right) is in upper triangle-> fill missing up to scan dist\n margin_2[-(max_m + 1) :, :] = 1\n # Fill only the start of left margin for the top-left corner\n margin_1[: mk - 1, :] = 1\n else:\n margin_1[:, :] = 1\n margin_2[:, :] = 1\n framed_mask = sp.hstack([margin_1, framed_mask, margin_2], format=\"csr\")\n\n if sym_upper:\n # LIL format is much faster when changing sparsity\n framed_mask = framed_mask.tolil()\n # Add margin below diagonal\n big_k = max(nk, mk)\n dia_margins = np.ones(big_k)\n dia_offsets = np.arange(-1, -big_k-1, -1)\n framed_mask += sp.diags(\n dia_margins,\n dia_offsets,\n shape=framed_mask.shape,\n format=\"lil\",\n dtype=bool,\n )\n framed_mask = framed_mask.tocsr()\n return framed_mask", "def ensure_sparse_cols(self,max_density,remove_lowest=True):\n if max_density >= 1:\n max_nnz = int(max_density)\n else:\n max_nnz = int(max_density*self.shape[0])\n for j in range(self.shape[1]):\n col = self.fast_get_col(j)\n excess = col.nnz - max_nnz\n if excess > 0:\n if remove_lowest:\n zero_entries = np.argsort(col.data)[:excess]\n else:\n zero_entries = random.sample(range(col.nnz),excess)\n col.data[zero_entries] = 0\n self.fast_update_col(j,col.data)", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def mask_sparse(self, threshold=10):\n self.MaskPrefix = 's' + self.MaskPrefix\n print('Masking pixels that do not have at least {0} coherent values'.format(threshold))\n # each pixel assigned an integer corresponding to # of igrams where coherent\n # NOTE: save coverage map if it doesn't exist already\n coverage = self.get_coverage()\n sparse = ma.masked_less(coverage, threshold)\n for ig in self.Set:\n igram = self.load_ma(ig)\n igram[sparse.mask] = ma.masked\n self.save_ma(ig, igram)\n print('Done')", "def list_to_sparse(inputs):\n\n data = list(itertools.chain(*inputs))\n row = list(itertools.chain(\n *[itertools.repeat(i, len(x)) for i, x in enumerate(inputs)]))\n col = list(itertools.chain(*[range(len(x)) for x in inputs]))\n\n s = coo_matrix((data, (row, col)), shape=(\n len(inputs), np.max([len(x) for x in inputs])))\n\n return s", "def _set_builder_fast(m, 
distances):\n\n if not is_matrix(m):\n raise ValueError(\"Expected numpy matrix\")\n\n if not is_matrix_binary(m):\n raise ValueError(\"Expected binary matrix\")\n\n if not is_matrix_correct(m):\n raise ValueError(\"Expected matrix that has valid structure (not more than one '1' in row)\")\n\n n, k = np.shape(m)\n ones_count, one_pos, has_one, where_one, zero_rows = get_matrix_info(m, n)\n\n # run through all distances in D\n for distance in distances:\n\n # check that distance is correct\n if distance < 0 or distance > ones_count + n:\n continue\n\n if distance == 0:\n yield m.copy()\n continue\n\n for num_ones_removed in range(np.min([distance, ones_count]) + 1):\n for clear_rows in combinations(one_pos[0], num_ones_removed):\n\n cleared_matrix = m.copy()\n cleared_matrix[clear_rows, :] = 0\n remained_rows = np.array(list((set(range(n)) - set(one_pos[0])).union(set(clear_rows))))\n\n for rows in combinations(remained_rows, distance - num_ones_removed):\n\n if len(rows) == 0:\n yield cleared_matrix.copy()\n continue\n\n avaliable_column_indices = [get_zero_indices(where_one[row], k) for row in rows]\n\n rows = np.array(rows, dtype=np.int32)\n rows_count = rows.size\n\n if (k ** rows_count)*n*k*4//1024//1024 > 1024:\n raise ValueError(\"Looks that you don't have enough RAM\")\n\n indices = np.array(np.meshgrid(*avaliable_column_indices)).T.reshape(-1, rows_count)\n\n cache_size = np.shape(indices)[0]\n\n indices += rows*k\n indices = (indices.T + np.arange(cache_size)*(n*k)).T\n indices = indices.reshape(indices.size)\n\n matrix_cache = np.tile(cleared_matrix, (cache_size, 1, 1))\n matrix_cache.put(indices, 1)\n\n # print(\"Mb for cache: \" + str((cache_size*rows_count + cache_size*n*k)*4//1024//1024))\n\n for mat in matrix_cache:\n yield mat", "def generate_inpaint_mask(n_samples, n_colors, spatial_width):\n mask = np.zeros((n_samples, n_colors, spatial_width, spatial_width), dtype=bool)\n # simple mask -- just mask out half the image\n mask[:,:,:,spatial_width/2:] = True\n return mask.ravel()", "def sparse_initialization_weights(self):\n W = []\n mu, sigma = 0, 1/self.non_zero_units\n\n for i in xrange(self.n_in):\n row = np.zeros(self.n_out)\n non_zeros = self.rng.normal(mu, sigma, self.non_zero_units)\n # non_zeros /= non_zeros.sum()\n non_zero_idxs = self.rng.permutation(\n self.n_out)[0:self.non_zero_units]\n for j in xrange(self.non_zero_units):\n row[non_zero_idxs[j]] = non_zeros[j]\n W.append(row)\n W = np.asarray(W, dtype=theano.config.floatX)\n return W", "def idseqs_to_mask(idseqs: List[List[int]],\n n_seqlen: Optional[int] = None,\n n_vocab_sz: Optional[int] = None,\n ignore: Optional[List[int]] = [],\n dtype: Optional[torch.dtype] = torch.bool,\n dense: Optional[bool] = False\n ) -> torch.sparse.FloatTensor:\n if n_seqlen is None:\n n_seqlen = max([len(seq) for seq in idseqs])\n\n # create a list of IDs\n if n_vocab_sz is None:\n ids = set(itertools.chain(*idseqs))\n else:\n ids = set(range(0, n_vocab_sz))\n\n # remove IDs that we ignore\n ids = ids.difference(set(ignore))\n n_features = len(ids)\n\n # convert to list to lookup with .index() method\n ids = list(ids)\n\n # loop over each ID sequence\n masks = []\n for seq in idseqs:\n # extract index pairs of the sparse matrix\n featidx = []\n seqidx = []\n for step, elem in enumerate(seq[:n_seqlen]):\n try:\n featidx.append(ids.index(elem))\n seqidx.append(step)\n except Exception:\n pass\n # convert to COO matrix\n tmp = torch.sparse.FloatTensor(\n indices=torch.LongTensor([seqidx, featidx]),\n values=torch.FloatTensor([1.0 
for _ in range(len(seqidx))]),\n size=torch.Size([n_seqlen, n_features])\n ).coalesce()\n # save it\n masks.append(tmp)\n\n # stack into one 3D tensor <batch_sz, n_seqlen, vocab_sz>\n masks = torch.stack(masks).coalesce()\n\n # convert to dense matrix if requested\n if dense:\n masks = masks.to_dense().type(dtype)\n\n # done\n return masks", "def generate_random_sparse_array(nrows, ncols, numdense):\n i = np.random.randint(0, nrows, numdense)\n j = np.random.randint(0, ncols, numdense)\n data = np.random.randint(1,6, numdense)\n ij = np.vstack((i,j))\n return coo_matrix((data, ij), shape=(nrows, ncols))", "def _generate_sketch_matrix(rand_h, rand_s, output_dim):\n\n # Generate a sparse matrix for tensor count sketch\n assert (rand_h.ndim == 1 and rand_s.ndim == 1 and len(rand_h) == len(rand_s))\n assert (np.all(rand_h >= 0) and np.all(rand_h < output_dim))\n\n input_dim = len(rand_h)\n indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],\n rand_h[..., np.newaxis]), axis=1)\n i = torch.LongTensor(indices).t()\n v = torch.IntTensor(rand_s)\n sparse_sketch_matrix = torch.sparse.IntTensor(i, v, torch.Size([input_dim, output_dim] )).to_dense().float().cuda()\n # I used to want to used sparse matrix, but the autograd is not suported.\n return Variable(sparse_sketch_matrix)", "def generate_square_subsequent_mask(sz: int, device: torch.device) -> torch.Tensor:\n mask = (torch.triu(torch.ones(sz, sz, device=device)) == 1).transpose(0, 1)\n mask = (\n mask.float()\n .masked_fill(mask == 0, float(\"-inf\"))\n .masked_fill(mask == 1, float(0.0))\n )\n return mask", "def test_sparse_distance_matrix(coordinates, max_dist, p):\n original_tree = scipyKDTree(coordinates)\n redis_tree = redisKDTree(coordinates)\n\n original_output = original_tree.sparse_distance_matrix(original_tree, max_dist, p)\n redis_output = redis_tree.sparse_distance_matrix(redis_tree, max_dist, p)\n\n assert dict_close(redis_output, original_output)", "def symmetric_submatrix(self, *clip_ids):\n with self._rw_lock.read_lock():\n with SimpleTimer(\"Checking inputs\", self._log):\n if not self.is_symmetric():\n raise RuntimeError(\"Cannot get a symmetric sub-matrix if \"\n \"the kernel is not square!\")\n # DEPRECATED: Allowing the use of this method without explicitly\n # providing background cIDs. This object will\n # probably not ever be used this way, but there's no\n # reason to explicitly disallow it.\n # if self._bg_cid_vec is None:\n # raise RuntimeError(\"Cannot create the square submatrix \"\n # \"without the background flag vector!\")\n\n try:\n clip_ids = [int(e) for e in clip_ids]\n except:\n raise ValueError(\"Not all clip IDs could be used as ints! \"\n \"Given: %s\" % clip_ids)\n\n id_diff = set(clip_ids).difference(self._row_id_index_map)\n assert not id_diff, \\\n \"Not all clip IDs provided are represented in this \" \\\n \"distance kernel matrix! 
(difference: %s)\" \\\n % id_diff\n del id_diff\n\n with SimpleTimer(\"Computing union of BG clips and provided IDs\",\n self._log):\n if self._bg_cid_set is not None:\n all_cids = self._bg_cid_set.union(clip_ids)\n else:\n all_cids = set(clip_ids)\n\n # Reorder the given clip IDs so that they are in the same relative\n # order as the kernel matrix edges.\n focus_indices = []\n focus_clipids = []\n for idx, cid in enumerate(self._row_id_index_map):\n if (cid in all_cids) and (cid not in focus_clipids):\n focus_indices.append(idx)\n focus_clipids.append(cid)\n\n # index-to-isBG map for return\n # -> IDs provided as arguments are to be considered non-background,\n # even if a the ID is in the background set. All other IDs in the\n # union then must be from the background set.\n focus_id2isbg = []\n for idx in focus_indices:\n cid = self._row_id_index_map[idx]\n focus_id2isbg.append(False if cid in clip_ids else True)\n\n # Create new matrix from row-column intersections\n ret_mat = self._kernel[focus_indices, :][:, focus_indices]\n\n return focus_clipids, focus_id2isbg, ret_mat", "def set_zero(mn_matrix): \r\n \r\n row_flag = [0]*len(mn_matrix)\r\n column_flag = [0]*len(mn_matrix[0])\r\n for i in range(0, len(row_flag)):\r\n for j in range(0, len(column_flag)):\r\n if mn_matrix[i][j] == 0:\r\n row_flag[i] = 1\r\n column_flag[j] = 1\r\n \r\n for i in range(0, len(row_flag)):\r\n for j in range(0, len(column_flag)):\r\n if row_flag[i] == 1 or column_flag[j] == 1: \r\n mn_matrix[i][j] = 0", "def remove_bad_cells(self, *dims):\n ranges = [DimRange(d, 0, np.inf) for d in dims]\n return self.gate(*ranges)", "def create_hop_matrix(G, max_hops, node_list):\n distances = dict(nx.all_pairs_dijkstra_path_length(G))\n hop_matrix = torch.zeros(max_hops, G.number_of_nodes(), G.number_of_nodes(), dtype=torch.int)\n for hop in range(max_hops):\n for i, node_from in enumerate(node_list):\n for j, node_to in enumerate(node_list):\n if node_to in distances[node_from].keys() and distances[node_from][node_to] == hop:\n hop_matrix[hop,i,j] = 1\n return hop_matrix", "def validity_mask(tetrimino, columns):\n validity = np.zeros((4, columns), dtype=bool)\n for i, t in enumerate(tetrimino_dict[tetrimino]):\n l = columns - t.shape[1] + 1\n validity[i, :l] = 1\n return validity.flatten()", "def to_sparse_matrix(self, grid, format=None):\n S = self.centered_stencil()\n # print(\"grid :\")\n\n grid = tuple(grid)\n # print(grid)\n if not (np.asarray(S.shape) % 2 == 1).all():\n raise ValueError('all stencil dimensions must be odd')\n\n assert_condition(len(grid) == np.rank(S), ValueError,\n 'stencil rank must equal number of grid dimensions')\n assert_condition(min(grid) >= 1, ValueError,\n 'grid dimensions must be positive')\n\n N_v = np.prod(grid) # number of vertices in the mesh\n N_s = (S != 0).sum() # number of nonzero stencil entries\n\n # diagonal offsets\n diags = np.zeros(N_s, dtype=int)\n\n # compute index offset of each dof within the stencil\n strides = np.cumprod([1] + list(reversed(grid)))[:-1]\n indices = tuple(i.copy() for i in S.nonzero())\n for i,s in zip(indices,S.shape):\n i -= s // 2\n for stride,coords in zip(strides, reversed(indices)):\n diags += stride * coords\n\n #\n data = S[S != 0].repeat(N_v).reshape(N_s, N_v)\n indices = np.vstack(indices).T\n\n # zero boundary connections\n for index,diag in zip(indices,data):\n diag = diag.reshape(grid)\n for n,i in enumerate(index):\n if i > 0:\n s = [ slice(None) ]*len(grid)\n s[n] = slice(0,i)\n diag[s] = 0\n elif i < 0:\n s = [ slice(None) ]*len(grid)\n 
s[n] = slice(i,None)\n diag[s] = 0\n\n # remove diagonals that lie outside matrix\n mask = abs(diags) < N_v\n if not mask.all():\n diags = diags[mask]\n data = data[mask]\n\n # sum duplicate diagonals\n if len(np.unique(diags)) != len(diags):\n new_diags = np.unique(diags)\n new_data = np.zeros( (len(new_diags),data.shape[1]), dtype=data.dtype)\n for dia,dat in zip(diags,data):\n n = np.searchsorted(new_diags,dia)\n new_data[n,:] += dat\n\n diags = new_diags\n data = new_data\n\n return sprs.dia_matrix((data,diags), shape=(N_v, N_v)).asformat(format)", "def zero_matrix(matrix):\n rows = set()\n columns = set()\n m = len(matrix)\n n = len(matrix[0])\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n\n for i in range(m):\n for j in range(n):\n if i in rows or j in columns:\n matrix[i][j] = 0\n return matrix", "def get_diagonal_mask(data):\n mask = np.zeros_like(data, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n return mask", "def _fix_uniq_col(self):\n # subgradient; for two boolean arrays, multiplication seems to be the best way \n # (equivalent to logical_and)\n n_covered_col = self.a_csr.dot(np.ones(self.ncols)) \n ifix = np.zeros(self.ncols, dtype=bool)\n if (np.count_nonzero(n_covered_col) != self.mrows):\n raise ValueError(\"There are uncovered rows! Please check your input!\")\n if (np.any(n_covered_col==1)):\n inonzero = self.a_csr[n_covered_col==1,:].nonzero()\n ifix[inonzero[1]] = True\n\n return ifix", "def compress_csr(self):\n _, unique, indices = np.unique(\n self.m*self.rows + self.cols,\n return_index=True, return_inverse=True)\n self.rows = self.rows[unique]\n self.cols = self.cols[unique]\n self.vals = np.bincount(indices, weights=self.vals)", "def simulate_sparse(n_rows, n_columns, row_density=0.001):\n # X = sparse.lil_matrix((n_rows, n_columns))\n X = []\n counter = 0\n for row in range(n_rows):\n counter += 1\n if (counter % 1000) == 0:\n print(counter)\n\n # X[row] = sparse.random(1, n_columns, density=row_density)\n X.append(sparse.random(1, n_columns, density=row_density, format='lil'))\n\n return sparse.vstack(X).tocsr()", "def generate_sparse(n, s):\n x = np.zeros(n)\n I = np.random.randint(0, n, s)\n x[I] = 1\n return x", "def Problem2(n):\n diag_entries = np.empty((3,n))\n diag_entries[0] = np.ones(n)*(-1)\n diag_entries[1] = np.ones(n)*2\n diag_entries[2] = np.ones(n)*(-1)\n A = sparse.spdiags(diag_entries, [-1,0,1],n,n,format=\"csr\")\n return A", "def makeZero(m, n):\n Matrix.validate_dimensions(m, n)\n data = [[False for j in range(n)] for i in range(m)]\n return BooleanMatrix(m, n, data)", "def generate_sketch_matrix(rand_h, rand_s, output_dim):\n\n # Generate a sparse matrix for tensor count sketch\n rand_h = rand_h.astype(np.int64)\n rand_s = rand_s.astype(np.float32)\n assert(rand_h.ndim == 1 and rand_s.ndim ==\n 1 and len(rand_h) == len(rand_s))\n assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))\n\n input_dim = len(rand_h)\n indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],\n rand_h[..., np.newaxis]), axis=1)\n indices = torch.from_numpy(indices)\n rand_s = torch.from_numpy(rand_s)\n sparse_sketch_matrix = torch.sparse.FloatTensor(\n indices.t(), rand_s, torch.Size([input_dim, output_dim]))\n return sparse_sketch_matrix.to_dense().cuda()", "def generate_sketch_matrix(rand_h, rand_s, output_dim):\n\n # Generate a sparse matrix for tensor count sketch\n rand_h = rand_h.astype(np.int64)\n rand_s = rand_s.astype(np.float32)\n assert(rand_h.ndim == 1 and rand_s.ndim ==\n 1 
and len(rand_h) == len(rand_s))\n assert(np.all(rand_h >= 0) and np.all(rand_h < output_dim))\n\n input_dim = len(rand_h)\n indices = np.concatenate((np.arange(input_dim)[..., np.newaxis],\n rand_h[..., np.newaxis]), axis=1)\n indices = torch.from_numpy(indices)\n rand_s = torch.from_numpy(rand_s)\n sparse_sketch_matrix = torch.sparse.FloatTensor(\n indices.t(), rand_s, torch.Size([input_dim, output_dim]))\n return sparse_sketch_matrix.to_dense().cuda()", "def _freespace_matrix(distance):\n\n return np.array([[1., distance], [0., 1.]])" ]
[ "0.5894775", "0.58664924", "0.5579", "0.55251396", "0.545152", "0.53834623", "0.53789914", "0.5377302", "0.53023285", "0.5288333", "0.51945496", "0.51902205", "0.5188426", "0.51522386", "0.5135295", "0.5130921", "0.5122746", "0.51174325", "0.51109743", "0.508941", "0.5082233", "0.5076851", "0.5067473", "0.50432616", "0.5035504", "0.5027917", "0.50037354", "0.50015986", "0.50015986", "0.49865156" ]
0.7036244
0
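Illustrative aside (not part of the dataset row above): a quick sanity check of the make_missing_mask document field, using a made-up 6x6 shape with bins 1 and 3 missing. It assumes both make_missing_mask and the helper valid_to_missing that it calls internally (a snippet of which appears among the negatives of the previous row) are defined in scope.

import numpy as np

shape = (6, 6)
valid = np.array([0, 2, 4, 5])          # bins 1 and 3 are missing
mask = make_missing_mask(shape, valid, valid, max_dist=1, sym_upper=True)
print(mask.toarray().astype(int))
# Missing bins are flagged 1 pixel upwards and 1 pixel to the right of the
# diagonal, upper triangle only, reproducing the pattern drawn in the
# function's own comments.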
Adds margin of zeros around an input sparse matrix.
def zero_pad_sparse(mat, margin_h, margin_v, fmt="coo"): sm, sn = mat.shape padded_mat = mat.copy() # Up and down margins initialized with zeros and filled as needed margin_h_0 = sp.csr_matrix((sm, margin_h), dtype=mat.dtype) margin_v_0 = sp.csr_matrix((margin_v, sn + 2 * margin_h), dtype=mat.dtype) padded_mat = sp.hstack([margin_h_0, padded_mat, margin_h_0], format="csr") padded_mat = sp.vstack([margin_v_0, padded_mat, margin_v_0], format="csr") return padded_mat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _pad_with_zeros(self, X, margin):\n newX = np.zeros((X.shape[0] + 2 * margin, X.shape[1] + 2 * margin, X.shape[2]))\n x_offset = margin\n y_offset = margin\n newX[x_offset:X.shape[0] + x_offset, y_offset:X.shape[1] + y_offset, :] = X\n return newX", "def OffsetSparseMatrix(SparseMatrix, numberOfVariables, numberOfEquations):\n\n class OffsetSparseMatrixClass(SparseMatrix):\n equationIndex = 0\n varIndex = 0\n\n def __init__(self, mesh, bandwidth=0, sizeHint=None,\n numberOfVariables=numberOfVariables, numberOfEquations=numberOfEquations):\n SparseMatrix.__init__(self, mesh=mesh, bandwidth=bandwidth, sizeHint=sizeHint,\n numberOfVariables=numberOfVariables, numberOfEquations=numberOfEquations)\n\n def put(self, vector, id1, id2):\n SparseMatrix.put(self, vector, id1 + self.mesh.numberOfCells * self.equationIndex, id2 + self.mesh.numberOfCells * self.varIndex)\n\n def addAt(self, vector, id1, id2):\n SparseMatrix.addAt(self, vector, id1 + self.mesh.numberOfCells * self.equationIndex, id2 + self.mesh.numberOfCells * self.varIndex)\n\n def addAtDiagonal(self, vector):\n if type(vector) in [type(1), type(1.)]:\n tmp = numerix.zeros((self.mesh.numberOfCells,), 'd')\n tmp[:] = vector\n SparseMatrix.addAtDiagonal(self, tmp)\n else:\n SparseMatrix.addAtDiagonal(self, vector)\n\n return OffsetSparseMatrixClass", "def OffsetSparseMatrix(SparseMatrix, numberOfVariables, numberOfEquations):\n\n class OffsetSparseMatrixClass(SparseMatrix):\n equationIndex = 0\n varIndex = 0\n\n def __init__(self, mesh, bandwidth=0, sizeHint=None,\n numberOfVariables=numberOfVariables, numberOfEquations=numberOfEquations):\n SparseMatrix.__init__(self, mesh=mesh, bandwidth=bandwidth, sizeHint=sizeHint,\n numberOfVariables=numberOfVariables, numberOfEquations=numberOfEquations)\n\n def put(self, vector, id1, id2):\n SparseMatrix.put(self, vector, id1 + self.mesh.numberOfCells * self.equationIndex, id2 + self.mesh.numberOfCells * self.varIndex)\n\n def addAt(self, vector, id1, id2):\n SparseMatrix.addAt(self, vector, id1 + self.mesh.numberOfCells * self.equationIndex, id2 + self.mesh.numberOfCells * self.varIndex)\n\n def addAtDiagonal(self, vector):\n if type(vector) in [type(1), type(1.)]:\n tmp = numerix.zeros((self.mesh.numberOfCells,), 'd')\n tmp[:] = vector\n SparseMatrix.addAtDiagonal(self, tmp)\n else:\n SparseMatrix.addAtDiagonal(self, vector)\n\n return OffsetSparseMatrixClass", "def makesparse(matrix):\n n = matrix[0].size\n elements = []\n for i in range(n):\n for j in range(n):\n if matrix[i][j] != 0 :\n temp = MatrixElement(i, j, matrix[i][j])\n elements.append(temp)\n return SparseMatrix(n, elements)", "def pad_csr(a, newshape):\n n, m = a.shape\n a._shape = newshape\n a.indptr = np.pad(a.indptr, (0, newshape[0] - n), 'edge')", "def ident_zeros(A):\n assert A.size[0] == A.size[1]\n A.assemble()\n o_range = A.getOwnershipRange()\n rows = []\n for i in range(o_range[1]-o_range[0]):\n indices, values = A.getRow(o_range[0]+i)\n absrow = sum(abs(values))\n if absrow < 1e-6:\n rows.append(o_range[0] + i)\n add_diagonal(A.handle, numpy.array(rows))", "def pad_zeroes(m, axis, thick):\n\n if axis == 1:\n rows = m.shape[0]\n cols = thick\n elif axis == 0:\n rows = thick\n cols = m.shape[1]\n\n return np.concatenate((m, np.zeros((rows,cols))), \n axis)", "def sp_zeros_like(x):\r\n\r\n # TODO: don't restrict to CSM formats\r\n _, _, indptr, shape = csm_properties(x)\r\n return CSM(format=x.format)(data=numpy.array([], dtype=x.type.dtype),\r\n indices=numpy.array([], dtype='int32'),\r\n 
indptr=tensor.zeros_like(indptr),\r\n shape=shape)", "def pad_zeros(x):\n dim = tf.shape(x)[0]\n log2_dim = tf.math.log(tf.cast(dim, tf.float32)) / tf.math.log(2.0)\n pad_dim = tf.pow(2, tf.cast(tf.math.ceil(log2_dim), tf.int32))\n with tf.control_dependencies([tf.debugging.assert_rank(x, 1)]):\n return tf.pad(x, [[0, tf.maximum(0, pad_dim - dim)]])", "def _zero_pad(self, kernel, size):\n if len(size) != kernel.ndim:\n size = kernel.shape[:1] + tuple(size) + kernel.shape[-1:]\n padsize = np.array(size) - np.array(kernel.shape)\n paddown = padsize // 2\n padup = padsize - paddown\n padarray = np.concatenate((padup[..., None],\n paddown[..., None]), axis=1)\n pads = tuple([tuple(p) for p in padarray])\n kernel_pad = np.pad(kernel, pads, 'constant', constant_values=0)\n return kernel_pad", "def setZeroes(self, matrix: List[List[int]]) -> None:\n zero_columns = []\n zero_rows = []\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n zero_columns.append(j)\n zero_rows.append(i)\n for zero_column in zero_columns:\n for row in matrix:\n row[zero_column] = 0\n for zero_row in zero_rows:\n matrix[zero_row] = [0] * len(matrix[0])", "def add_zero(matrix):\n if len(matrix.shape) == 2:\n matrix = numpy.insert(matrix, 0, 0, axis=1)\n matrix = numpy.insert(matrix, matrix.shape[1], 0, axis=1)\n row01 = numpy.zeros((matrix.shape[1]), numpy.uint8)\n matrix = numpy.insert(matrix, 0, row01, axis=0)\n matrix = numpy.insert(matrix, matrix.shape[0], row01, axis=0)\n\n if len(matrix.shape) == 3:\n pixel = numpy.zeros((matrix.shape[2]), numpy.uint8)\n matrix = numpy.insert(matrix, 0, pixel, axis=1)\n matrix = numpy.insert(matrix, matrix.shape[1], pixel, axis=1)\n row = numpy.zeros((1, matrix.shape[1], 3), numpy.uint8)\n matrix = numpy.insert(matrix, 0, row, axis=0)\n matrix = numpy.insert(matrix, matrix.shape[0], row, axis=0)\n\n return matrix", "def zero_matrix(matrix):\n cols = []\n for row in range(0, len(matrix)):\n for col in range(0, len(matrix[row])):\n if matrix[row][col] == 0:\n cols.append(col)\n matrix[row] = [0 for _ in range(0, len(matrix[row]))]\n break\n for col in cols:\n for row in range(0, len(matrix)):\n matrix[row][col] = 0\n return matrix", "def zero_pad(X, padding_width, dims):\n dims = (dims) if isinstance(dims, int) else dims\n pad = [(0, 0) if idx not in dims else (padding_width, padding_width)\n for idx in range(len(X.shape))]\n X_padded = np.pad(X, pad, 'constant')\n return X_padded", "def zero_pad(data):\n N = len(data)\n pow_2 = np.ceil(np.log2(N))\n return np.pad(data,(0,int((2**pow_2)-N)),'constant')", "def zero_matrix(matrix):\n if not matrix:\n return\n\n n_rows = len(matrix)\n n_cols = len(matrix[0])\n\n # First row and first column of the matrix are used to store row and columns to nullify.\n # At the beginning we determine if the first row and/or first column contain a zero.\n first_row_zero = False\n first_col_zero = False\n for col in range(n_cols):\n if not matrix[0][col]:\n first_row_zero = True\n break\n for row in range(n_rows):\n if not matrix[row][0]:\n first_col_zero = True\n break\n\n # Then go through the rest of the matrix and determine which rows and columns must be zeroed.\n for row in range(1, n_rows):\n for col in range(1, n_cols):\n if not matrix[row][col]:\n matrix[0][col] = 0\n matrix[row][0] = 0\n\n # Go through the rest of the matrix again and insert zeros where needed.\n for row in range(1, n_rows):\n for col in range(1, n_cols):\n if matrix[0][col] == 0 or matrix[row][0] == 0:\n matrix[row][col] = 0\n\n # Nullify first 
row and column if needed.\n if first_row_zero:\n for col in range(n_cols):\n matrix[0][col] = 0\n if first_col_zero:\n for row in range(n_rows):\n matrix[row][0] = 0", "def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))", "def zeroes(height, width):\n g = [[0.0 for _ in range(width)] for __ in range(height)]\n return Matrix(g)", "def spzeros(size, device='cuda:0'):\n\n return SparseTensor(size=size).to(device=device)", "def posM(M):\r\n M[np.where(M < 0)] = 0\r\n return M", "def setZeroes(self, matrix: List[List[int]]) -> None:\n first_col = False\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0:\n matrix[i][0] = 0\n if j == 0:\n first_col = True\n else:\n matrix[0][j] = 0\n for i in range(1, len(matrix[0])):\n if matrix[0][i] == 0:\n for j in range(len(matrix)):\n matrix[j][i] = 0\n if matrix[0][0] == 0:\n matrix[0] = [0 for x in range(len(matrix[0]))]\n for i in range(1, len(matrix)):\n if matrix[i][0] == 0:\n for j in range(len(matrix[0])):\n matrix[i][j] = 0\n if first_col:\n for i in range(len(matrix)):\n matrix[i][0] = 0", "def set_zero(mn_matrix): \r\n \r\n row_flag = [0]*len(mn_matrix)\r\n column_flag = [0]*len(mn_matrix[0])\r\n for i in range(0, len(row_flag)):\r\n for j in range(0, len(column_flag)):\r\n if mn_matrix[i][j] == 0:\r\n row_flag[i] = 1\r\n column_flag[j] = 1\r\n \r\n for i in range(0, len(row_flag)):\r\n for j in range(0, len(column_flag)):\r\n if row_flag[i] == 1 or column_flag[j] == 1: \r\n mn_matrix[i][j] = 0", "def zeroes(height, width):\n g = [[0.0 for _ in range(width)] for __ in range(height)]\n return Matrix(g)", "def setZeroes(self, matrix: List[List[int]]) -> None:\n rows, cols = [], []\n for i in range(len(matrix)):\n for j in range(len(matrix[i])):\n if matrix[i][j] == 0:\n rows.append(i)\n cols.append(j)\n for i in rows:\n for j in range(len(matrix[i])):\n matrix[i][j] = 0\n for j in cols:\n for i in range(len(matrix)):\n matrix[i][j] = 0\n\n return", "def setZeroes(self, matrix: List[List[int]]) -> None:\r\n \r\n rows, cols = len(matrix), len(matrix[0])\r\n \r\n #This solution has been done in constant space\r\n #using first row and first column for deciding which rows and columns\r\n #should be zero\r\n \r\n #using one extra variable for first row\r\n #because first cell is common in first row and column\r\n first_row = 1\r\n \r\n for i in range(rows):\r\n for j in range(cols):\r\n if matrix[i][j]==0:\r\n #for column\r\n matrix[0][j] = 0\r\n \r\n #for row\r\n if i==0:\r\n first_row = 0\r\n else:\r\n matrix[i][0] = 0\r\n \r\n #checking for rows except first row\r\n for i in range(1,rows):\r\n if matrix[i][0] == 0:\r\n for j in range(cols):\r\n matrix[i][j] = 0\r\n \r\n #checking for columns except first column\r\n for j in range(1, cols):\r\n if matrix[0][j] == 0:\r\n for i in range(rows):\r\n matrix[i][j] = 0\r\n \r\n #for first column\r\n if matrix[0][0] == 0:\r\n for i in range(rows):\r\n matrix[i][0] = 0\r\n \r\n #for first row\r\n if first_row == 0:\r\n for j in range(cols):\r\n matrix[0][j] = 0\r\n \r\n #print(matrix)", "def setZeroes(self, matrix: List[List[int]]) -> None:\n if not matrix:\n return\n row = set()\n col = set()\n rowl = len(matrix[0])\n for i, r in enumerate(matrix):\n for j, c in enumerate(r):\n if c == 0:\n row.add(i)\n col.add(j)\n \n for r in row:\n matrix[r] = [0]*rowl\n for j in col:\n for i in range(len(matrix)):\n matrix[i][j] = 0", "def zero_matrix(matrix):\n rows = set()\n columns = set()\n m = len(matrix)\n n = len(matrix[0])\n for i in range(m):\n for j in 
range(n):\n if matrix[i][j] == 0:\n rows.add(i)\n columns.add(j)\n\n for i in range(m):\n for j in range(n):\n if i in rows or j in columns:\n matrix[i][j] = 0\n return matrix", "def test_add_nans_to_weights():\n # create input sparse matrix with one empty row (j=2)\n row = np.array([0, 3, 1, 0])\n col = np.array([0, 3, 1, 2])\n data = np.array([4., 5., 7., 9.])\n Matin = sps.coo_matrix((data, (row, col)), shape=(4, 4))\n\n # this is what is expected to come out (Nan added at i=0, j=2)\n row = np.array([0, 3, 1, 0, 2])\n col = np.array([0, 3, 1, 2, 0])\n data = np.array([4., 5., 7., 9., np.nan])\n expected = sps.coo_matrix((data, (row, col)), shape=(4, 4))\n\n Matout = xe.smm.add_nans_to_weights(Matin)\n assert np.allclose(expected.toarray(), Matout.toarray(), equal_nan=True)\n\n # Matrix without empty rows should return the same\n row = np.array([0, 3, 1, 0, 2])\n col = np.array([0, 3, 1, 2, 1])\n data = np.array([4., 5., 7., 9., 10.])\n Matin = sps.coo_matrix((data, (row, col)), shape=(4, 4))\n\n Matout = xe.smm.add_nans_to_weights(Matin)\n assert np.allclose(Matin.toarray(), Matout.toarray())", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m, n = len(matrix), len(matrix[0]) if matrix else 0\n \n # Use first row to store zero-info for each col\n # Use first col to store zero-info for each row\n # matrix[0][0] will store col info\n \n is_first_row = any(x == 0 for x in matrix[0])\n \n for i in range(1, m):\n for j in range(n):\n if matrix[i][j] == 0:\n matrix[0][j] = matrix[i][0] = 0\n \n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][0] * matrix[0][j] == 0:\n matrix[i][j] = 0\n \n if matrix[0][0] == 0:\n for i in range(1, m):\n matrix[i][0] = 0\n \n if is_first_row:\n for j in range(n):\n matrix[0][j] = 0", "def setZeroes(self, matrix: List[List[int]]) -> None:\n colsToZero = set()\n rowsToZero = set()\n for rowIdx, row in enumerate(matrix):\n for colIdx, num in enumerate(row): \n if num == 0: \n colsToZero.add(colIdx)\n rowsToZero.add(rowIdx)\n \n for col in colsToZero:\n self.writeZeroCol(col, matrix)\n for row in rowsToZero:\n self.writeZeroRow(row, matrix)" ]
[ "0.6846492", "0.6157045", "0.6157045", "0.6109819", "0.59839016", "0.59775317", "0.59726745", "0.59028137", "0.5901642", "0.5830433", "0.57942516", "0.57754564", "0.5743076", "0.57350683", "0.57294387", "0.57047725", "0.5701384", "0.5644425", "0.5624889", "0.56114304", "0.5601074", "0.55881506", "0.55743366", "0.5559454", "0.5554051", "0.5549057", "0.55429107", "0.5538183", "0.553792", "0.55256796" ]
0.7736398
0
Crop a kernel matrix to a target size horizontally and vertically. If a target dimension is even, it is adjusted up to the next odd integer so the kernel keeps odd dimensions.
def crop_kernel(kernel, target_size): # Use list for mutability target = [d for d in target_size] adjusted = False for dim in range(len(target)): if not target[dim] % 2: target[dim] += 1 adjusted = True if adjusted: sys.stderr.write( "WARNING: Cropped kernel size adjusted to " f"{target[0]}x{target[1]} to keep odd dimensions.\n" ) source_m, source_n = kernel.shape target_m, target_n = target # Define horizontal and vertical margins to trim if source_m > target_m: margin_rows = (source_m - target_m) // 2 else: margin_rows = 0 if source_n > target_n: margin_cols = (source_n - target_n) // 2 else: margin_cols = 0 cropped = kernel[ margin_rows : (source_m - margin_rows), margin_cols : (source_n - margin_cols), ] return cropped
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop_to_target(x, target):\n\n if target.ndim==3:\n t_h, t_w = target.shape[1], target.shape[2]\n elif target.ndim==4:\n t_h, t_w = target.shape[2], target.shape[3]\n cr = int((x.shape[2] - t_h) / 2)\n cc = int((x.shape[3] - t_w) / 2)\n x_cropped = x[:, :, cr:cr + t_h, cc:cc + t_w]\n return x_cropped", "def crop(X,size_crop=_size_crop):\n b = size_crop//2\n shape = tf.shape(X)\n cx= shape[0]//2\n cy= shape[1]//2\n return X[cx-b:cx+b,cy-b:cy+b,...]", "def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel", "def shift_kernel(kernel, shape, centre):\n h, w = kernel.shape\n assert(h % 2 == 1)\n assert(w % 2 == 1)\n half_h = np.floor(h/2)\n half_w = np.floor(w/2)\n \n result = np.zeros((shape[0]+2*half_h, shape[1]+2*half_w)) #zero pad to simplify edge handling \n\n ind_h = centre[0] + np.arange(0, 2*half_h+1, dtype='int') \n ind_w = centre[1] + np.arange(0, 2*half_w+1, dtype='int')\n result[ind_h[:,np.newaxis], ind_w] = kernel\n result = result[half_h:-half_h,half_w:-half_w]\n return result", "def crop(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]) ->torch.Tensor:\n new_size = to_tuple(new_size)\n return F.center_crop(img, output_size=new_size)", "def upsampleImage( arr, kernelSize ):\n return scipy.ndimage.zoom( arr, kernelSize )", "def slidekernelthroughdiagonal(kernel, matrix):\n size_kernel = kernel.shape[0]\n size_matrix = matrix.shape[0]\n result = np.zeros([size_matrix])\n for i in range(size_matrix):\n # Calculate zero padding needed\n padding_b = -min(i - int(size_kernel/2), 0)\n padding_a = -min(size_matrix - int(i + size_kernel/2), 0)\n matrix_selection = matrix[max(0, i-int(size_kernel/2)):min(size_matrix, i+int(size_kernel/2)),max(0, i-int(size_kernel/2)):min(size_matrix, i+int(size_kernel/2))]\n matrix_padded = np.pad(matrix_selection, [(padding_b, padding_a), (padding_b, padding_a)])\n result[i] = np.sum(matrix_padded*kernel)\n return result", "def crop_and_resize(image, boxes, size):\n box_ind = keras.backend.zeros_like(boxes, tensorflow.int32)\n box_ind = box_ind[..., 0]\n box_ind = keras.backend.reshape(box_ind, [-1])\n\n boxes = keras.backend.reshape(boxes, [-1, 4])\n\n return tensorflow.image.crop_and_resize(image, boxes, box_ind, size)", "def conv_matrix(matrix, kernel):", "def 
center_crop(img:np.array,output_size:List[int])->np.array:\n\n if isinstance(output_size,numbers.Number):\n output_size = (int(output_size),int(output_size))\n elif isinstance(output_size,(tuple,list)) and len(output_size)==1:\n output_size =(output_size[0],output_size[0])\n \n image_height,image_width,_=img.shape\n crop_height,crop_width=output_size\n if crop_width > image_width or crop_height > image_height:\n padding_ltrb = [\n (crop_width - image_width) // 2 if crop_width > image_width else 0,\n (crop_height - image_height) // 2 if crop_height > image_height else 0,\n (crop_width - image_width + 1) // 2 if crop_width > image_width else 0,\n (crop_height - image_height + 1) // 2 if crop_height > image_height else 0,\n ]\n img=cv.copyMakeBorder(img,padding_ltrb[1],padding_ltrb[3],padding_ltrb[0],padding_ltrb[2],cv.BORDER_CONSTANT,value=(0,0,0))\n image_height,image_width,_=img.shape\n if crop_width == image_width and crop_height == image_height:\n return img\n\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return img[crop_top:crop_top+crop_height,crop_left:crop_left+crop_width]", "def crop_resize_img(img_path, target_size, crop_amount):\n img = image.load_img(img_path)\n x = image.img_to_array(img)\n x = x[crop_amount:-crop_amount, crop_amount:-crop_amount, :]\n ximg = Image.fromarray(np.uint8(x))\n ximg_resize = ximg.resize((target_size[0], target_size[1]))\n x = image.img_to_array(ximg_resize)\n\n return x", "def get_crop_torch(im: torch.Tensor, pos: torch.Tensor, sample_sz: torch.Tensor, output_sz: torch.Tensor = None,\n mode: str = 'replicate', max_scale_change=None, is_mask=False):\n\n # if mode not in ['replicate', 'inside']:\n # raise ValueError('Unknown border mode \\'{}\\'.'.format(mode))\n\n # copy and convert\n posl = pos.long().clone()\n\n pad_mode = mode\n\n # Get new sample size if forced inside the image\n if mode == 'inside' or mode == 'inside_major':\n pad_mode = 'replicate'\n im_sz = torch.tensor([im.shape[2], im.shape[3]], device=im.device)\n shrink_factor = (sample_sz.float() / im_sz)\n if mode == 'inside':\n shrink_factor = shrink_factor.max()\n elif mode == 'inside_major':\n shrink_factor = shrink_factor.min()\n shrink_factor.clamp_(min=1, max=max_scale_change)\n sample_sz = (sample_sz.float() / shrink_factor).long()\n\n # Compute pre-downsampling factor\n if output_sz is not None:\n resize_factor = torch.min(sample_sz.float() / output_sz.float()).item()\n df = int(max(int(resize_factor - 0.1), 1))\n else:\n df = int(1)\n\n sz = sample_sz.float() / df # new size\n\n # Do downsampling\n if df > 1:\n os = posl % df # offset\n posl = (posl - os) // df # new position\n im2 = im[..., os[0].item()::df, os[1].item()::df] # downsample\n else:\n im2 = im\n\n # compute size to crop\n szl = torch.max(sz.round(), torch.tensor([2.0], dtype=sz.dtype, device=sz.device)).long()\n\n # Extract top and bottom coordinates\n tl = posl - (szl - 1) // 2\n br = posl + szl // 2 + 1\n\n # Shift the crop to inside\n if mode == 'inside' or mode == 'inside_major':\n im2_sz = torch.LongTensor([im2.shape[2], im2.shape[3]])\n shift = (-tl).clamp(0) - (br - im2_sz).clamp(0)\n tl += shift\n br += shift\n\n outside = ((-tl).clamp(0) + (br - im2_sz).clamp(0)) // 2\n shift = (-tl - outside) * (outside > 0).long()\n tl += shift\n br += shift\n\n # Get image patch\n # im_patch = im2[...,tl[0].item():br[0].item(),tl[1].item():br[1].item()]\n\n\n # Get image patch\n if not is_mask:\n im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - 
im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]),\n mode=pad_mode)\n else:\n im_patch = F.pad(im2, (-tl[1].item(), br[1].item() - im2.shape[3], -tl[0].item(), br[0].item() - im2.shape[2]))\n\n # Get image coordinates\n patch_coord = df * torch.cat((tl, br)).view(1, 4)\n\n scale = output_sz / (torch.tensor(im_patch.shape, device=im_patch.device)[-2:] * df)\n\n if output_sz is None or (im_patch.shape[-2] == output_sz[0] and im_patch.shape[-1] == output_sz[1]):\n return im_patch.clone(), patch_coord, scale\n\n # Resample\n if not is_mask:\n im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='bilinear')\n else:\n im_patch = F.interpolate(im_patch, output_sz.long().tolist(), mode='nearest')\n\n return im_patch, patch_coord, scale", "def center_crop(image, model_input_image_size):\n im_size = image.get_shape().as_list()\n target_height = model_input_image_size[0]\n target_width = model_input_image_size[1]\n if len(im_size) == 3:\n return tf.image.resize_image_with_crop_or_pad(\n image,\n target_height=target_height,\n target_width=target_width)\n elif len(im_size) == 4:\n time_split_image = tf.split(image, im_size[0], axis=0)\n crops = []\n for idx in range(len(time_split_image)):\n it_crop = tf.image.resize_image_with_crop_or_pad(\n tf.squeeze(time_split_image[idx], axis=0),\n target_height=target_height,\n target_width=target_width)\n crops += [tf.expand_dims(it_crop, axis=0)]\n return tf.concat(crops, axis=0)\n else:\n raise NotImplementedError", "def squeeze(x,kernel=3):\n k = kernel // 2\n idx = [list(i) for i in itertools.product(range(-k,k+1),range(-k,k+1))]\n \n xPad = torch.zeros(x.shape[0]+kernel-1,x.shape[1]+kernel-1)\n xPad[k:-k,k:-k] = x\n \n xSqueezed = [torch.roll(xPad,shifts=(i[0],i[1]),dims=(0,1)) for i in idx]\n xSqueezed = torch.stack(xSqueezed)\n \n return xSqueezed[:,k:-k,k:-k]", "def img_preprocess(self, img, output_size=64):\n w, h = img.size\n if w > h:\n box_param = (int(w * 0.5 - h * 0.5), 0, int(w * 0.5 + h * 0.5), h)\n cropped = img.crop(box_param)\n else: # w < h\n box_param = (0, int(h * 0.5 - w * 0.5), w, int(h * 0.5 + w * 0.5))\n cropped = img.crop(box_param)\n\n resized = cropped.resize((output_size, output_size))\n resized = np.asarray(resized)\n\n return resized", "def unfold(input, kernel_size, dilation=1, padding=0, stride=1):\n nd_util = utils._ntuple(input.ndimension() - 2)\n out = FunctionLib.apply(\n 'Im2Col',\n input.device,\n [input],\n kernel_shape=nd_util(kernel_size),\n strides=nd_util(stride),\n pads=nd_util(padding),\n dilations=nd_util(dilation))\n return out.flatten_(2)", "def crop(arr, target_shape):\n arr_shape = arr.shape\n ncrop = ()\n for dim in range(len(arr_shape)):\n diff = arr_shape[dim] - target_shape[dim]\n if diff > 0:\n start = int(diff / 2)\n stop = start + target_shape[dim]\n ncrop += np.index_exp[start:stop]\n else:\n ncrop += np.index_exp[:]\n cropped = arr[ncrop]\n return cropped", "def crop_and_downsample(originalX, downsample_size=32):\n current_dim = 250\n target_dim = 128\n margin = int((current_dim - target_dim) / 2)\n left_margin = margin\n right_margin = current_dim - margin\n\n # newim is shape (6, 128, 128)\n newim = originalX[:, left_margin:right_margin, left_margin:right_margin]\n\n # resized are shape (feature_width, feature_height, 3)\n feature_width = feature_height = downsample_size\n resized1 = imresize(newim[0:3, :, :], (feature_width, feature_height), interp=\"bicubic\", mode=\"RGB\")\n resized2 = imresize(newim[3:6, :, :], (feature_width, feature_height), interp=\"bicubic\", 
mode=\"RGB\")\n\n # re-packge into a new X entry\n newX = np.concatenate([resized1, resized2], axis=2)\n\n # the next line is EXTREMELY important.\n # if you don't normalize your data, all predictions will be 0 forever.\n newX = newX / 255.0\n\n return newX", "def crop_or_pad(img: torch.Tensor, new_size: Union[int, Tuple[int, int]]):\n new_size = to_tuple(new_size)\n if list(new_size) == list(img.shape[-2:]):\n return img\n img = pad(img, new_size)\n img = crop(img, new_size)\n return img", "def center_crop(img, output_size, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n if isinstance(output_size, numbers.Number):\n output_size = (int(output_size), int(output_size))\n\n image_width, image_height = _get_image_size(img, data_format)\n crop_height, crop_width = output_size\n crop_top = int(round((image_height - crop_height) / 2.0))\n crop_left = int(round((image_width - crop_width) / 2.0))\n return crop(\n img,\n crop_top,\n crop_left,\n crop_height,\n crop_width,\n data_format=data_format,\n )", "def crop_to_square(self, image):\n orig_height, orig_width, orig_channels = image.shape\n if orig_height > orig_width:\n return image[:orig_width, ...]\n elif orig_height < orig_width:\n return image[:, :orig_height, ...]\n return image", "def downsample2d(inputArray, kernelSize):\n average_kernel = np.ones((kernelSize,kernelSize))\n\n blurred_array = sig.convolve2d(inputArray, average_kernel, mode='same')\n downsampled_array = blurred_array[::kernelSize,::kernelSize]\n return downsampled_array", "def crop_and_pad(data, offset, crop_shape, target_shape=None):\n # Spatial dimensions only. All vars in zyx.\n shape = np.array(data.shape[1:])\n crop_shape = np.array(crop_shape)\n offset = np.array(offset[::-1])\n\n start = shape // 2 - crop_shape // 2 + offset\n end = start + crop_shape\n\n assert np.all(start >= 0)\n\n selector = [slice(s, e) for s, e in zip(start, end)]\n selector = tuple([slice(None)] + selector)\n cropped = data[selector]\n\n if target_shape is not None:\n target_shape = np.array(target_shape)\n delta = target_shape - crop_shape\n pre = delta // 2\n post = delta - delta // 2\n\n paddings = [(0, 0)] # no padding for batch\n paddings.extend(zip(pre, post))\n paddings.append((0, 0)) # no padding for channels\n\n cropped = np.pad(cropped, paddings, mode='constant')\n\n return cropped", "def unfold(input, kernel_size=(3,3), stride=(1,1), padding=(0,0)):\n batch_size, in_chan, in_height, in_width = input.shape\n \n vals = ((0,0), (0,0), (padding[0], padding[0]), (padding[1], padding[1]))\n input_padded = np.pad(input, vals, mode='constant')\n\n k, i, j = get_indices(input.shape, kernel_size, stride, padding)\n \n input_f = input_padded[:, k, i, j]\n\n input_f = input_f.transpose(1, 2, 0).reshape(in_chan*kernel_size[0]*kernel_size[1], -1)\n return input_f", "def crop(img: 'np.ndarray', x: int, y: int, width: int, height: int) -> 'np.ndarray':\n return img[y:y+height, x:x+width]", "def test_resize_outputs_crop(self, X: torch.Tensor, scale_factor: float):\n xc, yc = resize_batch(X, X, scale_factor, 'crop', resize_targets=True)\n assert torch.equal(xc, yc)", "def _crop_video(numpy_video, size, desired_size):\r\n\r\n w, h = size\r\n h1, h2 = int(h/2) - int(desired_size/2), int(h/2) + int(desired_size/2)\r\n w1, w2 = int(w/2) - int(desired_size/2), int(w/2) + int(desired_size/2)\r\n return numpy_video[:, :, h1:h2, w1:w2, :]", "def center_crop(image, source=(218, 178, 3), target=128):\n height, width, channel = source\n\n off_h = np.ceil((height - target) / 2).astype(int)\n 
off_w = np.ceil((width - target) / 2).astype(int)\n return image[off_h: off_h+target, off_w: off_w+target, :]", "def get_crop_matrix_batch(bbx):\n shape = tf.shape(bbx)\n batch_size = shape[0]\n bbx = tf.reshape(bbx, [batch_size, 4, 1])\n\n tmp = tf.convert_to_tensor([[0.0, -1.0, 0.0, 1.0], \n [-1.0, 0.0, 1.0, 0.0]])\n tmp = tf.broadcast_to(tmp, shape=[batch_size, 2, 4])\n # (N, 2, 1) <- (N, 2, 4) x (N, 4, 1)\n cwh = tf.matmul(tmp, bbx)\n cwh = tf.div(1.0, cwh)\n # (N, 3, 1)\n cwh = tf.pad(cwh, paddings=((0, 0), (0, 1), (0, 0)), mode='CONSTANT', constant_values=1.0)\n # (3, 3)\n eye = tf.eye(3)\n # (N, 3, 3)\n eye = tf.broadcast_to(eye, shape=[batch_size, 3, 3])\n # (N, 3)\n cwh = tf.reshape(cwh, [batch_size, 3])\n # (N, 3, 3)\n cwh = tf.linalg.diag(cwh)\n \n # (2, 4)\n xym = tf.convert_to_tensor([[0.0, -1.0, 0.0, 0.0], \n [-1.0, 0.0, 0.0, 0.0]])\n # (N, 2, 4)\n xym = tf.broadcast_to(xym, shape=[batch_size, 2, 4])\n # (N, 2, 1) <- (N, 2, 4) x (N, 4, 1)\n xym = tf.matmul(xym, bbx)\n # (N, 3, 3)\n xym = tf.pad(xym, paddings=((0, 0), (0, 1), (2, 0)), mode='CONSTANT', constant_values=0.0)\n xym = eye + xym\n \n # (N, 3, 3) <- (N, 3, 3) x (N, 3, 3)\n trans = tf.matmul(cwh, xym)\n return trans", "def center_crop_and_pad(data, coor, target_shape):\n target_shape = np.array(target_shape)\n\n start = coor - target_shape // 2\n end = start + target_shape\n\n # assert np.all(start >= 0)\n\n selector = [slice(s, e) for s, e in zip(start, end)]\n cropped = data[tuple(selector)]\n\n if target_shape is not None:\n\n if len(cropped.shape) > 3:\n target_shape = np.array(target_shape)\n delta = target_shape - cropped.shape[:-1]\n pre = delta // 2\n post = delta - delta // 2\n\n paddings = [] # no padding for batch\n paddings.extend(zip(pre, post))\n paddings.append((0, 0))\n cropped = np.pad(cropped, paddings, mode='constant')\n else:\n target_shape = np.array(target_shape)\n delta = target_shape - cropped.shape\n pre = delta // 2\n post = delta - delta // 2\n\n paddings = [] # no padding for batch\n paddings.extend(zip(pre, post))\n cropped = np.pad(cropped, paddings, mode='constant')\n\n return cropped" ]
[ "0.657598", "0.62372273", "0.6060326", "0.58836097", "0.56814396", "0.5558945", "0.5557947", "0.55271006", "0.54924375", "0.5463341", "0.54487395", "0.5447843", "0.5445061", "0.544338", "0.54312927", "0.54123294", "0.54112655", "0.5396363", "0.5391804", "0.53821915", "0.5377355", "0.5306529", "0.5298422", "0.5284605", "0.5283586", "0.5277709", "0.52731436", "0.5270116", "0.5266266", "0.52546686" ]
0.78084946
0
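As an illustrative aside, not part of the dataset row above: the crop_kernel entry trims equal margins from each side of the kernel. A minimal standalone sketch of that center-crop slicing, using an assumed 15x15 input and 7x7 target:

```python
import numpy as np

# Minimal sketch of the center-crop slicing described above; the inputs are assumed.
kernel = np.ones((15, 15))   # hypothetical odd-sized source kernel
target = (7, 7)              # odd target size, so no adjustment is needed
margin_rows = (kernel.shape[0] - target[0]) // 2
margin_cols = (kernel.shape[1] - target[1]) // 2
cropped = kernel[margin_rows:kernel.shape[0] - margin_rows,
                 margin_cols:kernel.shape[1] - margin_cols]
print(cropped.shape)  # -> (7, 7)
```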
Resize a kernel matrix based on the resolution at which it was defined and the resolution of the input signal. E.g., if a kernel matrix was generated for 10 kb and the input signal is at 20 kb, the kernel size will be divided by two. If the kernel is enlarged, pixels are interpolated with a spline of degree 1. Alternatively, a resize factor can be provided; in the example above, that factor would be 0.5.
def resize_kernel( kernel, kernel_res=None, signal_res=None, factor=None, min_size=7, quiet=False, ): km, kn = kernel.shape if km != kn: raise ValueError("kernel must be square.") if not (km % 2) or not (kn % 2): raise ValueError("kernel size must be odd.") if factor is not None: if kernel_res is not None or signal_res is not None: raise ValueError( "factor is mutually exclusive with resolution " "parameters (kernel_res and signal_res)." ) resize_factor = factor else: if kernel_res is None or signal_res is None: raise ValueError( "You must provide either a resize factor or the signal and " "kernel resolutions." ) # Define by how many times kernel must be enlarged for its pixels to # match the signal's pixels resize_factor = kernel_res / signal_res if km * resize_factor < min_size: resize_factor = min_size / km resized_kernel = ndi.zoom(kernel, resize_factor, order=1) if not resized_kernel.shape[0] % 2: # Compute the factor required to yield a dimension smaller by one adj_resize_factor = (resized_kernel.shape[0] - 1) / km if not quiet: sys.stderr.write( f"Adjusting resize factor from {resize_factor} to {adj_resize_factor}.\n" ) resized_kernel = ndi.zoom(kernel, adj_resize_factor, order=1) return resized_kernel
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel", "def resize(self, size):\n if len(size) != len(self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"length of resize shape is incorrect.\")\n if not np.all(size >= self._Fkernel.shape[1:-1]):\n raise RuntimeError(\"resize shape is too small.\")\n kernel = self._frequency_2_real()\n kernel_pad = self._zero_pad(kernel, size)\n self._Fkernel = self._real_2_frequency(kernel_pad)\n self.basis._axes_shape = kernel_pad.shape[1:-1]", "def get_kernel_size(factor):\r\n return 2 * factor - factor % 2", "def __resize_bilinear_with_factor(self, x, factor_size=None, data_format=None):\n height_factor = factor_size[0]\n width_factor = factor_size[1]\n if data_format == 'channels_first':\n height = x.shape[2] * height_factor\n width = x.shape[3] * width_factor\n new_shape = tf.constant(np.array([height, width]).astype('int32'))\n X = K.permute_dimensions(x, [0, 2, 3, 1])\n X = tf.image.resize_bilinear(X, new_shape)\n X = K.permute_dimensions(X, [0, 3, 1, 2])\n return X\n else:\n raise Exception(\"invilid data format\", data_format)", "def resize_function(input):\n\n from keras.backend import tf as ktf\n return ktf.image.resize_images(input, (64, 64))", "def resize(orig, factor, method=\"nearest\"):\r\n method_dict = {'nearest': 0, 'bilinear': 1, 'cubic': 2}\r\n if method.lower() not in method_dict:\r\n raise ValueError(\"Invalid interpolation method. 
Options are: \" + \", \".join(method_dict.keys()))\r\n try:\r\n return zoom(orig, factor, order=method_dict[method.lower()])\r\n except RuntimeError:\r\n # raised by zoom when factor length does not match orig.shape length\r\n raise ValueError(\"Factor sequence length does not match input length\")", "def resize_coeff(x, new_x):\n return new_x / x", "def resize_coeff(x, new_x):\n return new_x / x", "def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out", "def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):\n shape = input_tensor.get_shape().as_list()\n if shape[1] is None or shape[2] is None:\n kernel_size_out = kernel_size\n else:\n kernel_size_out = [min(shape[1], kernel_size[0]),\n min(shape[2], kernel_size[1])]\n return kernel_size_out", "def _resize(img, max_dim=128):\n if max(img.shape[:3]) <= max_dim:\n return img\n else:\n new_size = [max_dim / s if s >= max_dim else 1.0 for s in img.shape[:3]]\n new_size.append(1.0) # for channel\n return scipy.ndimage.zoom(img, new_size, order=2)", "def resize(self):\n e = self.e\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n e = self.er\n self.dsize = numpy.clip((self.np_req/self.np)**(1./e), 1/self.r, self.r)\n self.size *= self.dsize", "def kernel_size(self, frequency, taper=4.):\n return 2. * taper * self._sigma(frequency)", "def rescale(self, factor):\n scaled_size = (int(self.width * factor), int(self.height * factor))\n return self.resize(scaled_size)", "def scale_2d(orig_matrix, height=None, width=None):\n matrix = orig_matrix.copy()\n\n if len(matrix.shape) != 2:\n raise ValueError(\"The Matrix must have 2 and only 2 dimensions\")\n\n orig_h = matrix.shape[0]\n orig_w = matrix.shape[1]\n\n dim = (0, 0)\n\n if not height and not width:\n dim = matrix.shape\n\n elif not height:\n width = width\n new_ratio = float(width) / float(orig_w)\n height = orig_h * new_ratio\n dim = (width, height)\n\n elif not width:\n height = height\n new_ratio = float(height) / float(orig_h)\n width = orig_w * new_ratio\n dim = (width, height)\n\n else:\n dim = (height, width)\n\n dim = tuple([int(x) for x in dim])\n return cv2.resize(matrix, dim, interpolation=cv2.INTER_AREA)", "def setPixelsPerInchShrinkToFit(self,value):\n self.PDFreactorConfiguration.in1[\"pixelsPerInchShrinkToFit\"] = value", "def resize3D(img, target_size, bspline_order=3, mode='constant'): \n # compute zoom values\n target_size = np.array(target_size, dtype=float)\n image_shape = np.array(img.shape, dtype=float)\n zoom_factors = np.divide(target_size,image_shape)\n print \"Target Size\"\n print target_size\n \n\n print \"Zoom Factors\"\n print zoom_factors\n\n \n # zoom image\n img = zoom(img, zoom_factors, order=bspline_order, mode=mode)\n\n print \"image_shape\"\n print img.shape\n\n return img", "def resize_to_box(im, size):\n #mx = np.max(im.shape[:2])\n\n factors = [size[i]/im.shape[i] for i in range(2)]\n\n f = np.min(factors)\n if f < 1.0:\n return resize_with_factor_new(im, f)\n else:\n return im", "def zoom(self, factor):\n adj = self.canvas.get_hadjustment()\n oldCenter = adj.value + adj.page_size // 2\n\n self.scale *= factor\n self.resizer.rescale()\n self.resize(self.timeExtent * self.scale, self.height)\n for f in self.resizeCallbacks:\n f()\n\n adj.value = oldCenter * factor - adj.page_size // 2", "def 
resize(im, new_size, preserve_aspect_ratio=True, prefilter=True):\n factors = [new_size[i] / im.shape[i] for i in range(2)]\n\n #assert factors[0] == factors[1], \"Must have same factor for now\"\n f = factors[0] \n \n if f < 1:\n im2 = pyramid_reduce(im, downscale=1/f)\n elif f > 1:\n im2 = pyramid_expand(im, upscale=f)\n else:\n im2 = im\n\n assert im2.shape[:2] == tuple(new_size), \"{0} != {1} (original size: {2})\".format(im2.shape, new_size, im.shape)\n \n return im2", "def bilinear_interpolation_kernel(in_channels, out_channels, ksize):\n\n factor = (ksize + 1) / 2\n if ksize % 2 == 1:\n center = factor - 1\n else:\n center = factor - 0.5\n og = np.ogrid[:ksize, :ksize]\n k = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n \n W = np.zeros((in_channels, out_channels, ksize, ksize)).astype(np.float32)\n W[range(in_channels), range(out_channels), :, :] = k\n return W", "def scale_image(img, factor=1):\n return cv2.resize(img, (int(img.shape[1] * factor), int(img.shape[0] * factor)))", "def _aspect_preserving_resize(image, resize_min):\r\n shape = tf.shape(input=image)\r\n height, width = shape[0], shape[1]\r\n\r\n new_height, new_width = _smallest_size_at_least(height, width, resize_min)\r\n\r\n return _resize_image(image, new_height, new_width)", "def resize_image(image, resizeFactor):\n\n return cv.resize(image, (0,0), fx=resizeFactor, fy=resizeFactor)", "def shrink(self, factor_x:int=1, factor_y:int=1):\n # vertical shrink\n shrunk = self._pixels[::factor_y]\n # horizontal shrink\n shrunk = self._outer(_row[::factor_x] for _row in shrunk)\n return type(self)(shrunk, _0=self._0, _1=self._1)", "def coral_image_resize(im, scaling_method, scaling_factor, height_cm):\n\n if scaling_method == 'scale':\n scale = float(scaling_factor) # here scaling_factor is the desired image scaling.\n elif scaling_method == 'ratio':\n scale = float(scaling_factor) * height_cm / im.shape[0] # here scaling_factor is the desited px_cm_ratio.\n im = scipy.misc.imresize(im, scale)\n return (im, scale)", "def downScaleResolution(kv, factor=10):\n sub_img_name = kv[0]\n sub_image = kv[1]\n img_dimension = len(sub_image)\n big_image = sub_image\n Nbig = img_dimension\n Nsmall = Nbig//factor\n small_image = big_image.reshape([Nsmall, Nbig // Nsmall, Nsmall, Nbig // Nsmall]).mean(3).mean(1)\n return (sub_img_name,small_image)", "def resize(im, target_size, max_size, stride=0, interpolation=cv2.INTER_LINEAR):\r\n im_shape = im.shape\r\n im_size_min = np.min(im_shape[0:2])\r\n im_size_max = np.max(im_shape[0:2])\r\n im_scale = float(target_size) / float(im_size_min)\r\n # prevent bigger axis from being more than max_size:\r\n if np.round(im_scale * im_size_max) > max_size:\r\n im_scale = float(max_size) / float(im_size_max)\r\n im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation)\r\n\r\n if stride == 0:\r\n return im, im_scale\r\n else:\r\n # pad to product of stride\r\n im_height = int(np.ceil(im.shape[0] / float(stride)) * stride)\r\n im_width = int(np.ceil(im.shape[1] / float(stride)) * stride)\r\n im_channel = im.shape[2]\r\n padded_im = np.zeros((im_height, im_width, im_channel))\r\n padded_im[:im.shape[0], :im.shape[1], :] = im\r\n return padded_im, im_scale", "def scale_image(img, factor=1):\n\treturn cv2.resize(img,(int(img.shape[1]*factor), int(img.shape[0]*factor)))", "def resize(img, shape, mode='constant', orig_shape=(155, 240, 240)):\n assert len(shape) == 3, \"Can not have more than 3 dimensions\"\n factors = (\n shape[0] / orig_shape[0],\n 
shape[1] / orig_shape[1],\n shape[2] / orig_shape[2]\n )\n\n # Resize to the given shape\n return zoom(img, factors, mode=mode)" ]
[ "0.6673881", "0.64376324", "0.63498217", "0.6145752", "0.60997856", "0.6078737", "0.5899416", "0.5899416", "0.5854729", "0.5854729", "0.580574", "0.58015907", "0.57691246", "0.57011503", "0.56532955", "0.55739254", "0.5490137", "0.5478357", "0.5461603", "0.5452446", "0.5299778", "0.52933866", "0.52694404", "0.5243633", "0.52357435", "0.52345717", "0.52280074", "0.5209974", "0.52011526", "0.5189116" ]
0.7894084
0
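Illustrative aside, not part of the dataset row above: the resize_kernel entry rescales the kernel with a degree-1 spline via scipy.ndimage.zoom, using the ratio of kernel resolution to signal resolution as the zoom factor. A minimal sketch with assumed resolutions of 10 kb (kernel) and 20 kb (signal):

```python
import numpy as np
import scipy.ndimage as ndi

# Sketch of resolution-based kernel resizing; the kernel and resolutions are assumptions.
kernel = np.outer(np.hanning(17), np.hanning(17))  # hypothetical 17x17 kernel
kernel_res, signal_res = 10_000, 20_000            # 10 kb kernel, 20 kb signal
factor = kernel_res / signal_res                   # 0.5 -> kernel shrunk by half
resized = ndi.zoom(kernel, factor, order=1)        # order=1: degree-1 spline
print(kernel.shape, "->", resized.shape)
```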
Performs truncated SVD on an input kernel, returning the singular vectors necessary to retain a given proportion of information contained in the kernel.
def factorise_kernel(kernel, prop_info=0.999): u, sigma, v = la.svd(kernel) total_info = np.sum(sigma ** 2) # Compute min. number of singular vectors to retain enough info keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > prop_info * total_info)[0] + 1 if keep_k > np.floor(min(kernel.shape) / 2): sys.stderr.write( f"Warning: Kernel factorisation required {keep_k} singular," "vectors this may result in slow operations.\n", ) # Truncate singular matrix to the keep only required vectors u = u[:, :keep_k] v = v[:keep_k, :] # Multiply each singular vector by the sqrt of its singular value for i in range(keep_k): u[:, i] *= np.sqrt(sigma[i]) v[i, :] *= np.sqrt(sigma[i]) return (u, v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def truncated_svd(A,k=None):", "def svd_shrink(X, tau):\n U,s,V = np.linalg.svd(X, full_matrices=False)\n return np.dot(U, np.dot(np.diag(shrink(s, tau)), V))", "def reduce_svd(embeddings, seed=0):\n svd = TruncatedSVD(n_components=2, n_iter=10, random_state=seed)\n return svd.fit_transform(embeddings)", "def computeTruncatedSVD(docTermMatrix, dim=500):\r\n T, S, D = np.linalg.svd(np.transpose(docTermMatrix), full_matrices=False)\r\n\r\n diagS = np.diag(S)\r\n shape = np.shape(diagS)\r\n\r\n if dim <= shape[0] and dim <= shape[1]:\r\n subT = T[:,:dim]\r\n subS = diagS[:dim,:dim]\r\n subD = np.transpose(D)[:,:dim]\r\n else:\r\n subT = T\r\n subS = diagS\r\n subD = np.transpose(D)\r\n\r\n return subT, subS, subD", "def truncated_svd(A,k=None):\n \n \n \n AHA=np.conj(A).T.dot(A)\n evals,evecs=la.eig(AHA)\n order=np.argsort(evals)\n\n evals=evals[order][::-1].copy()\n evecs=evecs.T[order][::-1].copy()\n m,n=AHA.shape\n \n tol=1e-12\n Vh=[]\n for i in xrange(0,m):\n\t\t if np.abs(evals[i])>=tol:\n\t \t\tVh+=[evecs[i]]\n \n Vh=np.array(Vh)\n s=np.sqrt(evals[:Vh.shape[0]])\n U=[]\n for i in xrange(0,len(s)):\n U+=[(1./s[i])*A.dot(Vh[i])]\n U=np.array(U).T\n \n return U,s,Vh", "def tsvd(A, threshold=0.99999, avoid_pathological=True):\n M,N = A.shape\n full_matrices = False\n\n if is_int(threshold):\n # Assume specific number is requested\n r = threshold\n assert 1 <= r <= max(M,N)\n if r > min(M,N):\n full_matrices = True\n r = min(M,N)\n\n U,s,VT = sla.svd(A, full_matrices)\n\n if isinstance(threshold,float):\n # Assume proportion is requested\n r = truncate_rank(s,threshold,avoid_pathological)\n\n # Truncate\n U = U [:,:r]\n VT = VT[ :r]\n s = s [ :r]\n return U,s,VT", "def svd(self, X): # [5pts]\n N,D = X.shape[0],X.shape[1]\n if X.ndim == 3:\n U = np.zeros((N,N,3))\n S = np.zeros((min(N,D),3))\n V = np.zeros((D,D,3))\n for i in range(3):\n U_temp,S_temp,V_temp = np.linalg.svd(X[:,:,i],compute_uv=True, full_matrices=True,hermitian=False)\n U[:,:,i] = U_temp\n S[:,i] = S_temp\n V[:,:,i] = V_temp\n else:\n U,S,V = np.linalg.svd(X,compute_uv=True,full_matrices=True, hermitian=False)\n return U,S,V", "def test_truncate2():\n X = rand(5,5,5)\n T = hosvd(X)\n k = 3\n Tk = T.truncate(k)\n E = X - Tk.asarray()\n Cdk = T.X\n Cdk[:k,:k,:k] = 0\n assert np.allclose(fro_norm(E), fro_norm(Cdk))", "def svdSoft(A, lmbda, k):\n if not scipy.sparse.issparse(A): \n raise ValueError(\"A must be a sparse matrix\")\n \n #U, s, V = scipy.sparse.linalg.svds(A, k)\n U, s, V = sparsesvd(A, k) \n U = U.T\n inds = numpy.flipud(numpy.argsort(s))\n U, s, V = Util.indSvd(U, s, V, inds) \n \n #Soft threshold \n s = s - lmbda\n s = numpy.clip(s, 0, numpy.max(s))\n\n return U, s, V", "def update_model(X, U, S, k, n, mu,\n svdmethod='full',\n missingmethod='zero'):\n\n if len(X) == 0:\n printt(\"Error: No data in X.\")\n return None, None, None, -1, None\n #print('%d items in X' % X.shape[1])\n #print('init U:', U)\n\n # If there is no previous U, and we just got a single item in X,\n # set U to all 0's (degenerate SVD),\n # and return it with mu.\n # (PR #22 sets first value to 1; see decals implementation)\n if len(U) == 0 and X.shape[1] == 1:\n mu = X\n # Do this no matter what. 
Let mu get NaNs in it as needed.\n U = np.zeros_like(mu)\n U[0] = 1\n S = np.array([0])\n n = 1\n pcts = [1.0]\n return U, S, mu, n, pcts\n\n ###########################################################################\n # Do full SVD of X if this is requested, regardless of what is in U \n # Also, if n = 0 or U is empty, start from scratch\n output_k = False\n if svdmethod == 'full' or len(U) == 0 or n == 0:\n if n == 0:\n if len(U) == 0:\n printt(\"----- initial SVD -----\")\n output_k = True\n else:\n # Reshape so we don't have an empty dimension (yay python)\n U = U.reshape(-1, 1)\n elif len(U) == 0:\n printt(\"WARNING: N (number of items modeled by U) is %d, not zero, but U is empty!\" % n)\n\n # Bootstrap\n if missingmethod == 'ignore':\n printt(\"ERROR: ignore with full is not possible under ordinary circumstances.\")\n printt(\"Use --increm-brand to impute for NaNs.\")\n printt(\"For now, we are filling NaNs with 0.\")\n X = copy.deepcopy(X)\n z = np.where(np.isnan(X))\n X[z] = 0\n\n mu = np.mean(X, axis=1).reshape(-1,1)\n X = X - mu\n U, S, V = linalg.svd(X, full_matrices=False)\n printt('Just did full SVD on %d items.' % X.shape[1])\n #print('X:', X)\n #print('U:', U)\n # Reset U to all 0's if we only have one item in X (degenerate SVD)\n if X.shape[1] == 1:\n U = np.zeros_like(U)\n \n # Keep only the first k components\n S_full = S\n S = S[0:k]\n U = U[:,0:k]\n\n # Update n to number of new items in X\n n = X.shape[1]\n \n ###########################################################################\n # Incremental SVD from Ross\n elif svdmethod == 'increm-ross':\n # Incremental SVD from Ross et al. 2008\n # \"Incremental Learning for Robust Visual Tracking\"\n # based on Lim and Ross's sklm.m implementation in MATLAB.\n\n # This method DOES NOT handle missing values.\n if missingmethod == 'ignore':\n print('ERROR: increm-ross cannot handle missing values.')\n print('If they are present, try svdmethod=increm-brand')\n print(' or use missingmethod=zero to zero-fill.')\n print('If there are no missing values, specify missingmethod=none.')\n sys.exit(1)\n\n n_new = X.shape[1]\n \n # Compute mean\n # Weirdly, the later 'X-mu_new' is MUCH faster if you reshape as shown.\n # This is because of differences in the way numpy treats a 1d array versus a 2d column.\n mu_new = np.mean(X, axis=1).reshape(-1,1)\n\n # Subtract the mean, append it as a column vector, and update mu\n # X - mu_new will be zero if X has only 1 item\n mu_old = mu\n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n B = np.hstack((X - mu,\n math.sqrt(n_new * n/float(n_new+n)) * \\\n (mu_old - mu_new)))\n printt(\"Now tracking mean for %d -> %d items; mu.min %f, mu.max %f \" % \\\n (n, n+n_new, np.nanmin(mu), np.nanmax(mu)))\n n = n + n_new\n\n if S.all() == 0:\n npcs = U.shape[1]\n diagS = np.zeros((npcs, npcs))\n else:\n diagS = np.diag(S)\n\n # I don't think this is right. At this point B is the augmented\n # matrix rather than the single observation.\n proj = np.dot(U.T, B)\n reproj_err = B - np.dot(U, proj)\n\n # to get orthogonal form of reproj_err\n # This should return q with dimensions [d(X) by n_new+1], square\n q, dummy = linalg.qr(reproj_err, mode='full')\n # print('q.shape should be 7x2: ', q.shape)\n Q = np.hstack((U, q))\n\n # From Ross and Lim, 2008\n # R = [ [ Sigma, U.T * X ] [ 0, orthog. 
component of reproj error ] ]\n k_now = diagS.shape[0]\n new_dim = k_now + n_new + 1\n R = np.zeros((new_dim, new_dim))\n R[0:k_now,0:k_now] = diagS\n R[0:k_now,k_now:] = proj\n orthog_reproj_err = np.dot(q.T, reproj_err)\n R[k_now:, k_now:] = orthog_reproj_err\n \n # Perform SVD of R. Then finally update U.\n U, S, V = linalg.svd(R, full_matrices=False)\n printt('Just did increm-ross SVD on %d items.' % n)\n\n U = np.dot(Q, U)\n \n # Keep only the first k components\n U = U[:,0:min([n,k])]\n S_full = S\n S = S[0:min([n,k])]\n\n ###########################################################################\n # Incremental SVD from Brand\n elif svdmethod == 'increm-brand':\n # Pulled out James's attempt to handle NaNs into\n # increm-brand-james.py. Starting over from scratch here.\n n_new = X.shape[1]\n\n if n_new != 1:\n print(\"WARNING: increm-brand will probably only work by adding one item at a time.\")\n input('\\nPress enter to continue or ^C/EOF to exit. ')\n\n if missingmethod == 'ignore':\n # 1. Update mu\n mu_old = mu\n mu_new = X\n\n # Be careful! For any pre-existing NaNs in mu,\n # let mu_new fill them in. Can't get any worse!\n naninds = np.where(np.isnan(mu_old))[0]\n if naninds.size > 0:\n mu_old[naninds,0] = mu_new[naninds,0]\n # And likewise for mu_new -- fill with good values from mu_old.\n naninds = np.where(np.isnan(mu_new))[0]\n if naninds.size > 0:\n mu_new[naninds,0] = mu_old[naninds,0]\n # At this point, the only NaNs that should appear are\n # values that were NaN for both mu and X to start with.\n # They will stay NaN and that's okay.\n \n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n printt(\"Now tracking mean for %d -> %d items; mu.min %f, mu.max %f \" % \\\n (n, n+n_new, np.nanmin(mu), np.nanmax(mu)))\n n = n + n_new\n\n # 2. Subtract off the mean\n X = X - mu\n\n # 3. Compute L, the projection of X onto U\n # Note: this will only work for a single item in X\n goodinds = np.where(~np.isnan(X))[0]\n #print('X: %d of %d are good.' % (len(goodinds), X.shape[0]))\n\n diagS = np.diag(S)\n # This is Brand's method, which involves S:\n L = np.dot(diagS,\n np.dot(np.linalg.pinv(np.dot(U[goodinds,:],\n diagS)),\n X[goodinds,:]))\n # Simplified version that does not use S (but is probably wrong):\n #L = np.dot(U[goodinds,:].T,\n # X[goodinds,:])\n # Top row of the Q matrix (eqn 12, Brand 2002)\n Q1 = np.hstack([diagS, L])\n\n # 4. Compute J, the orthogonal basis of H, which is\n # the component of X orthog to U (i.e., unrepresentable direction)\n # 5. Compute K, the projection of X onto J (i.e., unrep. content)\n K = linalg.norm(X[goodinds,:] - np.dot(U[goodinds,:],\n np.dot(U[goodinds,:].T,\n X[goodinds,:])))\n # H = X - UL\n J = np.zeros((U.shape[0], 1))\n J[goodinds] = np.dot(K,\n np.linalg.pinv(X[goodinds,:] -\n np.dot(U[goodinds,:],\n L))).T\n \n # Bottom row of Q matrix (eqn 12, Brand 2002)\n Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])\n Q = np.vstack([Q1, Q2])\n\n # 6. Take the SVD of Q\n Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)\n\n # 7. Update U and S (eqn 4, Brand 2002)\n # Note: Since J is zero-filled for badinds, now U is too.\n # Alternatively, we give J NaNs and let them get into U as well.\n # I think that is a worse idea though.\n U = np.dot(np.hstack([U, J]), Uq)\n S = Sq\n # Updating V requires knowing old V,\n # but we don't need the new one either so it's okay to skip.\n\n printt('Just did increm-brand SVD on %d items.' 
% n)\n \n ############# end ###########\n \n else: # No missing values (or not 'ignore')\n # 1. Update mu\n mu_old = mu\n mu_new = X\n # New mu is a weighted sum of old and new mus\n mu = (n * mu_old + n_new * mu_new) / (n + n_new)\n n = n + n_new\n\n # 2. Subtract off the mean\n X = X - mu\n\n # 3. Compute L, the projection of X onto U\n L = np.dot(U.T, X)\n Q1 = np.hstack([np.diag(S), L])\n\n # 4. Compute J, the orthogonal basis of H, which is\n # the component of X orthog to U (i.e., unrepresentable direction)\n # 5. Compute K, the projection of X onto J (i.e., unrep. content)\n JK = X - np.dot(U, L)\n (J, K) = linalg.qr(JK)\n\n Q2 = np.hstack([np.zeros([1, len(S)]), np.array(K).reshape(1,1)])\n Q = np.vstack([Q1, Q2])\n\n # 6. Take the SVD of Q\n Uq, Sq, Vq = linalg.svd(Q, full_matrices=False)\n\n # 7. Update U and S (eqn 4, Brand 2002)\n U = np.dot(np.hstack([U, J]), Uq)\n S = Sq\n # V requires knowing old V,\n # but we don't need the new one either so it's okay.\n \n printt('Just did regular increm SVD on %d items.' % n)\n\n # Keep only the first k components\n U = U[:,0:min([n,k])]\n S = S[0:min([n,k])]\n\n Usum = U.sum(1)\n\n\n ###########################################################################\n # We have a bad svdmethod, but somehow didn't catch it earlier.\n else:\n printt(\"504: Bad Gateway in protocol <Skynet_authentication.exe>\")\n return None, None, None, None, None\n\n indivpcts = None\n\n # This only works if a full SVD was done\n if (svdmethod == 'full' and output_k and opts['k_var'] == -773038.0):\n # Calculate percent variance captured by each \n cumsum = np.cumsum(S_full)\n #print(cumsum.shape)\n if cumsum[-1] != 0:\n indivpcts = S / cumsum[-1]\n indivpcts = indivpcts[0:k] # truncate to first k\n cumpercents = cumsum / cumsum[-1]\n else:\n indivpcts = []\n\n # Calculate percent variance captured\n if k >= cumsum.shape[0]:\n printt('Cannot estimate data variance; specified k (%d) exceeds the number of SVs (%d).' 
% (k, cumsum.shape[0]))\n else:\n printt(\"Selected value of k=%d captures %5.2f%% of the data variance\" % \\\n (k, cumpercents[k-1] * 100))\n if opts['pause']: input(\"Press enter to continue\\n\")\n\n #print('U:', U)\n #print('mu:', mu)\n return U, S, mu, n, indivpcts", "def reduce_to_k_dim(M, k=2): \n n_iters = 10 # Use this parameter in your call to `TruncatedSVD`\n M_reduced = None\n print(\"Running Truncated SVD over %i words...\" % (M.shape[0]))\n \n # YOUR CODE HERE\n \n svd = TruncatedSVD(n_components = k, n_iter = 10, random_state = 0)\n \n M_reduced = svd.fit_transform(M)\n \n #raise NotImplementedError()\n\n print(\"Done.\")\n return M_reduced", "def ES_SVD(U, sigma, V, time, f_fault, f_side, PMItreshold, estimate_xi_func=get_SVDxi, estimate_xi_func_params=None):\n\n # Get the search region\n m = sigma.size\n f_fault = np.asanyarray(f_fault)\n f_side = np.asanyarray(f_side)\n dt = time[1] - time[0]\n Fs = 1.0/dt\n PMI = [] #PMI is here the envelope score\n W = []\n for i in range(0, f_fault.size):\n PMI.append(np.zeros(m))\n W.append(np.zeros(m))\n\n # Calculate PMI for each fault type\n for i in range(0, m):\n if estimate_xi_func_params is None:\n a_i = estimate_xi_func(U, sigma, V, i)\n else:\n a_i = estimate_xi_func(U, sigma, V, i, estimate_xi_func_params)\n a_i = envelope(a_i)\n Y, df = fft(a_i, Fs)\n # Calculate PMI for each fault type\n for k in range(0, f_fault.size):\n PMI[k][i] = diagnosefft(Y, df, f_fault[k], 1.0, f_side[k])\n\n # Calculate weights\n for k in range(0, f_fault.size):\n temp = 0.0\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n temp += PMI[k][i]\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n W[k][i] = PMI[k][i]/temp\n\n # Return data\n return PMI, W", "def reduce_dimentions(users):\n\tsvd = TruncatedSVD(n_components=300, n_iter=10, random_state=42)\n\tsvd.fit(users)\n\tusers_svd = svd.transform(users)\n\tusers_svd = pd.DataFrame(users_svd, index=users.index)\n\treturn users_svd", "def svd0(A):\n M,N = A.shape\n if M>N: return sla.svd(A, full_matrices=True)\n else: return sla.svd(A, full_matrices=False)", "def reduce_to_k_dim(M, k=2): \n n_iters = 10 # Use this parameter in your call to `TruncatedSVD`\n M_reduced = None\n print(\"Running Truncated SVD over %i words...\" % (M.shape[0]))\n \n # ------------------\n # Write your implementation here.\n svd = TruncatedSVD(n_components=k, n_iter=n_iters)\n M_reduced = svd.fit_transform(M)\n \n # ------------------\n\n print(\"Done.\")\n return M_reduced", "def HOSVD(A, k=None, tol=None):\n\n d=len(A.shape)\n\n if d==2:\n u, s, vt=svd(A, full_matrices=False)\n U=[u, vt.T]\n S=np.diag(s)\n else:\n U=[None]*d\n for j in range(0, d):\n U[j], s, vt=svd(unfold(A, j), full_matrices=False)\n\n S=A.copy()\n for i in range(0, d):\n S=nModeProduct(S, U[i].T, i)\n\n if k is not None:\n if isinstance(k, int): # if only one integer is assigned to k\n k=k*np.ones((len(A.shape),), dtype=int)\n\n S=subTensor(S, k=k)\n for j in range(0, d):\n U[j]=U[j][:, :k[j]]\n\n return S, U", "def svd(T):\n try:\n U, S, V = splinalg.svd(T, full_matrices=False)\n except splinalg.LinAlgError:\n U, S, V = splinalg.svd(T, full_matrices=False, lapack_driver='gesvd')\n maxU, minU = U.max(0), U.min(0)\n maxV, minV = V.max(1), V.min(1)\n ind = (np.abs(minU) > maxU) & (np.abs(minV) > maxV)\n U[:, ind] *= -1\n V[ind] *= -1\n return U, S, V", "def test_truncate():\n # rank 1 tensor\n X = outer(rand(3), rand(4), rand(5))\n T = hosvd(X)\n assert find_truncation_rank(T.X, 1e-12) == (1,1,1)\n T1 = T.truncate(1)\n assert np.allclose(X, 
T1.asarray())", "def get_kernel_vector_unit(name, s, e, x, d_1, kernel, hyps,\n cutoffs, hyps_mask):\n\n size = (e-s)\n ds = [1, 2, 3]\n\n args = from_mask_to_args(hyps, hyps_mask, cutoffs)\n\n k_v = np.zeros(size*3, )\n\n for m_index in range(size):\n x_2 = _global_training_data[name][m_index+s]\n for d_2 in ds:\n k_v[m_index*3+d_2-1] = kernel(x, x_2, d_1, d_2, *args)\n\n return k_v", "def calculate_k_SVD(smooth_spreadsheet_matrix, k):\n U_unitary_matrix, singular_value, V_unitary_matrix = linalg.svd(smooth_spreadsheet_matrix)\n S_full_squared_matrix = np.zeros((k, k))\n np.fill_diagonal(S_full_squared_matrix, np.sqrt(singular_value[:k]))\n U_unitary_matrix = U_unitary_matrix[:, :k]\n return U_unitary_matrix, S_full_squared_matrix", "def svt(X, tau):\n U, S, Vt = la.svd(X,full_matrices=False)\n Xs = np.dot(U * st(S,tau), Vt)\n return Xs", "def svd(self):\n U, s, Vh = la.svd(self)\n S = np.zeros(self.shape)\n np.fill_diagonal(S, s)\n return (Matrix(U), Matrix(S), Matrix(Vh))", "def compute_svd(self,data,k):\n m, n =data.shape\n n = self.comm1.allreduce(n)\n print(m,n)\n if k==-1:\n k = min(m,n)\n args = parse()\n args.m,args.n,args.k,args.comm = m,n,k,self.comms\n args.eps = np.finfo(data.dtype).eps\n if args.m<args.n: args.p_r,args.p_c = 1,self.size\n dsvd = DistSVD(args, data)\n singularValues, U, V = dsvd.svd()\n rel_error = dsvd.rel_error(U, np.diag(singularValues), V)\n if self.global_rank==0: print('relative error is:', rel_error )\n return singularValues,U,V,rel_error", "def svd(matrix):\n u = None\n s = None\n v = None\n ### YOUR CODE HERE\n (u,s,v)=np.linalg.svd(matrix)\n ### END YOUR CODE\n\n return u, s, v", "def get_svd(self,new_df):\n\n\t\tnew_matrix = new_df.as_matrix()\n\t\t\n\t\t#get the mean score of users\n\t\tmean_score = np.mean(new_matrix,axis = 1)\n\t\tnew_matrix_dm = new_matrix - mean_score.reshape(-1,1)\n\t\t#decompose interaction matrix into U, Vt, Sigma\n\t\tU, sigma, Vt = svds(new_matrix_dm, k = 5)\n\t\tsigma = np.diag(sigma)\n\t\tall_user_predicted_ratings = np.dot(np.dot(U, sigma), Vt) + mean_score.reshape(-1, 1)\n\t\tpreds_df = pd.DataFrame(all_user_predicted_ratings, columns = new_df.columns)\n\t\tpreds_df = preds_df.round(1)\n\n\t\treturn preds_df", "def implement_svd(data):\n u, s, v = torch.svd(data) # implement svd\n # note: the u returned by this function only includes the top values.\n # u * s will be equivalent due to the zero terms, but will run more efficiently with this implementation.\n s = torch.diag(s) # turn s into a diagonal matrix\n transformed_matrix = torch.mm(u, s) # u * s\n return l21_reg(s), transformed_matrix # return the L2,1 regularization term and matrix", "def fit_svd(self):\n\n # U has the eigenvectors of G.Gt as columns ()\n # S has square roots of the eigenvalues of G.Gt and Gt.G in its diagonal\n # The square roos of the eigenvalues are called singular values\n # V has the eigenvectors of Gt.G as columns ()\n # full_matrices set to false will set the Vt matrix to a shape m x n\n\n U, S, Vt = linalg.svd(self.norm_matrix, full_matrices=False)\n\n # Compute the eigenvalues\n eig_val = (S ** 2)\n\n # Explained_variance tell us how much of the variance in the data each eigen value explains\n explained_variance = eig_val / (self.n_samples - 1)\n # total_var is the total variance in the data\n total_var = explained_variance.sum()\n explained_variance_ratio = explained_variance / total_var\n # The cumulative sum of all ratios\n ratio_cumsum = np.cumsum(explained_variance_ratio)\n\n # We search in the cumsum for the index of the value 
which, when added, corresponds to the quality_percent\n # The index of the cumsum gives us the components we need to add to explain X quality percent of our data\n n_components = np.searchsorted(ratio_cumsum, self.quality_percent, side='right') + 1\n\n self.components = Vt[:n_components]\n print(\"The principal components have been calculated using svd\", self.components.shape)\n\n return self.components", "def R_SVD(U, sigma, V, time, f_fault, tolerance = 0.02, PMItreshold = 1.0, estimate_xi_func=get_SVDxi, estimate_xi_func_params=None):\n\n # Get the search region\n m = sigma.size\n f_fault = np.asanyarray(f_fault)\n dt = time[1] - time[0]\n T0 = np.zeros(f_fault.size, dtype=int)\n T1 = np.zeros(f_fault.size, dtype=int)\n PMI = []\n W = []\n for i in range(0, f_fault.size):\n T0[i] = int( np.floor( (1.0/(f_fault[i]*(1.0 + tolerance)))/dt ) )\n T1[i] = int( np.ceil( (1.0/(f_fault[i]*(1.0 - tolerance)))/dt ) )\n if T1[i] == T0[i]:\n T1[i] += 1\n PMI.append(np.zeros(m))\n W.append(np.zeros(m))\n\n # Calculate PMI for each fault type\n for i in range(0, m):\n if estimate_xi_func_params is None:\n a_i = estimate_xi_func(U, sigma, V, i)\n else:\n a_i = estimate_xi_func(U, sigma, V, i, estimate_xi_func_params)\n a_i = envelope(a_i)\n a_i -= a_i.mean()\n R_a = fftwconvolve(np.flipud(a_i), a_i)\n # Keep positive part\n R_a = R_a[a_i.size-1:]\n # Scale by dividing by number of elements\n R_a = R_a / np.arange(R_a.size, 0, -1)\n # Get T_0\n R_0 = R_a[0]\n # Calculate R and PMI for each fault type\n for k in range(0, f_fault.size):\n # print('T0[%i] = %f, T1[%i] = %f, R_a.size = %i' % (k, T0[k], k, T1[k], R_a.size))\n # print(R_a[481:501])\n R_T = np.max(R_a[T0[k]:T1[k]])\n PMI[k][i] = R_T/(R_0 - R_T)\n\n # Calculate weights\n for k in range(0, f_fault.size):\n temp = np.sum(PMI[k])\n for i in range(0, m):\n if PMI[k][i] > PMItreshold:\n W[k][i] = PMI[k][i]/temp\n\n # Return data\n return PMI, W", "def svd_S(T):\n try:\n S = splinalg.svd(T, full_matrices=False, compute_uv=False)\n except splinalg.LinAlgError:\n S = splinalg.svd(T, full_matrices=False, lapack_driver='gesvd', compute_uv=False)\n return S", "def svr_kernel(name, kernel, epsilon=None, **kwargs):\n def _name(msg):\n return '%s.%s_%s' % (name, kernel, msg)\n\n hp_space = _svm_hp_space(_name, kernel=kernel, **kwargs)\n hp_space.update(_svr_hp_space(_name, epsilon))\n return scope.sklearn_SVR(**hp_space)" ]
[ "0.7064768", "0.6497113", "0.647693", "0.63251233", "0.6254241", "0.6164676", "0.61603194", "0.60960335", "0.60589564", "0.5987382", "0.59055364", "0.58650464", "0.57826054", "0.57542855", "0.573676", "0.56839186", "0.5663144", "0.5609878", "0.5581168", "0.5574882", "0.5572632", "0.5565599", "0.55464345", "0.5502492", "0.54823816", "0.5477199", "0.54482687", "0.5443486", "0.5436408", "0.54319876" ]
0.6894022
1
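Illustrative aside, not part of the dataset row above: the factorise_kernel entry keeps just enough singular vectors to retain a chosen share of the squared singular-value mass. A minimal sketch of that truncation rule on an assumed small kernel, using the 0.999 threshold from the entry:

```python
import numpy as np
import numpy.linalg as la

# Sketch of energy-based SVD truncation; the kernel here is an assumption.
kernel = np.outer(np.hanning(11), np.hanning(11)) + 0.01 * np.eye(11)
u, sigma, v = la.svd(kernel)
total_info = np.sum(sigma ** 2)
keep_k = np.flatnonzero(np.cumsum(sigma ** 2) > 0.999 * total_info)[0] + 1
approx = (u[:, :keep_k] * sigma[:keep_k]) @ v[:keep_k, :]  # low-rank rebuild
print(keep_k, la.norm(kernel - approx))  # rank kept and reconstruction error
```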
Close a registry entry.
def close_registry_entry(cls, value): out = False if not value.closed: # pragma: debug value.close() out = True return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n \n self.__exit__(None, None, None)\n return", "def __del__(self):\n for key_path_prefix, registry_file in iter(self._registry_files.items()):\n self._registry_files[key_path_prefix] = None\n if registry_file:\n registry_file.Close()", "def close(self):\n self.exit()", "def close():\n sys.exit()", "def close_registrar(self):\n self.registrar.destroy()", "def delete_registry_value(hive, key, value):\n access = winreg.KEY_ALL_ACCESS\n with winreg.OpenKeyEx(hive, key, 0, access) as k:\n winreg.DeleteValue(k, value)", "def close(self):\n\n self.r.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def __exit__(self, type, value, traceback):\n self.close()", "def close(self):\n\t\t_moRedis.delete(self.__id)", "def close(self):\n self.call('close')", "def __exit__(self, type_, value, traceback):\n self.close()", "def close():", "def close(self) -> None:\n ...", "def close(self) -> None:\n ...", "def exit(self):\n self.close()", "def close_and_exit(self):\n self.close()\n sys.exit(1)", "def Close(self):", "def close(self):\n self._command = \"close\"", "def __exit__(self, type, value, traceback):\n\n self.close()", "def close(self):\n ...", "def close(self):\n ...", "def close(self):\n ckresult(_dll.FMOD_System_Close(self._ptr))", "def close( self ):\n self.__del__()", "def close(self):\n self.__CheckOpen('close')\n self.__closed = True" ]
[ "0.62410325", "0.62410325", "0.6005659", "0.6003081", "0.59938776", "0.5955254", "0.592907", "0.58687013", "0.57975334", "0.5778289", "0.5778289", "0.5778289", "0.5778289", "0.5778289", "0.57693046", "0.57626265", "0.5747791", "0.5740424", "0.5720488", "0.5720488", "0.5716972", "0.57169664", "0.57117724", "0.5696709", "0.5694895", "0.5683764", "0.5683764", "0.5681563", "0.56668615", "0.56611246" ]
0.73456883
0
Record the current position in the file/series.
def record_position(self): _rec_pos = self.fd.tell() _rec_ind = self._series_index return _rec_pos, _rec_ind
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_position(self, file_pos, series_index=None):\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)", "def record(self, pos):\n self.lasts += (datetime.now(), pos),\n if len(self.lasts) > 10:\n self.lasts.pop(0)", "def savepos(self):\n self.out.write(self.csi + \"s\")", "def current_pos(self):\n new_ptr = _decode(self._read_fixed_block(0x0020),\n lo_fix_format['current_pos'])\n if new_ptr is None:\n raise ObservationError('current_pos is None')\n if new_ptr == self._current_ptr:\n return self._current_ptr\n if self._current_ptr and new_ptr != self.inc_ptr(self._current_ptr):\n for k in reading_len:\n if (new_ptr - self._current_ptr) == reading_len[k]:\n log.error('changing data format from %s to %s' % (self.data_format, k))\n self.data_format = k\n break\n self._current_ptr = new_ptr\n return self._current_ptr", "def pos(self):\n return self.file.tell()", "def track(self, offset: int):", "def update(self):\n if self._position:\n self._frequency = len(self._position)\n for i in range(len(self._position)):\n # convert from text file\n self._position[i] = float(self._position[i])\n self._recency = [self._position[0]]\n for i in range(1, self._frequency):\n self._recency.append(self._position[i] - self._position[i - 1])\n self._recency.append(1 - self._position[self._frequency - 1])\n self._isUpdated = True", "def set_position(self, posicion):\n\n # FIXME: Actualmente no funciona bien\n posicion = int(posicion)\n if posicion != self.posicion:\n self.posicion = posicion\n self.entrada.write('seek %s %i 0\\n' % (posicion, 1))\n self.entrada.flush()", "def tell(self):\n return self._seek_pos", "def _update_offset_file(self):\n if self.on_update:\n self.on_update()\n offset = self._filehandle().tell()\n inode = stat(self.filename).st_ino\n fh = open(self._offset_file, \"w\")\n fh.write(\"%s\\n%s\\n\" % (inode, offset))\n fh.close()\n self._since_update = 0", "def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)", "def remember_pos(self, directory, position):\n self.dir_pos[directory] = position", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._fp.tell())\n\n self._fp.seek(loc)", "def position(self, value):\r\n self._position = value\r\n\r\n # We need to update this so that we start re-calculating the\r\n # the measurements according to this change in status. 
That is\r\n # if we just opened, then we cannot have any current or\r\n # power flowing.\r\n self.update_mmtr(datetime.utcnow())", "def save(self) -> None:\n self._save_marker = self.get_next()", "def current_frame(self, n):\n self.sound.seek(n)\n self._current_frame = n", "def tell(self):\n _complain_ifclosed(self._closed)\n return self._position", "def set_reader_position(self):\n recorded_position = self.process_application.get_recorded_position(\n self.upstream_name\n )\n self.reader.seek(recorded_position)", "def tell(self):\n return self._pos", "def tell(self):\n return self.offset", "def print_position(self):\n\n while True:\n lat = self.vehicle.location.global_relative_frame.lat\n lon = self.vehicle.location.global_relative_frame.lon\n try:\n self.pathFile.write('{0},{1}'.format(lat, lon))\n time.sleep(1)\n except:\n break\n #print ('{0},{1}'.format(lat, lon))", "def tell(self):\n return self._upload_position", "def _seek(self, iteration):\n\n # Validate it\n if iteration < 1:\n iteration = 1\n\n # Seek to one iteration before the specified iteration, then run the\n # network for one iteration, so the inspectors will show the right data\n self.iteration = iteration - 1\n self.experiment.position.iter = iteration - 1\n for sensor in self.sensors:\n assert sensor.type == 'VectorFileSensor'\n sensor.setParameter('position', self.iteration)\n self._step()", "def set_marker(self, packet):\n if self.file_type == 'csv':\n self.write_data(packet=packet)\n elif self.file_type == 'edf':\n timestamp, code = packet.get_data()\n if self._rectime_offset is None:\n self._rectime_offset = timestamp[0]\n timestamp = timestamp-np.float64(self._rectime_offset)\n self._file_obj.writeAnnotation(timestamp[0], 0.001, str(int(code[0])))", "def advance_in_file(self, file_pos):\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise", "def SetCurrentPosition(self,pos):\n\n if self.Reverse: pos*=-1\n self.Bus.Transaction(chr(self.Address)+chr(0x40)+struct.pack('@l',pos))", "def print_pos(self, byte_offset=-1):\n if self._debug_level > 3:\n if hasattr(self, 'ensemble'):\n k = self.ensemble.k\n else:\n k = 0\n print(' pos: %d, pos_: %d, nbyte: %d, k: %d, byte_offset: %d' %\n (self.f.tell(), self._pos, self._nbyte, k, byte_offset))", "def refresh_mark(self):\n current = self.player.current_position()\n if current != None:\n if self.prev_song != None and self.prev_song < len(self.buf):\n self.buf[self.prev_song] = ' ' + self.buf[self.prev_song][1:]\n self.buf[current] = '-' + self.buf[current][1:]\n self.prev_song = current\n # Move cursor to current position.\n vim.current.window.cursor = (current + 1, 1)", "def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._tell)\n\n self._fp_left.seek(loc)\n self._fp_right.seek(loc)\n self._tell = loc\n self._buf = Buffer()" ]
[ "0.66489357", "0.66329324", "0.6479423", "0.64465714", "0.61586374", "0.61237216", "0.6114697", "0.610729", "0.6095362", "0.60406315", "0.5951866", "0.59466505", "0.5869703", "0.58657867", "0.58627445", "0.58404976", "0.5832795", "0.58294195", "0.5811629", "0.5791412", "0.57500947", "0.57394385", "0.57351726", "0.5733135", "0.57191616", "0.57077116", "0.56901973", "0.5670479", "0.56697345", "0.5636059" ]
0.7691817
0
r"""Change the position in the file/series.
def change_position(self, file_pos, series_index=None):
    if series_index is None:
        series_index = self._series_index
    self.advance_in_series(series_index)
    self.advance_in_file(file_pos)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPosition(position):", "def set_position(self, posicion):\n\n # FIXME: Actualmente no funciona bien\n posicion = int(posicion)\n if posicion != self.posicion:\n self.posicion = posicion\n self.entrada.write('seek %s %i 0\\n' % (posicion, 1))\n self.entrada.flush()", "def update_position(position):\n pass", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def setPosition(*args):", "def set_position(self, position):\n self.position = tuple(position)", "def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()", "def set_position(self, idx, pos):\n if self.EMULATOR_MODE:\n return\n if idx >= self.nleaflets or idx < 0:\n raise IndexError('index specified is out of bounds')\n self._fserial.write(self.MAGIC_BYTES + bytes([idx]) + pos.to_bytes(2, byteorder='big', signed=False) )\n self._fserial.reset_input_buffer()", "def set_position(self, new_pos):\n self._position = new_pos", "def seek(self, offset, relativeTo):\n self.oFile.seek(offset, relativeTo)", "def position(self, pos: int):\n self.__pos = pos", "def advance_in_file(self, file_pos):\n if self.is_open:\n try:\n self.fd.seek(file_pos)\n except (AttributeError, ValueError): # pragma: debug\n if self.is_open:\n raise", "def update(self):\n if self._position:\n self._frequency = len(self._position)\n for i in range(len(self._position)):\n # convert from text file\n self._position[i] = float(self._position[i])\n self._recency = [self._position[0]]\n for i in range(1, self._frequency):\n self._recency.append(self._position[i] - self._position[i - 1])\n self._recency.append(1 - self._position[self._frequency - 1])\n self._isUpdated = True", "def set_pos(self, newpos : list) :\n if len(newpos) == 2 :\n self.pos = list(newpos).copy()\n else :\n raise UserWarning('wrong position passed')", "def set_position(self, position):\n raise NotImplementedError()", "def setPosition(self,newPos):\n self._position = newPos", "def _update_offset_file(self):\n if self.on_update:\n self.on_update()\n offset = self._filehandle().tell()\n inode = stat(self.filename).st_ino\n fh = open(self._offset_file, \"w\")\n fh.write(\"%s\\n%s\\n\" % (inode, offset))\n fh.close()\n self._since_update = 0", "def set_pos(self, x):\n self._pos = x", "def seek(self, val):\n if self.p:\n self.p.set_position(val/100.0 + self.p.get_position())", "def set_position(self, position):\n self.set_current_position(position)", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._fp.tell())\n\n self._fp.seek(loc)" ]
[ "0.72056144", "0.70718175", "0.67845094", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6580323", "0.6469467", "0.6442066", "0.64292395", "0.64211136", "0.6298545", "0.62945306", "0.62706894", "0.62686855", "0.6265556", "0.6263081", "0.6241287", "0.6236901", "0.6218564", "0.62047076", "0.6183511", "0.6183406" ]
0.84161323
0
r"""Advance to a certain position in the current file.
def advance_in_file(self, file_pos):
    if self.is_open:
        try:
            self.fd.seek(file_pos)
        except (AttributeError, ValueError):  # pragma: debug
            if self.is_open:
                raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def advance(self) -> None:\n self.current_token = self.jack_file_tokens[self._token_idx]\n self._token_idx += 1", "def change_position(self, file_pos, series_index=None):\n if series_index is None:\n series_index = self._series_index\n self.advance_in_series(series_index)\n self.advance_in_file(file_pos)", "def _advance(self):\n self._current += 1", "def advance(self):\n self.pos += 1\n if self.pos > len(self.syntax) - 1:\n self.current_char = None\n else:\n self.current_char = self.syntax[self.pos]", "def _advance_line(self):\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n while self.current_line.startswith('#') or self.current_line == '':\n self.current_index += 1\n if self.current_index >= len(self.file):\n self.current_line = 'EOF'\n return\n self.current_line = self.file[self.current_index].strip()\n self._gobble_comments()", "def _seek(self, offset):\n assert offset % self.recordsize == 0\n file_number, file_offset = divmod(offset,\n self.filesize - self.header_size)\n self.open(file_number)\n self.fh_raw.seek(file_offset + self.header_size)\n self.offset = offset", "def _advance(self, c=1):\n self._index += c", "def advance(self):\n self.pos += 1\n if self.pos < len(self.text):\n self.current_char = self.text[self.pos]\n else:\n self.current_char = None", "def seek(self, offset, relativeTo):\n self.oFile.seek(offset, relativeTo)", "def _advance(self):\n self._current += self._increment # Accessing the superclass's field", "def advance(self):\n self.pos += 1\n if self.pos > len(self.text) - 1:\n self.current_char = None # Indicates end of input\n else:\n self.current_char = self.text[self.pos]", "def advance(self):\n in_bytes = self._pre_pos\n for tag in self._reader:\n if isinstance(tag, Tag):\n # skip the Metadata in flv stream.\n if not self.handle_magic_head(tag):\n if tag.type == VIDEO_TAG and tag.is_keyframe:\n self.append_keyframe(tag)\n self._pre_pos = self.position()\n in_bytes = self._pre_pos - in_bytes\n if in_bytes > 0:\n self.active()\n else:\n self.inactive()", "def advance(self):\n self._current_inst += 1\n self._line = self._lines[self._current_inst].strip()", "def _advance(self):\t\t# override inherited version\n self._current *= self._base", "def advance(self):\n self.currentIndex += 1\n self.updateCurrentCommand()", "def go_to_position(self, position):\n raise NotImplementedError", "def advance(self) -> None:\n pass", "def _advance(self):\n self._prev, self._current = self._current, self._prev + self._current", "def forward(self):\n self.position += 1", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._fp.tell())\n\n self._fp.seek(loc)", "def advance(self, distance):\n self.cursor += distance", "def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0", "def move_forward(self):\n self.pos += 1\n if self.pos > len(self.text) - 1:\n self.current_char = None\n else:\n self.current_char = self.text[self.pos]", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n 
self.progressbar.update(-self._tell)\n\n self._fp_left.seek(loc)\n self._fp_right.seek(loc)\n self._tell = loc\n self._buf = Buffer()", "def seek(self, offset, whence=io.SEEK_SET):\n if whence == io.SEEK_SET:\n self._offset = offset\n elif whence == io.SEEK_CUR:\n self._offset += offset\n elif whence == io.SEEK_END:\n self._offset = self.end_of_file + offset\n return self._offset", "def seekEnd(self):\n self.oFile.seek(0, 2)", "def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)", "def advance(self):\n raise NotImplementedError(\"Fragment.advance\")", "async def seek(self, pos: int):\n pos = max(pos, 0) # Prevent seeking before start of track\n await self._bot.lavalink.ws.send(op='seek', guildId=self.guild_id, position=pos)" ]
[ "0.72152966", "0.69049877", "0.6838981", "0.6814385", "0.6776304", "0.6665995", "0.665468", "0.6625875", "0.6596541", "0.65920854", "0.65885144", "0.6423817", "0.62222093", "0.61866915", "0.61842614", "0.6172611", "0.6140963", "0.61377656", "0.61164963", "0.6113868", "0.60734636", "0.60608476", "0.6039304", "0.60278386", "0.5998243", "0.5995119", "0.59916925", "0.5926783", "0.5920471", "0.5904252" ]
0.7815316
0
Return True if there are pending symbolic updates for any one of the variables in `args`. If called with no arguments, return True if the update dictionary is nonempty.
def pending_update(*args):
    if len(args) == 0:
        return len(cf.symbolic_updates) > 0
    else:
        for x in _expand_args(args):
            if is_graph_object(x) and x in cf.symbolic_updates:
                return True
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(args):\n\n home = args.assert_home()\n\n if args.all:\n env_repos = list(home.iter_env_repos())\n else:\n env_repos = [home.get_env_repo(x) for x in args.repos] if args.repos else [home.get_env_repo()]\n\n success = True\n\n for env_repo in env_repos:\n did_update = env_repo.update(force=args.force)\n success = success and did_update\n\n return int(not success)", "def isUpdated(self, updates: Dict[str, Dict[str, str]]) -> bool:\n\n for objnam in self._bodies & updates.keys():\n if {STATUS_ATTR, HEATER_ATTR, HTMODE_ATTR} & updates[objnam].keys():\n return True\n return False", "def check_dirty(args):\n man = load_manifest()\n any_dirty = False\n for (name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n any_dirty = check_dirty_repo(repo) or any_dirty\n return any_dirty", "def _r_env_needs_updating(local_history: History, remote_history: History) -> bool:\n if not local_history:\n new_actions = remote_history.actions\n else:\n new_actions = set(remote_history.actions) - set(local_history.actions)\n for action in new_actions:\n if action.startswith(R_COMMAND):\n return True\n return False", "def check_updates(self, *, force: bool = False, ignore_updates: bool = False) -> Optional[PaperJar]:\n # TODO: Better name (check_updates + ignore_updates option is stupid)\n # I'm thinking just make 'validate_cache' abstract and add an option to ignore that\n pass", "def is_reachable_mut(self, mut, prev_args):\n mut_args = mut.args()\n for arg in prev_args:\n if arg not in mut_args:\n return False\n return True", "def check_args(args):\n for arg in vars(args):\n if getattr(args, arg):\n return True\n return False", "def _is_updated(self, var):\n # unknown\n if var.find(\"#u\") != -1:\n return True\n # constant\n if AsmParser.is_constant(var):\n return False\n cvar = self.arch.expand_reg_expr(var)\n for wrt in self.syncinfo.wrt_set:\n # If the var is updated with the lexicographically same value,\n # then we consider that the var is not updated.\n if var == wrt:\n continue\n # Otherwise, check it is updated.\n if var.find(wrt) != -1:\n # not in write set: comparison in a lexical form\n return True\n elif AsmParser.is_register(wrt):\n # not in write set: comparison in a normalized form\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cvar, cwrt) != None:\n return True\n return False", "def satisfy(expression, true_var):\n if len(expression.args) == 0:\n if expression.__repr__() == true_var.__repr__():\n return True\n else:\n return False\n cnt = 0\n while True:\n if cnt >= len(expression.args):\n break\n arg = expression.args[cnt]\n changed = satisfy(arg, true_var)\n if isinstance(changed, Expr):\n expression.args[cnt] = changed\n elif changed is True:\n expression.args.pop(cnt)\n expression = expression.args[0]\n cnt += 1\n return expression", "def is_complete(self, variables):\n for var in variables:\n if not self.has_assignment_for(var):\n return False\n\n return True", "def _local_needs_update(\n local_history: History, remote_history: History, yes: bool = True\n) -> bool:\n if not local_history and remote_history:\n return True\n if _no_new_actions_in_local(\n local_history=local_history, remote_history=remote_history\n ):\n return True\n if _actions_in_different_order(\n local_history=local_history, remote_history=remote_history\n ):\n if not yes and not prompt_yes_no(\n prompt_msg=\"Remote environment has same packages but in different order, \"\n \"Should we overwrite local with remote environment\"\n ):\n raise 
CondaEnvTrackerPullError(\n \"Actions in a different order; user elected not to update\"\n )\n return True\n return False", "def checkargs_wprefresh(cmdargs):\n flags = []\n # Collect Static for wprefresh\n yesforced = False\n noforced = False\n restartonly = False\n if cmdargs:\n for yesarg in ('-c', '--collect'):\n if yesarg in cmdargs:\n yesforced = True\n break\n for noarg in ('-C', '--nocollect'):\n if noarg in cmdargs:\n noforced = True\n break\n if yesforced and noforced:\n print('\\nBoth \\'--collect\\' and \\'--nocollect\\' '\n 'args used, this won\\'t work.')\n sys.exit(1)\n for restartarg in ('-r', '--restart'):\n if restartarg in cmdargs:\n restartonly = True\n break\n\n if (not yesforced) and (not noforced) and (not restartonly):\n collectstatic = input('Would you like to collect static files? '\n '(yes/no): ')\n if collectstatic.lower().startswith('y'):\n flags.append('--collect')\n else:\n flags.append('--nocollect')\n # Get live/skip args\n liveforced = False\n skipforced = False\n if cmdargs:\n # live args used?\n for livearg in ('-l', '--live'):\n if livearg in cmdargs:\n liveforced = True\n break\n # skip args used?\n for skiparg in ('-R', '--norestart'):\n if skiparg in cmdargs:\n skipforced = True\n break\n\n # Ambiguos args.\n if skipforced and restartonly:\n print('\\nBoth \\'--restart\\' and \\'--norestart\\' args used. '\n 'this won\\'t work.')\n sys.exit(1)\n\n if skipforced and liveforced:\n print('\\nBoth \\'--live\\' and \\'--norestart\\' args used, '\n 'this won\\'t work.')\n sys.exit(1)\n\n needlive = (not liveforced) and (not skipforced)\n if (not is_test_site()) and (needlive):\n livesite = confirm('*** This is the LIVE site! ***\\n'\n 'Would you like to restart apache?')\n if livesite:\n flags.append('--live')\n else:\n flags.append('--norestart')\n return flags", "def solved(self):\n if not self.all_variables_assigned():\n return False\n for constraint in self.constraints:\n if not constraint.satisfied(*[self.var_dict[name] for name in constraint.var_names]):\n return False\n return True", "def _update_same(self, update_set):\n for upd in update_set:\n cupd = None\n for rd, wrt, inst in self.syncinfo.rd_wrt_list:\n log.debug(\" UPD0-CHK: %s - RD: %s - WRT: %s [%s]\" \\\n % (upd ,rd, wrt, inst))\n if wrt == \"\":\n continue\n if upd == wrt:\n if self._is_updated(rd):\n log.debug(\" UPD0-FAIL: %s - RD: %s - WRT: %s [%s]\"\\\n % (upd ,rd, wrt, inst))\n return False\n elif AsmParser.is_register(wrt):\n cupd = self.arch.expand_reg_expr(upd) if not cupd else cupd\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cupd, cwrt) != None:\n if self._is_updated(rd):\n log.debug(\" UPD1: %s - RD: %s - WRT: %s [%s]\" \\\n % (upd ,rd, wrt, inst))\n return False\n return True", "def _full_rename(args):\n return args.ns and all(map(args.rename.affects, args.ns))", "def do_update(args):\n # if args.verbosity > 0:\n log.info(\"Verbosity: %d\" % args.verbosity)\n log.info(\"Data directory: %s\" % get_data_dir(args))\n log.info(\"Updating...\")\n csl = update_list(args, 'csl')\n # if args.verbosity > 0:\n log.info(\"Done.\")\n return True", "def needs_update(self) -> bool:\n return False", "def _is_op_defined(t_vars) -> bool:\n return all(t_var.name.startswith(\"Variable\") for t_var in t_vars)", "def update(*args):", "def should_update():\n while True:\n server = cfg.get_server()\n print(\"Server to update: %s\\n\" % server.get(\"url\"))\n answer = (\n input(\n \"Are you sure you want to update Jira with the \"\n + \"information above? 
[y/n] \"\n )\n .lower()\n .strip()\n )\n if answer in set([\"y\", \"n\"]):\n return answer\n else:\n print(\"Incorrect input: %s\" % answer)", "def valid_update_flags(self) -> bool:\n if CoronaCaseRaw.objects.all().count() < 2:\n return True\n return not CoronaCaseRaw.objects.filter(update_flag=(not self.latest_flag())).exists()", "def cached(self, args) -> bool:\n return all([art.built for art in self.artifacts])", "def force_update(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"force_update\")", "def check_update_sanity(self):\n for update in crest.get_all_updates(self.model):\n assert update._name is not None, f\"There is an Update in {update._parent._name} ({update._parent.__class__.__name__}) whose name is 'None'\"\n assert update._name != \"\", f\"There is an Update in {update._parent._name} ({update._parent.__class__.__name__}) whose name is empty string\"\n\n assert isinstance(update.state, crest.State), f\"Update {update._name}'s state is not a crest.State. It is: {update.state} ({update.state.__class__})\"\n assert update.state in crest.get_states(update._parent), f\"Update's state {update.state._name} ({update.state}) is not in the states of entity {update._parent._name} ({update._parent})\"\n\n assert isinstance(update.target, crest.Port), f\"Update {update._name}'s target is not a crest.Port\"\n assert update.target in api.get_targets(update._parent), f\"Update's target {update.target._name} ({update.target}) is not in the targets of entity {update._parent._name} ({update._parent})\"\n\n assert isinstance(update.function, (crestml.LearnedFunction, types.FunctionType)), f\"Update {update._name}'s function needs to be of type types.FunctionType or crestdsl.ml.LearnedFunction\"\n assert 'dt' in inspect.signature(update.function).parameters, f\"Update {update._name}'s function has no dt parameter. entity: {update._parent._name} ({update._parent.__class__.__name__})\"\n assert 'self' in inspect.signature(update.function).parameters, f\"Update {update._name}'s function has no self parameter. 
entity: {update._parent._name} ({update._parent.__class__.__name__})\"\n assert len(inspect.signature(update.function).parameters) == 2, f\"An update should have one one argument 'dt' besides 'self'\"\n\n for port in SH.get_read_ports_from_update(update.function, update):\n assert port in api.get_sources(update._parent), f\"Update {update._name} seems to be reading a port {port._name} ({port}) which is not in the sources of its entity {update._parent._name} ({update._parent})\"", "def _can_do_updates(self):\n return True", "def goal_test(self, state):\r\n assignment = dict(state)\r\n return (len(assignment) == len(self.variables)\r\n and all(self.nconflicts(variables, assignment[variables], assignment) == 0\r\n for variables in self.variables))", "def has_unapplied_change(self):\n for name in self.params_to_display.keys():\n if self._tkvar_changed(name):\n return True\n return False", "def updateEquations(equations: List, updatedUnknowns: Set, updateAll: bool = False):\n for equation in equations:\n if updateAll or any(unknown in equation.get_unknowns() for unknown in updatedUnknowns):\n equation.update()\n updatedUnknowns = set()", "def should_check_for_binary_versions(self):\n explicitly_asked_for_binaries_check = 'CHECK_BINARIES_VERSIONS' in config_vars\n update_was_requested = \"__UPDATE_INSTALLED_ITEMS__\" in config_vars.get(\"MAIN_INSTALL_TARGETS\", []).list()\n retVal = explicitly_asked_for_binaries_check or update_was_requested\n return retVal", "def has(self, *args):\n return _ida_hexrays.lvar_saved_infos_t_has(self, *args)" ]
[ "0.5759457", "0.5594278", "0.5564285", "0.5475863", "0.54533947", "0.5403371", "0.5373276", "0.53273535", "0.5294355", "0.52783906", "0.52717745", "0.52661645", "0.5215869", "0.52099615", "0.5206429", "0.51566654", "0.5129506", "0.51088214", "0.5104792", "0.509193", "0.50616646", "0.5058955", "0.5042388", "0.50333124", "0.5015281", "0.5000954", "0.49862412", "0.49624902", "0.49551407", "0.4931629" ]
0.8218321
0
Return the same function as theano.printing._print_fn, with the difference that 'file' is passed as a keyword argument to print().
def _get_print_fn(file=sys.stdout):
    def _print_fn(op, xin,):
        for attr in op.attrs:
            temp = getattr(xin, attr)
            if callable(temp):
                pmsg = temp()
            else:
                pmsg = temp
            print(op.message, attr, '=', pmsg, file=file)
    return _print_fn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def real_print(*args, **kwargs):\n\n kwargs.setdefault('file', real_stdout)\n _python_print_function(*args, **kwargs)", "def adv_print(*args, start='', in_file = False, **kwargs):\n max_line = kwargs.pop('max_line', False)\n print(kwargs)\n old_stdout = sys.stdout\n value = StringIO()\n sys.stdout = value\n print(*args, **kwargs)\n sys.stdout = old_stdout\n value = value.getvalue()\n value = start + value\n if max_line:\n value = value[:max_line] + '\\n' + value[max_line:]\n if in_file:\n if 'filename' in kwargs:\n filename = kwargs['filename']\n else:\n filename = 'output.txt'\n with open(filename, 'w') as f:\n f.write(value)\n print(value)", "def lines_printed_to(file):\n with io.open(file, 'w') as fp:\n def write_line(s=\"\"):\n PRINT(s, file=fp)\n yield write_line", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n # If the file has an encoding, encode unicode with it.\r\n if (isinstance(fp, file) and\r\n isinstance(data, unicode) and\r\n fp.encoding is not None):\r\n errors = getattr(fp, \"errors\", None)\r\n if errors is None:\r\n errors = \"strict\"\r\n data = data.encode(fp.encoding, errors)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def output(*args):\n print(*args, end='', file=file)", "def print_(*args, **kwargs):\n fp = kwargs.pop(\"file\", sys.stdout)\n if fp is None:\n return\n\n def write(data):\n if not isinstance(data, basestring):\n data = 
str(data)\n fp.write(data)\n want_unicode = False\n sep = kwargs.pop(\"sep\", None)\n if sep is not None:\n if isinstance(sep, unicode):\n want_unicode = True\n elif not isinstance(sep, str):\n raise TypeError(\"sep must be None or a string\")\n end = kwargs.pop(\"end\", None)\n if end is not None:\n if isinstance(end, unicode):\n want_unicode = True\n elif not isinstance(end, str):\n raise TypeError(\"end must be None or a string\")\n if kwargs:\n raise TypeError(\"invalid keyword arguments to print()\")\n if not want_unicode:\n for arg in args:\n if isinstance(arg, unicode):\n want_unicode = True\n break\n if want_unicode:\n newline = unicode(\"\\n\")\n space = unicode(\" \")\n else:\n newline = \"\\n\"\n space = \" \"\n if sep is None:\n sep = space\n if end is None:\n end = newline\n for i, arg in enumerate(args):\n if i:\n write(sep)\n write(arg)\n write(end)", "def debug_print(*args, sep=' ', file=None):\n file_name = inspect.stack()[1][1]\n line_no = inspect.stack()[1][2]\n function_name = inspect.stack()[1][3]\n msg = sep.join(map(str, args))\n print(f'{msg}: File \"{file_name}\", line {line_no}, in {function_name}', file=file)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def print_(*args, **kwargs):\r\n fp = kwargs.pop(\"file\", sys.stdout)\r\n if fp is None:\r\n return\r\n def write(data):\r\n if not isinstance(data, basestring):\r\n data = str(data)\r\n fp.write(data)\r\n want_unicode = False\r\n sep = kwargs.pop(\"sep\", None)\r\n if sep is not None:\r\n if isinstance(sep, unicode):\r\n want_unicode = True\r\n elif not isinstance(sep, str):\r\n raise TypeError(\"sep must be None or a string\")\r\n end = kwargs.pop(\"end\", None)\r\n if end is not None:\r\n if isinstance(end, unicode):\r\n want_unicode = True\r\n elif not isinstance(end, str):\r\n raise TypeError(\"end must be None or a string\")\r\n if kwargs:\r\n raise TypeError(\"invalid keyword arguments to print()\")\r\n if not want_unicode:\r\n for arg in args:\r\n if isinstance(arg, unicode):\r\n want_unicode = True\r\n break\r\n if want_unicode:\r\n newline = unicode(\"\\n\")\r\n space = unicode(\" \")\r\n else:\r\n newline = \"\\n\"\r\n space = \" \"\r\n if sep is None:\r\n sep = space\r\n if end is None:\r\n end = newline\r\n for i, arg in enumerate(args):\r\n if i:\r\n write(sep)\r\n write(arg)\r\n write(end)", "def logToFile(output, file): \r\n print( output, file=file )", "def print_file(f):\n print marker\n for files in f:\n print files\n print 
marker", "def prettyprint(self, _file):\n _file.write(\"Function %s returns %s\\n\" % (self.name, self.returnType))\n _file.write(\" local vars\\n\")\n for val in self.vars.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" params\\n\")\n for val in self.params.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" registers\\n\")\n for val in self.virtRegs.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" code\\n\")\n for instr in self.instrs():\n if isinstance(instr, CLABEL):\n indent = \" \"\n else:\n indent = \" \"\n _file.write(indent + str(instr) + \"\\n\")", "def print_func(self, *args):\n return _ida_hexrays.Hexrays_Hooks_print_func(self, *args)", "def curried_printer(*args):\n if pretty:\n pretty_log(function_name, args)\n else:\n generic_log(function_name, args)\n return function_name", "def __call__(self, format, filename):\n # turn the filename into something suitable for use in #define's\n prettyname = filename.replace(\".\", \"_\").upper()\n prettyname = prettyname.replace(\"/\", \"__\")\n prettyname = prettyname.replace(\":\", \"__\")\n prettyname = prettyname.replace(\"-\", \"__\")\n\n # try and open the file\n with open(filename, \"w\") as output:\n self.writeFuncsLut[format]( output, prettyname )", "def _PrintFunc(self, obj=None, verbose=False, summarize=True, recursive=False,\n use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(\n obj, verbose=verbose, recursive=recursive, summarize=summarize,\n format_name='text')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)", "def prettyprint(self, _file):\n for var in self.variables:\n var.prettyprint(_file)\n for fun in self.functions:\n fun.prettyprint(_file)", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def _default_eprint_worker(*args, **kwargs):\r\n kwargs[\"file\"] = sys.stderr\r\n print(*args, **kwargs)", "def print_file(pfile, printer):\n handle = shell.ShellExecuteEx(\n fMask = 256 + 64,\n lpVerb = 'print',\n lpFile= pfile,\n lpParameters = printer\n )\n hprocess = win32event.WaitForSingleObject(handle['hProcess'], -1)\n return hprocess", "def print_function():\n print(\"I'm {}, and I'm printing now\".format(print_function.__name__))", "def file_func(self,):\n return self._file_func", "def Print(self, fname):\n return _hypre.HypreParVector_Print(self, fname)", "def func_printed(self, *args):\n return _ida_hexrays.Hexrays_Hooks_func_printed(self, *args)", "def prettyprint(self, _file):\n xstr = \"reg \" + self.name + \" \" + self.type.desc()\n _file.write(xstr + \"\\n\")", "def get_logging_fn(cls, _get_final_value_fn):\n outfile = _get_final_value_fn('file')\n if not outfile:\n outfile = sys.stdout # possibly rebound by doctest\n\n logger = _get_final_value_fn('logger')\n # 0.2.4 logger can also be a name of a logger\n if logger and isinstance(logger, str): # not None, not ''\n # We can't first check f there IS such a logger.\n # This creates one (with no handlers) if it doesn't exist:\n logger = logging.getLogger(logger)\n # If logger has no handlers then it can't write anything,\n # so we'll fall back on print\n if logger and not logger.hasHandlers():\n logger = None\n loglevel = _get_final_value_fn('loglevel')\n # Establish logging function\n logging_fn = (partial(logger.log, loglevel)\n if logger else\n lambda msg: print(msg, file=outfile, flush=True))\n# lambda *pargs, **pkwargs: print(*pargs, file=outfile, flush=True, **pkwargs))\n # 0.2.4 - 
Everybody can indent.\n # loggers: just use formatters with '%(message)s'.\n return logging_fn", "def Print(self, fname, offi=0, offj=0):\n return _hypre.HypreParMatrix_Print(self, fname, offi, offj)", "def print_tables(self,\n wfns=None, file=None,\n print_intensities=True,\n print_energy_corrections=True,\n print_transition_moments=True,\n operators=None,\n logger=None,\n sep_char=\"=\", sep_len=100):\n\n if wfns is None:\n wfns = self.get_wavefunctions()\n\n if isinstance(logger, Logger):\n logger = logger\n elif logger is True or logger is None:\n logger = Logger()\n else:\n logger = Logger(logger)\n\n\n self.print_output_tables(wfns=wfns, file=file,\n print_intensities=print_intensities,\n print_energy_corrections=print_energy_corrections,\n print_transition_moments=print_transition_moments,\n operators=operators, logger=logger,\n sep_char=sep_char, sep_len=sep_len)\n\n return wfns", "def get_print_func(logger=None):\n return print if logger is None else logger.info" ]
[ "0.7028008", "0.60618174", "0.58106077", "0.58032954", "0.57211655", "0.57190937", "0.57069385", "0.568632", "0.5670888", "0.5670888", "0.56702733", "0.5655732", "0.56508785", "0.5643723", "0.55961174", "0.55758446", "0.55572623", "0.54762155", "0.5462057", "0.5454509", "0.5436825", "0.5408373", "0.5404814", "0.53582233", "0.5283842", "0.5280011", "0.5216808", "0.5215957", "0.5206142", "0.5188398" ]
0.7636272
0
Helper function for printing just one element in an array. All parameters except `idx` are the same as for `print`. Returns an identity operation on `x`, so that it can be used as follows
>>> x = shim.tensor(np.arange(100, 0.1))
>>> x = shim.print_array(x, idx=3)
def print_array(x, idx=slice(None), message=None, message_prefix="SHIM - ", file=sys.stdout):
    return set_subtensor(x[idx], print(x[idx],
                                       message=message,
                                       message_prefix=message_prefix,
                                       file=file))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, \"\n \"format='%s', itemsize=%s, flags=%s)\" %\n (x, nd.shape, nd.strides, nd.suboffsets, offset,\n nd.format, nd.itemsize, flags))\n sys.stdout.flush()", "def identity( array ):\n return _myarray.identity( array )", "def _printFromIndex(self, index):\n ret = str(self.arr[index])\n iterator = index + 1\n while iterator != index:\n ret += ' {}'.format(self.arr[iterator])\n iterator = iterator + 1\n iterator = iterator % self.size\n return ret", "def __getitem__(self, *args):\n return _ida_hexrays.fnum_array___getitem__(self, *args)", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def head(array) -> T:\n return array[0]", "def print_img_at_idx(self, idx):\n\n img = self.images[idx]\n print_img(img)", "def real(x):\n return x[..., 0]", "def _show_full_tensor(tensor):\n\n return _print_tensor(tensor_name, -1, tensor, tensor)", "def printArray(arr):\n for entry in arr:\n print(entry)", "def pov_array(arr):\n return \"array[{}] {{{}}}\".format(len(arr), \", \".join(str(x) for x in arr))", "def _print_tensor(tensor_name, num_elements, tensor, output_tensor):\n\n if self._parameters.is_brief_mode():\n if tensor_name not in tensor_trace_order.tensorname_to_cache_idx:\n raise ValueError(\n 'Tensor %s with name %s is not in the tensorname_to_cache_idx' %\n (tensor, tensor_name))\n msg = '%d' % tensor_trace_order.tensorname_to_cache_idx[tensor_name]\n else:\n msg = '\"%s\"' % tensor_name\n\n if self._parameters.trace_dir:\n output_path = os.path.join(\n self._parameters.trace_dir,\n _TRACE_FILE_NAME + self._get_outfile_suffix())\n output_stream = _OUTPUT_STREAM_ESCAPE + output_path\n else:\n output_stream = sys.stderr\n return logging_ops.print_v2(msg, array_ops.shape(output_tensor),\n '@', self._replica_id,\n '\\n', output_tensor, '\\n',\n summarize=num_elements,\n output_stream=output_stream)", "def retrun_1(x):\n ret = np.ones(len(x))\n return ret", "def nprint(x, prefix=\"\", precision=3, surpress=True, max_list_len=5, show_shape=True):\n with printoptions(precision=precision, suppress=surpress):\n if isinstance(x, np.ndarray):\n print(prefix + \"ndarray\")\n print(str(x))\n if show_shape:\n print(\"of shape \" + shape2str(x) + \"\\n\")\n elif isinstance(x, tuple):\n print(prefix + \"tuple\")\n for i, j in enumerate(x):\n print(str(i))\n nprint(j, prefix, precision, surpress, max_list_len, show_shape)\n print()\n elif isinstance(x, list):\n # fixme: breaks when list elements are not ndarrays\n print(prefix + \"list of %d elements with shape %s\"\n % (len(x), shape2str(x[0])))\n for i in range(min(len(x), max_list_len)):\n nprint(x[i], prefix + 
\"list[%d] \" % i, precision, surpress, max_list_len, show_shape=False)\n print()\n # todo: do the same for tensors\n else:\n print(x)", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def x(a):\n return a[0]", "def print_array(self):\n for item in self.items:\n print(item)", "def __call__(self, array, axis=None):\n raise NotImplementedError()", "def zero_indexed(array):\n if all(dl == 0 for dl in array.datashape.dim_low):\n return array\n if any(dl < 0 for dl in array.datashape.dim_low):\n raise ValueError(\"Cannot zero_index array: one or more \"\n \"dimensions start < 0\")\n\n ds = array.datashape.copy()\n ds.dim_low = [0] * ds.ndim\n return array.redimension(ds.schema)", "def __getitem__(self, idx):\n return self.GetArray(idx)", "def __getitem__(self, idx):\n return self.GetArray(idx)", "def __safe_idx__(arr_name: str, idx) -> str:\n return arr_name + \"[\" + str(idx) + \" % len(\" + arr_name + \")]\"", "def print_(*input_x):\n print_op = _get_cache_prim(Print)()\n return print_op(*input_x)", "def _show_numpy(tensor: ndarray, zoom: float = 1.) -> None:\n from PIL import Image\n shape = tuple(map(lambda s: round(s * zoom), tensor.shape))\n Image.fromarray(tensor).resize((shape[1], shape[0])).show()", "def printCircularArray(self, letter):\n if letter in self.arr:\n index = self.arr.index(letter)\n return self._printFromIndex(index)\n else:\n return \"Invalid Input\"", "def __getitem__(self, index):\n return self.array[index]" ]
[ "0.60525095", "0.601079", "0.5855006", "0.575493", "0.5318782", "0.52099293", "0.5171336", "0.5115472", "0.50951475", "0.50903505", "0.50739056", "0.5069581", "0.49996632", "0.49829748", "0.4963213", "0.49431276", "0.49431276", "0.49431276", "0.49431276", "0.4938878", "0.4933561", "0.4929935", "0.4928669", "0.4895507", "0.4895507", "0.48601726", "0.47953367", "0.47879204", "0.47559187", "0.4754833" ]
0.77429724
0
Call pretty printer (`pprint`) on Theano objects, otherwise standard `print`
def pprint(x):
    if is_theano_object(x):
        return _gettheano().printing.pprint(x)
    else:
        return str(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pydotprint_profile():\r\n\r\n # Skip test if pydot is not available.\r\n if not theano.printing.pydot_imported:\r\n raise SkipTest('pydot not available')\r\n\r\n A = tensor.matrix()\r\n f = theano.function([A], A + 1, mode='ProfileMode')\r\n theano.printing.pydotprint(f, print_output_file=False)", "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def test_print_op():\r\n b = tensor.fmatrix()\r\n f = theano.function([b],theano.printing.Print()(b)*2, mode=mode_with_gpu)\r\n #theano.printing.debugprint(f)\r\n #print f.maker.fgraph.toposort()\r\n#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]\r\n topo = f.maker.fgraph.toposort()\r\n assert topo[0].op == cuda.gpu_from_host\r\n assert isinstance(topo[1].op, theano.printing.Print)\r\n assert isinstance(topo[2].op, cuda.GpuElemwise)\r\n assert topo[3].op == cuda.host_from_gpu\r\n f(numpy.random.random((5,5)).astype('float32'))", "def pprint(object, stream=None):\r\n printer = PrettyPrinter(stream=stream)\r\n printer.pprint(object)", "def test_print_op():\r\n b = tensor.fmatrix()\r\n f = theano.function([b], theano.printing.Print()(b) * 2,\r\n mode=mode_with_gpu)\r\n theano.printing.debugprint(f)\r\n #print f.maker.fgraph.toposort()\r\n#[GpuFromHost(<TensorType(float32, matrix)>), <theano.printing.Print object at 0x3581210>(GpuFromHost.0), GpuElemwise{mul}(CudaNdarray{[[ 2.]]}, <theano.printing.Print object at 0x3581210>.0), HostFromGpu(GpuElemwise{mul}.0)]\r\n topo = f.maker.fgraph.toposort()\r\n assert topo[0].op == gpu_from_host\r\n assert isinstance(topo[1].op, theano.printing.Print)\r\n assert isinstance(topo[2].op, GpuElemwise)\r\n assert topo[3].op == host_from_gpu\r\n f(numpy.random.random((5, 5)).astype('float32'))", "def print_summary_(fct_name, compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n local_time, other_time,\r\n n_apply_to_print=config.ProfileMode.n_apply_to_print,\r\n n_ops_to_print=config.ProfileMode.n_ops_to_print,\r\n print_apply=True,\r\n min_memory_size=config.ProfileMode.min_memory_size,\r\n ):\r\n\r\n print \"ProfileMode is deprecated! 
Use the new profiler.\"\r\n print \" The Theano flags to enable it ise: profile=True\"\r\n print \" The Theano flags for the memory profile to it is: profile_memory=True\"\r\n\r\n total_time = time.time() - import_time\r\n total_fct_time = sum(fct_call_time.values())\r\n total_fct_call = sum(fct_call.values())\r\n unknown_time = total_time - total_fct_time - compile_time\r\n overhead_time = total_fct_time - local_time\r\n if total_fct_time > 0:\r\n time_pr_in_fct = local_time / total_fct_time * 100\r\n overhead_time_pourcent_fct_time = (overhead_time / total_fct_time *\r\n 100)\r\n time_per_call = total_fct_time / total_fct_call\r\n else:\r\n time_pr_in_fct = 0\r\n overhead_time_pourcent_fct_time = 0\r\n time_per_call = 0\r\n\r\n print\r\n print 'ProfileMode.%s(%s)' % (fct_name,message)\r\n print '---------------------------'\r\n print\r\n print 'Time since import %.3fs'%(total_time)\r\n print 'Theano compile time: %.3fs (%.1f%% since import)'%(compile_time, compile_time/total_time*100)\r\n print ' Optimization time: %.3fs'%(other_time['optimizer_time'])\r\n print ' Linker time: %.3fs'%(other_time['linker_time'])\r\n print 'Theano fct call %.3fs (%.1f%% since import)'%(total_fct_time, total_fct_time/total_time*100)\r\n print ' Theano Op time %.3fs %.1f%%(since import) %.1f%%(of fct call)'% (\r\n local_time, local_time/total_time*100, time_pr_in_fct)\r\n print ' Theano function overhead in ProfileMode %.3fs %.1f%%(since import) %.1f%%(of fct call)'% (\r\n overhead_time, overhead_time/total_time*100, overhead_time_pourcent_fct_time)\r\n print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)\r\n print 'Rest of the time since import %.3fs %.1f%%'%(unknown_time, unknown_time/total_time*100)\r\n\r\n print\r\n print 'Theano fct summary:'\r\n print '<% total fct time> <total time> <time per call> <nb call> <fct name>'\r\n for key in fct_call.keys():\r\n if fct_call[key]>0:\r\n print ' %4.1f%% %.3fs %.2es %d %s'%(fct_call_time[key]/total_fct_time*100 ,fct_call_time[key],\r\n fct_call_time[key]/fct_call[key], fct_call[key], key.name)\r\n else:\r\n print ' NOT CALLED',key.name\r\n\r\n\r\n # Compute stats per op.\r\n op_time = {}\r\n op_call = {}\r\n op_apply = {}\r\n op_cimpl = {}\r\n sop_apply = {}\r\n for (i,a),t in apply_time.items():\r\n op=a.op\r\n op_time.setdefault(op,0)\r\n op_call.setdefault(op,0)\r\n op_apply.setdefault(op,0)\r\n sop_apply.setdefault(type(a.op),0)\r\n op_time[op]+=t\r\n nb_call = [v for k,v in fct_call.items() if k.maker.fgraph is a.fgraph][0]\r\n op_cimpl.setdefault(a.op, True)\r\n op_cimpl[a.op] = op_cimpl[a.op] and apply_cimpl.get(a, False)\r\n if t==0:\r\n assert nb_call == 0, nb_call\r\n else:\r\n op_call[op] += nb_call\r\n op_apply[op] += 1\r\n sop_apply[type(a.op)] += 1\r\n\r\n # Compute stats per op class\r\n sop_time={}\r\n sop_call={}\r\n sop_op = {}\r\n sop_cimpl={} #map each op class to Bool. 
True iff all applies were done in c.\r\n for a,t in op_time.items():\r\n typ = type(a)\r\n sop_time.setdefault(typ,0)\r\n sop_time[typ]+=t\r\n sop_op.setdefault(typ,0)\r\n sop_op[typ]+=1\r\n sop_cimpl.setdefault(typ,True)\r\n sop_cimpl[typ]=sop_cimpl[typ] and op_cimpl.get(a, False)\r\n sop_call[typ]=sop_call.get(typ,0)+op_call[a]\r\n\r\n\r\n # Print the summary per op class.\r\n print\r\n print 'Single Op-wise summary:'\r\n print '<% of local_time spent on this kind of Op> <cumulative %> <self seconds> <cumulative seconds> <time per call> [*] <nb_call> <nb_op> <nb_apply> <Op name>'\r\n sotimes = [(t*100/local_time, t, a, sop_cimpl[a], sop_call[a], sop_op[a], sop_apply[a]) for a, t in sop_time.items()]\r\n sotimes.sort()\r\n sotimes.reverse()\r\n tot=0\r\n for f,t,a,ci, nb_call, nb_op, nb_apply in sotimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_op, nb_apply, a)\r\n print ' ... (remaining %i single Op account for %.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(sotimes)-n_ops_to_print),\r\n sum(soinfo[0] for soinfo in sotimes[n_ops_to_print:]),\r\n sum(soinfo[1] for soinfo in sotimes[n_ops_to_print:]))\r\n\r\n print '(*) Op is running a c implementation'\r\n\r\n\r\n # The summary per op\r\n op_flops = {}\r\n for a,t in op_time.items():\r\n if hasattr(a,'flops'):\r\n op_flops[a]=a.flops*op_call[a]/t/1e6\r\n flops_msg=''\r\n if op_flops:\r\n flops_msg=' <MFlops/s>'\r\n print '\\nHACK WARNING: we print the flops for some OP, but the logic don\\'t always work. You need to know the internal of Theano to make it work correctly. Otherwise don\\'t use!'\r\n print\r\n print 'Op-wise summary:'\r\n print '<%% of local_time spent on this kind of Op> <cumulative %%> <self seconds> <cumulative seconds> <time per call> [*] %s <nb_call> <nb apply> <Op name>'%(flops_msg)\r\n\r\n otimes = [(t*100/local_time, t, a, op_cimpl.get(a, 0), op_call.get(a, 0), op_apply.get(a,0))\r\n for a, t in op_time.items()]\r\n otimes.sort()\r\n otimes.reverse()\r\n tot=0\r\n for f,t,a,ci,nb_call,nb_apply in otimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n if op_flops:\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %7.1f %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, op_flops.get(a,-1), nb_call, nb_apply, a)\r\n else:\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_apply, a)\r\n print ' ... 
(remaining %i Op account for %6.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(otimes)-n_ops_to_print),\r\n sum(f for f, t, a, ci, nb_call, nb_op in otimes[n_ops_to_print:]),\r\n sum(t for f, t, a, ci, nb_call, nb_op in otimes[n_ops_to_print:]))\r\n print '(*) Op is running a c implementation'\r\n\r\n\r\n if print_apply:\r\n print\r\n print 'Apply-wise summary:'\r\n print '<% of local_time spent at this position> <cumulative %%> <apply time> <cumulative seconds> <time per call> [*] <nb_call> <Apply position> <Apply Op name>'\r\n atimes = [(t*100/local_time, t, a, [v for k,v in fct_call.items() if k.maker.fgraph is a[1].fgraph][0]) for a, t in apply_time.items()]\r\n atimes.sort()\r\n atimes.reverse()\r\n tot=0\r\n for f,t,a,nb_call in atimes[:n_apply_to_print]:\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if nb_call==0:\r\n continue\r\n if apply_cimpl.get(a[1], False):\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %i %2i %s' % (\r\n f, ftot, t, tot, t/nb_call, msg, nb_call, a[0], str(a[1]))\r\n print ' ... (remaining %i Apply instances account for %.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(atimes)-n_apply_to_print),\r\n sum(f for f, t, a, nb_call in atimes[n_apply_to_print:]),\r\n sum(t for f, t, a, nb_call in atimes[n_apply_to_print:]))\r\n print '(*) Op is running a c implementation'\r\n for printer in profiler_printers:\r\n printer(fct_name, compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n other_time)\r\n\r\n if not variable_shape:\r\n print \"\"\"\\nProfile of Theano intermediate memory disabled.\r\n To enabled, put the Theano flag ProfileMode.profile_memory to True.\"\"\"\r\n else:\r\n print \"\"\"\r\n The memory profile in ProfileMode is removed!\r\n Use the new profiler. 
Use the Theano flags\r\n profile=True,profile_memory=True to enable it.\"\"\"\r\n\r\n print\r\n print \"\"\"Here are tips to potentially make your code run faster\r\n(if you think of new ones, suggest them on the mailing list).\r\nTest them first, as they are not guaranteed to always provide a speedup.\"\"\"\r\n from theano import tensor as T\r\n from theano.tensor.raw_random import RandomFunction\r\n import theano\r\n import theano.scalar as scal\r\n scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE,\r\n scal.EQ, scal.NEQ, scal.InRange,\r\n scal.Switch, scal.OR, scal.XOR,\r\n scal.AND, scal.Invert, scal.Maximum,\r\n scal.Minimum, scal.Add, scal.Mul,\r\n scal.Sub, scal.TrueDiv, scal.IntDiv,\r\n scal.Clip, scal.Second, scal.Identity,\r\n scal.Cast, scal.Sgn, scal.Neg,\r\n scal.Inv, scal.Sqr]\r\n scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil,\r\n scal.Floor, scal.RoundHalfToEven,\r\n scal.RoundHalfAwayFromZero, scal.Log,\r\n scal.Log2, scal.Log10, scal.Log1p,\r\n scal.Exp, scal.Sqrt, scal.Abs, scal.Cos,\r\n scal.Sin, scal.Tan, scal.Tanh,\r\n scal.Cosh, scal.Sinh,\r\n T.nnet.sigm.ScalarSigmoid,\r\n T.nnet.sigm.ScalarSoftplus]\r\n # Abs, Mod in float{32,64} only\r\n\r\n def get_scalar_ops(s):\r\n if isinstance(s, theano.scalar.Composite):\r\n l = []\r\n for node in s.fgraph.toposort():\r\n l += get_scalar_ops(node.op)\r\n return l\r\n else:\r\n return [s]\r\n\r\n def list_scalar_op(op):\r\n if isinstance(op.scalar_op, theano.scalar.Composite):\r\n return get_scalar_ops(op.scalar_op)\r\n else:\r\n return [op.scalar_op]\r\n\r\n def amdlibm_speed_up(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n for s_op in l:\r\n if s_op.__class__ in scalar_op_amdlibm_speed_up:\r\n return True\r\n elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:\r\n print \"We don't know if amdlibm will accelerate this scalar op.\", s_op\r\n return False\r\n\r\n def exp_float32_op(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n return any([s_op.__class__ in [scal.Exp] for s_op in l])\r\n\r\n printed_tip = False\r\n #tip 1\r\n if config.floatX == 'float64':\r\n print \" - Try the Theano flag floatX=float32\"\r\n printed_tip = True\r\n\r\n #tip 2\r\n if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a\r\n in apply_time]):\r\n print \" - Try installing amdlibm and set the Theano flag lib.amdlibm=True. This speeds up only some Elemwise operation.\"\r\n printed_tip = True\r\n\r\n #tip 3\r\n if not config.lib.amdlibm and any([exp_float32_op(a.op) and\r\n a.inputs[0].dtype == 'float32'\r\n for i, a in apply_time]):\r\n print (\" - With the default gcc libm, exp in float32 is slower \"\r\n \"than in float64! Try Theano flag floatX=float64, or \"\r\n \"install amdlibm and set the theano flags lib.amdlibm=True\")\r\n printed_tip = True\r\n\r\n #tip 4\r\n for a, t in apply_time.iteritems():\r\n node = a[1]\r\n if (isinstance(node.op, T.Dot) and\r\n all([len(i.type.broadcastable) == 2 for i in node.inputs])):\r\n print (\" - You have a dot operation that was not optimized to\"\r\n \" dot22 (which is faster). Make sure the inputs are \"\r\n \"float32 or float64, and are the same for both inputs. 
\"\r\n \"Currently they are: %s\" %\r\n [i.type for i in node.inputs])\r\n printed_tip = True\r\n\r\n #tip 5\r\n for a, t in apply_time.iteritems():\r\n node = a[1]\r\n if isinstance(node.op, RandomFunction):\r\n printed_tip = True\r\n print (\" - Replace the default random number generator by \"\r\n \"'from theano.sandbox.rng_mrg import MRG_RandomStreams \"\r\n \"as RandomStreams', as this is is faster. It is still \"\r\n \"experimental, but seems to work correctly.\")\r\n if config.device.startswith(\"gpu\"):\r\n print (\" - MRG_RandomStreams is the only random number\"\r\n \" generator supported on the GPU.\")\r\n break\r\n\r\n if not printed_tip:\r\n print \" Sorry, no tip for today.\"", "def __pprint(object, stream=None, indent=1, width=80, depth=None):\n printer = PrettyPrinterExt(\n stream=stream, indent=indent, width=width, depth=depth)\n printer.pprint(object)", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def print_summary_(fct_name, compile_time, fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n local_time, other_time,\n n_apply_to_print=config.ProfileMode.n_apply_to_print,\n n_ops_to_print=config.ProfileMode.n_ops_to_print,\n print_apply=True,\n min_memory_size=config.ProfileMode.min_memory_size,\n ):\n\n print(\"ProfileMode is deprecated! Use the new profiler.\")\n print(\" The Theano flags to enable it ise: profile=True\")\n print(\" The Theano flags for the memory profile to it is: \"\n \"profile_memory=True\")\n\n total_time = time.time() - import_time\n total_fct_time = sum(fct_call_time.values())\n total_fct_call = sum(fct_call.values())\n unknown_time = total_time - total_fct_time - compile_time\n overhead_time = total_fct_time - local_time\n if total_fct_time > 0:\n time_pr_in_fct = local_time / total_fct_time * 100\n overhead_time_pourcent_fct_time = (overhead_time / total_fct_time *\n 100)\n time_per_call = total_fct_time / total_fct_call\n else:\n time_pr_in_fct = 0\n overhead_time_pourcent_fct_time = 0\n time_per_call = 0\n\n print()\n print('ProfileMode.%s(%s)' % (fct_name, message))\n print('---------------------------')\n print()\n print('Time since import %.3fs' % (total_time))\n print('Theano compile time: %.3fs (%.1f%% since import)' %\n (compile_time, compile_time / total_time * 100))\n print(' Optimization time: %.3fs' % (other_time['optimizer_time']))\n print(' Linker time: %.3fs' % (other_time['linker_time']))\n print('Theano fct call %.3fs (%.1f%% since import)' %\n (total_fct_time, total_fct_time / total_time * 100))\n print(' Theano Op time %.3fs %.1f%%(since import) %.1f%%'\n '(of fct call)' % (local_time, local_time / total_time * 100,\n time_pr_in_fct))\n print(' Theano function overhead in ProfileMode %.3fs %.1f%%'\n '(since import) %.1f%%(of fct call)' % (\n overhead_time, overhead_time / total_time * 100,\n overhead_time_pourcent_fct_time))\n print('%i Theano fct call, %.3fs per call' %\n (total_fct_call, time_per_call))\n print('Rest of the time since import %.3fs %.1f%%' %\n (unknown_time, unknown_time / total_time * 100))\n\n print()\n print('Theano fct summary:')\n print('<% total fct time> <total time> <time per call> <nb call> '\n '<fct name>')\n for key in fct_call:\n if fct_call[key] > 0:\n print(' %4.1f%% %.3fs %.2es %d %s' %\n (fct_call_time[key] / total_fct_time * 100,\n fct_call_time[key],\n fct_call_time[key] / fct_call[key],\n fct_call[key],\n key.name))\n else:\n print(' NOT CALLED', key.name)\n\n # Compute stats per op.\n op_time = {}\n op_call = {}\n op_apply = {}\n op_cimpl = 
{}\n sop_apply = {}\n for (i, a), t in iteritems(apply_time):\n op = a.op\n op_time.setdefault(op, 0)\n op_call.setdefault(op, 0)\n op_apply.setdefault(op, 0)\n sop_apply.setdefault(type(a.op), 0)\n op_time[op] += t\n nb_call = [v for k, v in iteritems(fct_call)\n if k.maker.fgraph is a.fgraph][0]\n op_cimpl.setdefault(a.op, True)\n op_cimpl[a.op] = op_cimpl[a.op] and apply_cimpl.get(a, False)\n if t == 0:\n assert nb_call == 0, nb_call\n else:\n op_call[op] += nb_call\n op_apply[op] += 1\n sop_apply[type(a.op)] += 1\n\n # Compute stats per op class\n sop_time = {}\n sop_call = {}\n sop_op = {}\n # map each op class to Bool. True iff all applies were done in c.\n sop_cimpl = {}\n for a, t in iteritems(op_time):\n typ = type(a)\n sop_time.setdefault(typ, 0)\n sop_time[typ] += t\n sop_op.setdefault(typ, 0)\n sop_op[typ] += 1\n sop_cimpl.setdefault(typ, True)\n sop_cimpl[typ] = sop_cimpl[typ] and op_cimpl.get(a, False)\n sop_call[typ] = sop_call.get(typ, 0) + op_call[a]\n\n # Print the summary per op class.\n print()\n print('Single Op-wise summary:')\n print('<% of local_time spent on this kind of Op> <cumulative %> '\n '<self seconds> <cumulative seconds> <time per call> [*] '\n '<nb_call> <nb_op> <nb_apply> <Op name>')\n sotimes = [(t * 100 / local_time, t, a, sop_cimpl[a], sop_call[a],\n sop_op[a], sop_apply[a]) for a, t in iteritems(sop_time)]\n sotimes.sort()\n sotimes.reverse()\n tot = 0\n for f, t, a, ci, nb_call, nb_op, nb_apply in sotimes[:n_ops_to_print]:\n if nb_call == 0:\n assert t == 0\n continue\n tot += t\n ftot = tot * 100 / local_time\n if ci:\n msg = '*'\n else:\n msg = ' '\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d '\n '%2d %s' % (f, ftot, t, tot, t / nb_call, msg, nb_call,\n nb_op, nb_apply, a))\n print(' ... (remaining %i single Op account for %.2f%%(%.2fs) of '\n 'the runtime)' %\n (max(0, len(sotimes) - n_ops_to_print),\n sum(soinfo[0] for soinfo in sotimes[n_ops_to_print:]),\n sum(soinfo[1] for soinfo in sotimes[n_ops_to_print:])))\n\n print('(*) Op is running a c implementation')\n\n # The summary per op\n op_flops = {}\n for a, t in iteritems(op_time):\n if hasattr(a, 'flops'):\n op_flops[a] = a.flops * op_call[a] / t / 1e6\n flops_msg = ''\n if op_flops:\n flops_msg = ' <MFlops/s>'\n print(\"\\nHACK WARNING: we print the flops for some OP, but the \"\n \"logic doesn't always work. You need to know the \"\n \"internals of Theano to make it work correctly. \"\n \"Otherwise don't use it!\")\n print()\n print('Op-wise summary:')\n print('<%% of local_time spent on this kind of Op> <cumulative %%> '\n '<self seconds> <cumulative seconds> <time per call> [*] %s '\n '<nb_call> <nb apply> <Op name>' % (flops_msg))\n\n otimes = [(t * 100 / local_time, t, a, op_cimpl.get(a, 0),\n op_call.get(a, 0), op_apply.get(a, 0))\n for a, t in iteritems(op_time)]\n otimes.sort()\n otimes.reverse()\n tot = 0\n for f, t, a, ci, nb_call, nb_apply in otimes[:n_ops_to_print]:\n if nb_call == 0:\n assert t == 0\n continue\n tot += t\n ftot = tot * 100 / local_time\n if ci:\n msg = '*'\n else:\n msg = ' '\n if op_flops:\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %7.1f '\n '%5d %2d %s' % (f, ftot, t, tot, t / nb_call, msg,\n op_flops.get(a, -1), nb_call, nb_apply,\n a))\n else:\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d '\n '%s' % (f, ftot, t, tot, t / nb_call, msg, nb_call,\n nb_apply, a))\n print(' ... 
(remaining %i Op account for %6.2f%%(%.2fs) of the '\n 'runtime)' %\n (max(0, len(otimes) - n_ops_to_print),\n sum(f for f, t, a, ci, nb_call, nb_op in\n otimes[n_ops_to_print:]),\n sum(t for f, t, a, ci, nb_call, nb_op in\n otimes[n_ops_to_print:])))\n print('(*) Op is running a c implementation')\n\n if print_apply:\n print()\n print('Apply-wise summary:')\n print('<% of local_time spent at this position> <cumulative %%> '\n '<apply time> <cumulative seconds> <time per call> [*] '\n '<nb_call> <Apply position> <Apply Op name>')\n atimes = [(t * 100 / local_time, t, a,\n [v for k, v in iteritems(fct_call)\n if k.maker.fgraph is a[1].fgraph][0])\n for a, t in iteritems(apply_time)]\n atimes.sort()\n atimes.reverse()\n tot = 0\n for f, t, a, nb_call in atimes[:n_apply_to_print]:\n tot += t\n ftot = tot * 100 / local_time\n if nb_call == 0:\n continue\n if apply_cimpl.get(a[1], False):\n msg = '*'\n else:\n msg = ' '\n print(' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %i '\n '%2i %s' %\n (f, ftot, t, tot, t / nb_call, msg, nb_call, a[0],\n str(a[1])))\n print(' ... (remaining %i Apply instances account for '\n '%.2f%%(%.2fs) of the runtime)' %\n (max(0, len(atimes) - n_apply_to_print),\n sum(f for f, t, a, nb_call in atimes[n_apply_to_print:]),\n sum(t for f, t, a, nb_call in atimes[n_apply_to_print:])))\n print('(*) Op is running a c implementation')\n for printer in profiler_printers:\n printer(fct_name, compile_time, fct_call_time, fct_call,\n apply_time, apply_cimpl, message, variable_shape,\n other_time)\n\n if not variable_shape:\n print(\"\\nProfile of Theano intermediate memory disabled. \"\n \"To enable, set the Theano flag ProfileMode.profile_memory \"\n \"to True.\")\n else:\n print(\"\"\"\n The memory profile in ProfileMode is removed!\n Use the new profiler. 
Use the Theano flags\n profile=True,profile_memory=True to enable it.\"\"\")\n\n print()\n print(\"\"\"Here are tips to potentially make your code run faster\n(if you think of new ones, suggest them on the mailing list).\nTest them first, as they are not guaranteed to always provide a speedup.\"\"\")\n from theano import tensor as T\n from theano.tensor.raw_random import RandomFunction\n import theano\n import theano.scalar as scal\n scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE,\n scal.EQ, scal.NEQ, scal.InRange,\n scal.Switch, scal.OR, scal.XOR,\n scal.AND, scal.Invert, scal.Maximum,\n scal.Minimum, scal.Add, scal.Mul,\n scal.Sub, scal.TrueDiv, scal.IntDiv,\n scal.Clip, scal.Second, scal.Identity,\n scal.Cast, scal.Sgn, scal.Neg,\n scal.Inv, scal.Sqr]\n scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil,\n scal.Floor, scal.RoundHalfToEven,\n scal.RoundHalfAwayFromZero, scal.Log,\n scal.Log2, scal.Log10, scal.Log1p,\n scal.Exp, scal.Sqrt, scal.Abs, scal.Cos,\n scal.Sin, scal.Tan, scal.Tanh,\n scal.Cosh, scal.Sinh,\n T.nnet.sigm.ScalarSigmoid,\n T.nnet.sigm.ScalarSoftplus]\n\n def get_scalar_ops(s):\n if isinstance(s, theano.scalar.Composite):\n l = []\n for node in s.fgraph.toposort():\n l += get_scalar_ops(node.op)\n return l\n else:\n return [s]\n\n def list_scalar_op(op):\n if isinstance(op.scalar_op, theano.scalar.Composite):\n return get_scalar_ops(op.scalar_op)\n else:\n return [op.scalar_op]\n\n def amdlibm_speed_up(op):\n if not isinstance(op, T.Elemwise):\n return False\n else:\n l = list_scalar_op(op)\n for s_op in l:\n if s_op.__class__ in scalar_op_amdlibm_speed_up:\n return True\n elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:\n print(\"We don't know if amdlibm will accelerate \"\n \"this scalar op.\", s_op)\n return False\n\n def exp_float32_op(op):\n if not isinstance(op, T.Elemwise):\n return False\n else:\n l = list_scalar_op(op)\n return any([s_op.__class__ in [scal.Exp] for s_op in l])\n\n printed_tip = False\n # tip 1\n if config.floatX == 'float64':\n print(\" - Try the Theano flag floatX=float32\")\n printed_tip = True\n\n # tip 2\n if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a\n in apply_time]):\n print(\" - Try installing amdlibm and set the Theano flag \"\n \"lib.amdlibm=True. This speeds up only some Elemwise \"\n \"operation.\")\n printed_tip = True\n\n # tip 3\n if not config.lib.amdlibm and any([exp_float32_op(a.op) and\n a.inputs[0].dtype == 'float32'\n for i, a in apply_time]):\n print(\" - With the default gcc libm, exp in float32 is slower \"\n \"than in float64! Try Theano flag floatX=float64, or \"\n \"install amdlibm and set the theano flags lib.amdlibm=True\")\n printed_tip = True\n\n # tip 4\n for a, t in iteritems(apply_time):\n node = a[1]\n if (isinstance(node.op, T.Dot) and\n all([len(i.type.broadcastable) == 2\n for i in node.inputs])):\n print(\" - You have a dot operation that was not optimized to\"\n \" dot22 (which is faster). Make sure the inputs are \"\n \"float32 or float64, and are the same for both inputs. \"\n \"Currently they are: %s\" %\n [i.type for i in node.inputs])\n printed_tip = True\n\n # tip 5\n for a, t in iteritems(apply_time):\n node = a[1]\n if isinstance(node.op, RandomFunction):\n printed_tip = True\n print(\" - Replace the default random number generator by \"\n \"'from theano.sandbox.rng_mrg import MRG_RandomStreams \"\n \"as RandomStreams', as this is is faster. 
It is still \"\n \"experimental, but seems to work correctly.\")\n if config.device.startswith(\"gpu\"):\n print(\" - MRG_RandomStreams is the only random number\"\n \" generator supported on the GPU.\")\n break\n\n if not printed_tip:\n print(\" Sorry, no tip for today.\")", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def pprint(obj):\n for argname in sorted([x for x in dir(obj) if not x.startswith('__')]):\n # Skip callables\n if hasattr(getattr(obj, argname), '__call__'):\n continue\n print(\"{} : {}\".format(argname, getattr(obj, argname)))", "def test_pydotprint_long_name():\r\n\r\n # Skip test if pydot is not available.\r\n if not theano.printing.pydot_imported:\r\n raise SkipTest('pydot not available')\r\n\r\n x = tensor.dvector()\r\n mode = theano.compile.mode.get_default_mode().excluding(\"fusion\")\r\n f = theano.function([x], [x * 2, x + x], mode=mode)\r\n f([1, 2, 3, 4])\r\n\r\n s = StringIO()\r\n new_handler = logging.StreamHandler(s)\r\n new_handler.setLevel(logging.DEBUG)\r\n orig_handler = theano.logging_default_handler\r\n\r\n theano.printing.pydotprint(f, max_label_size=5,\r\n print_output_file=False,\r\n assert_nb_all_strings=6)", "def pydotprint(fct, outfile=None,\r\n compact=True, format='png', with_ids=False,\r\n high_contrast=True, cond_highlight=None, colorCodes=None,\r\n max_label_size=70, scan_graphs=False,\r\n var_with_name_simple=False,\r\n print_output_file=True,\r\n assert_nb_all_strings=-1\r\n ):\r\n if colorCodes is None:\r\n colorCodes = default_colorCodes\r\n\r\n if outfile is None:\r\n outfile = os.path.join(config.compiledir, 'theano.pydotprint.' +\r\n config.device + '.' + format)\r\n\r\n if isinstance(fct, Function):\r\n mode = fct.maker.mode\r\n profile = getattr(fct, \"profile\", None)\r\n if (not isinstance(mode, ProfileMode)\r\n or not fct in mode.profile_stats):\r\n mode = None\r\n fct_fgraph = fct.maker.fgraph\r\n elif isinstance(fct, gof.FunctionGraph):\r\n mode = None\r\n profile = None\r\n fct_fgraph = fct\r\n else:\r\n raise ValueError(('pydotprint expects as input a theano.function or '\r\n 'the FunctionGraph of a function!'), fct)\r\n\r\n if not pydot_imported:\r\n raise RuntimeError(\"Failed to import pydot. 
You must install pydot\"\r\n \" for `pydotprint` to work.\")\r\n return\r\n\r\n g = pd.Dot()\r\n if cond_highlight is not None:\r\n c1 = pd.Cluster('Left')\r\n c2 = pd.Cluster('Right')\r\n c3 = pd.Cluster('Middle')\r\n cond = None\r\n for node in fct_fgraph.toposort():\r\n if (node.op.__class__.__name__ == 'IfElse'\r\n and node.op.name == cond_highlight):\r\n cond = node\r\n if cond is None:\r\n _logger.warn(\"pydotprint: cond_highlight is set but there is no\"\r\n \" IfElse node in the graph\")\r\n cond_highlight = None\r\n\r\n if cond_highlight is not None:\r\n def recursive_pass(x, ls):\r\n if not x.owner:\r\n return ls\r\n else:\r\n ls += [x.owner]\r\n for inp in x.inputs:\r\n ls += recursive_pass(inp, ls)\r\n return ls\r\n\r\n left = set(recursive_pass(cond.inputs[1], []))\r\n right = set(recursive_pass(cond.inputs[2], []))\r\n middle = left.intersection(right)\r\n left = left.difference(middle)\r\n right = right.difference(middle)\r\n middle = list(middle)\r\n left = list(left)\r\n right = list(right)\r\n\r\n var_str = {}\r\n all_strings = set()\r\n\r\n def var_name(var):\r\n if var in var_str:\r\n return var_str[var]\r\n\r\n if var.name is not None:\r\n if var_with_name_simple:\r\n varstr = var.name\r\n else:\r\n varstr = 'name=' + var.name + \" \" + str(var.type)\r\n elif isinstance(var, gof.Constant):\r\n dstr = 'val=' + str(numpy.asarray(var.data))\r\n if '\\n' in dstr:\r\n dstr = dstr[:dstr.index('\\n')]\r\n varstr = '%s %s' % (dstr, str(var.type))\r\n elif (var in input_update\r\n and input_update[var].variable.name is not None):\r\n if var_with_name_simple:\r\n varstr = input_update[var].variable.name + \" UPDATE\"\r\n else:\r\n varstr = (input_update[var].variable.name + \" UPDATE \"\r\n + str(var.type))\r\n else:\r\n #a var id is needed as otherwise var with the same type will be\r\n #merged in the graph.\r\n varstr = str(var.type)\r\n if (varstr in all_strings) or with_ids:\r\n idx = ' id=' + str(len(var_str))\r\n if len(varstr) + len(idx) > max_label_size:\r\n varstr = varstr[:max_label_size - 3 - len(idx)] + idx + '...'\r\n else:\r\n varstr = varstr + idx\r\n elif len(varstr) > max_label_size:\r\n varstr = varstr[:max_label_size - 3] + '...'\r\n idx = 1\r\n while varstr in all_strings:\r\n idx += 1\r\n suffix = ' id=' + str(idx)\r\n varstr = (varstr[:max_label_size - 3 - len(suffix)] +\r\n '...' 
+\r\n suffix)\r\n var_str[var] = varstr\r\n all_strings.add(varstr)\r\n\r\n return varstr\r\n topo = fct_fgraph.toposort()\r\n apply_name_cache = {}\r\n\r\n def apply_name(node):\r\n if node in apply_name_cache:\r\n return apply_name_cache[node]\r\n prof_str = ''\r\n if mode:\r\n time = mode.profile_stats[fct].apply_time.get(node, 0)\r\n #second, % total time in profiler, %fct time in profiler\r\n if mode.local_time == 0:\r\n pt = 0\r\n else:\r\n pt = time * 100 / mode.local_time\r\n if mode.profile_stats[fct].fct_callcount == 0:\r\n pf = 0\r\n else:\r\n pf = time * 100 / mode.profile_stats[fct].fct_call_time\r\n prof_str = ' (%.3fs,%.3f%%,%.3f%%)' % (time, pt, pf)\r\n elif profile:\r\n time = profile.apply_time.get(node, 0)\r\n #second, %fct time in profiler\r\n if profile.fct_callcount == 0:\r\n pf = 0\r\n else:\r\n pf = time * 100 / profile.fct_call_time\r\n prof_str = ' (%.3fs,%.3f%%)' % (time, pf)\r\n applystr = str(node.op).replace(':', '_')\r\n applystr += prof_str\r\n if (applystr in all_strings) or with_ids:\r\n idx = ' id=' + str(topo.index(node))\r\n if len(applystr) + len(idx) > max_label_size:\r\n applystr = (applystr[:max_label_size - 3 - len(idx)] + idx\r\n + '...')\r\n else:\r\n applystr = applystr + idx\r\n elif len(applystr) > max_label_size:\r\n applystr = applystr[:max_label_size - 3] + '...'\r\n idx = 1\r\n while applystr in all_strings:\r\n idx += 1\r\n suffix = ' id=' + str(idx)\r\n applystr = (applystr[:max_label_size - 3 - len(suffix)] +\r\n '...' +\r\n suffix)\r\n\r\n all_strings.add(applystr)\r\n apply_name_cache[node] = applystr\r\n return applystr\r\n\r\n # Update the inputs that have an update function\r\n input_update = {}\r\n outputs = list(fct_fgraph.outputs)\r\n if isinstance(fct, Function):\r\n for i in reversed(fct.maker.expanded_inputs):\r\n if i.update is not None:\r\n input_update[outputs.pop()] = i\r\n\r\n apply_shape = 'ellipse'\r\n var_shape = 'box'\r\n for node_idx, node in enumerate(topo):\r\n astr = apply_name(node)\r\n\r\n use_color = None\r\n for opName, color in colorCodes.items():\r\n if opName in node.op.__class__.__name__:\r\n use_color = color\r\n\r\n if use_color is None:\r\n nw_node = pd.Node(astr, shape=apply_shape)\r\n elif high_contrast:\r\n nw_node = pd.Node(astr, style='filled', fillcolor=use_color,\r\n shape=apply_shape)\r\n else:\r\n nw_node = pd.Node(astr, color=use_color, shape=apply_shape)\r\n g.add_node(nw_node)\r\n if cond_highlight:\r\n if node in middle:\r\n c3.add_node(nw_node)\r\n elif node in left:\r\n c1.add_node(nw_node)\r\n elif node in right:\r\n c2.add_node(nw_node)\r\n\r\n for id, var in enumerate(node.inputs):\r\n varstr = var_name(var)\r\n label = str(var.type)\r\n if len(node.inputs) > 1:\r\n label = str(id) + ' ' + label\r\n if len(label) > max_label_size:\r\n label = label[:max_label_size - 3] + '...'\r\n if var.owner is None:\r\n if high_contrast:\r\n g.add_node(pd.Node(varstr,\r\n style='filled',\r\n fillcolor='green',\r\n shape=var_shape))\r\n else:\r\n g.add_node(pd.Node(varstr, color='green', shape=var_shape))\r\n g.add_edge(pd.Edge(varstr, astr, label=label))\r\n elif var.name or not compact:\r\n g.add_edge(pd.Edge(varstr, astr, label=label))\r\n else:\r\n #no name, so we don't make a var ellipse\r\n g.add_edge(pd.Edge(apply_name(var.owner), astr, label=label))\r\n\r\n for id, var in enumerate(node.outputs):\r\n varstr = var_name(var)\r\n out = any([x[0] == 'output' for x in var.clients])\r\n label = str(var.type)\r\n if len(node.outputs) > 1:\r\n label = str(id) + ' ' + label\r\n if len(label) > 
max_label_size:\r\n label = label[:max_label_size - 3] + '...'\r\n if out:\r\n g.add_edge(pd.Edge(astr, varstr, label=label))\r\n if high_contrast:\r\n g.add_node(pd.Node(varstr, style='filled',\r\n fillcolor='blue', shape=var_shape))\r\n else:\r\n g.add_node(pd.Node(varstr, color='blue', shape=var_shape))\r\n elif len(var.clients) == 0:\r\n g.add_edge(pd.Edge(astr, varstr, label=label))\r\n if high_contrast:\r\n g.add_node(pd.Node(varstr, style='filled',\r\n fillcolor='grey', shape=var_shape))\r\n else:\r\n g.add_node(pd.Node(varstr, color='grey', shape=var_shape))\r\n elif var.name or not compact:\r\n g.add_edge(pd.Edge(astr, varstr, label=label))\r\n# else:\r\n #don't add egde here as it is already added from the inputs.\r\n\r\n if cond_highlight:\r\n g.add_subgraph(c1)\r\n g.add_subgraph(c2)\r\n g.add_subgraph(c3)\r\n\r\n if not outfile.endswith('.' + format):\r\n outfile += '.' + format\r\n\r\n g.write(outfile, prog='dot', format=format)\r\n if print_output_file:\r\n print 'The output file is available at', outfile\r\n\r\n if assert_nb_all_strings != -1:\r\n assert len(all_strings) == assert_nb_all_strings\r\n\r\n if scan_graphs:\r\n scan_ops = [(idx, x) for idx, x in enumerate(fct_fgraph.toposort())\r\n if isinstance(x.op, theano.scan_module.scan_op.Scan)]\r\n path, fn = os.path.split(outfile)\r\n basename = '.'.join(fn.split('.')[:-1])\r\n # Safe way of doing things .. a file name may contain multiple .\r\n ext = fn[len(basename):]\r\n\r\n for idx, scan_op in scan_ops:\r\n # is there a chance that name is not defined?\r\n if hasattr(scan_op.op, 'name'):\r\n new_name = basename + '_' + scan_op.op.name + '_' + str(idx)\r\n else:\r\n new_name = basename + '_' + str(idx)\r\n new_name = os.path.join(path, new_name + ext)\r\n pydotprint(scan_op.op.fn, new_name, compact, format, with_ids,\r\n high_contrast, cond_highlight, colorCodes,\r\n max_label_size, scan_graphs)", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def pprint(self, parameter_s=''):\n ptformatter = self.shell.display_formatter.formatters['text/plain']\n ptformatter.pprint = bool(1 - ptformatter.pprint)\n print('Pretty printing has been turned',\n ['OFF','ON'][ptformatter.pprint])", "def print_verbose(self) -> None:\n print(self)\n if self.meta is not None:\n print(self.meta.__repr__())", "def pretty_print(results: List[Tuple[str, torch.Tensor]]):\n for item in results:\n print(\"...[%.2f] - %s\" % (item[1], item[0]))", "def magic_Pprint(self, parameter_s=''):\n \n self.shell.outputcache.Pprint = 1 - self.shell.outputcache.Pprint\n print 'Pretty printing has been turned', \\\n ['OFF','ON'][self.shell.outputcache.Pprint]", "def _print_transforms(self):\n self._print_frozen_transforms()\n self._print_nonfrozen_transforms()", "def pprint():\n parser = ArgumentParser(description=pprint.__doc__)\n parser.add_argument(\"filenames\", metavar=\"FILENAME\", nargs=\"+\",\n help=\"an input filename to pretty-print\")\n parser.add_argument(\"-n\", metavar=\"NUMBER\", type=int,\n help=\"only process the first NUMBER objects\"\n \" from each input file\")\n args = parser.parse_args()\n\n # read and pretty-print the given input files\n pprinter = PrettyPrinter()\n for fname in args.filenames:\n sys.stderr.write(\"Processing file '%s' ...\\n\" % fname)\n try:\n compression = fname.lower().endswith(\".lz4\")\n for cnt, obj in enumerate(load_iter(fname, compression=compression, use_list=False)):\n if args.n is None or cnt < args.n:\n pprinter.pprint(obj)\n except Exception as excp: # pylint: disable=broad-except\n 
sys.stderr.write(\"%s\\n\" % excp)", "def p(value):\n pp.pprint(value)", "def test_pydotprint_cond_highlight():\r\n\r\n # Skip test if pydot is not available.\r\n if not theano.printing.pydot_imported:\r\n raise SkipTest('pydot not available')\r\n\r\n x = tensor.dvector()\r\n f = theano.function([x], x * 2)\r\n f([1, 2, 3, 4])\r\n\r\n s = StringIO()\r\n new_handler = logging.StreamHandler(s)\r\n new_handler.setLevel(logging.DEBUG)\r\n orig_handler = theano.logging_default_handler\r\n\r\n theano.theano_logger.removeHandler(orig_handler)\r\n theano.theano_logger.addHandler(new_handler)\r\n try:\r\n theano.printing.pydotprint(f, cond_highlight=True,\r\n print_output_file=False)\r\n finally:\r\n theano.theano_logger.addHandler(orig_handler)\r\n theano.theano_logger.removeHandler(new_handler)\r\n\r\n assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'\r\n ' is no IfElse node in the graph\\n')", "def _pprint(params, offset=0, printer=repr):\n # Do a multi-line justified repr:\n param_names = [p for p in params.keys() if p is not \"cost\"]\n param_names.sort()\n\n params_list = list()\n this_line_length = offset\n line_sep = ',\\n' + (1 + offset // 2) * ' '\n for i, name in enumerate(param_names):\n value = params[name]\n if isinstance(value, float):\n this_repr = '%s=%s' % (name, str(value))\n else:\n this_repr = '%s=%s' % (name, printer(value))\n if len(this_repr) > 500:\n this_repr = this_repr[:300] + '...' + this_repr[-100:]\n if i > 0:\n if (this_line_length + len(this_repr) >= 75 or '\\n' in this_repr):\n params_list.append(line_sep)\n this_line_length = len(line_sep)\n else:\n params_list.append(', ')\n this_line_length += 2\n params_list.append(this_repr)\n this_line_length += len(this_repr)\n # options = np.get_printoptions()\n # np.set_printoptions(**options)\n lines = ''.join(params_list)\n # Strip trailing space to avoid nightmare in doctests\n lines = '\\n'.join(l.rstrip(' ') for l in lines.split('\\n'))\n return lines", "def print(*args, **kwargs):\n new_args = []\n for arg in args:\n if builtins.isinstance(arg, models.Point):\n new_args.append(\"({0}, {1})\".format(arg.x, arg.y))\n else:\n new_args.append(arg)\n\n builtins.print(*new_args, **kwargs)", "def p(mess, obj):\n if hasattr(obj, 'shape'):\n print(mess, type(obj), obj.shape, \"\\n\", obj)\n else:\n print(mess, type(obj), \"\\n\", obj)", "def print_out():\n pass", "def printpretty(self):\n print(self.string_rep())", "def print_graph() -> None:\n raise NotImplementedError", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works" ]
[ "0.67568576", "0.66903853", "0.6612041", "0.64975744", "0.63267666", "0.63239855", "0.62581927", "0.6243194", "0.61284935", "0.61009616", "0.5968577", "0.5889163", "0.58622116", "0.5802376", "0.5783404", "0.57769144", "0.5669858", "0.5625584", "0.5618598", "0.55597955", "0.55507463", "0.5509434", "0.55084455", "0.5487345", "0.5477463", "0.5467825", "0.546164", "0.54303193", "0.5392316", "0.5385805" ]
0.7738382
0
If `value` is a Theano variable, return its test value if it is defined. Otherwise just return `value` unchanged. If `nofail` is False (default), will raise an error if no test value is found. Otherwise returns None.
def get_test_value(var, nofail=False):
    if 'theano' in sys.modules and isinstance(var, _getT().sharedvar.SharedVariable):
        retval = var.get_value()
    elif 'theano' in sys.modules and isinstance(var, _gettheano().graph.basic.Variable):
        try:
            retval = var.tag.test_value
        except AttributeError:
            if nofail:
                return None
            else:
                raise AttributeError("You've attempted to execute a function that "
                                     "requires a test_value for the variable {} to "
                                     "be set, and this value is not set.".format(var))
    else:
        retval = var
    return retval
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def value_or_none(value):\n if value or value == 0:\n return value\n return None", "def get_test_value(v):\r\n if not isinstance(v, graph.Variable):\r\n v_var = theano.tensor.as_tensor_variable(v)\r\n else:\r\n v_var = v\r\n return PureOp._get_test_value(v_var)", "def _get_test_value(cls, v):\r\n # avoid circular import\r\n from theano.compile.sharedvalue import SharedVariable\r\n\r\n if isinstance(v, graph.Constant):\r\n return v.value\r\n elif isinstance(v, SharedVariable):\r\n return v.get_value(borrow=True, return_internal_type=True)\r\n elif isinstance(v, graph.Variable) and hasattr(v.tag, 'test_value'):\r\n # ensure that the test value is correct\r\n return v.type.filter(v.tag.test_value)\r\n\r\n raise AttributeError('%s has no test value' % v)", "def has_value(var) :\n return var != None", "def return_value(original_value):\r\n try:\r\n result = original_value if float(original_value) > 0 else None\r\n except (ValueError, TypeError):\r\n result = None\r\n return result", "def if_none(value: Any, default: Any):\n return value if value is not None else default", "def value_check(unit, value):\n while True:\n try: # <=== Checks that value isn't below abs 0\n t = value_input(unit) # Returns value if okay\n if value(t) != None:\n return t\n break\n except ValueError:\n tempConv(t)", "def not_none(value):\n return not value is None", "def optional_apply(f, value):\n if value is not None:\n return f(value)", "def of(cls, value, predicate=None):\n if predicate is None:\n predicate = _impl._is_not_none\n return cls.Some(value) if predicate(value) else cls.Nothing", "def Noneify(variable):\n if variable in (\"None\", \"\", None):\n return None\n if variable in (\"False\", \"0\"):\n return False\n if variable in (\"True\", \"1\"):\n return True\n return variable", "def discard_if_false(value):\n if not value:\n stack = currentframe().f_back.f_locals.setdefault(SN, [])\n stack.pop()\n return value", "def __nonzero__(self):\n return self.value.__nonzero__()", "def fn_if(self, value):\n\n condition_name, true_value, false_value = value\n if self.parser.conditions.evaluate(condition_name):\n return true_value\n else:\n return false_value", "def nonull(val):\n return val if not pd.isnull(val) else None", "def try_or_none(f):\n def f_or_none(x):\n try: return f(x)\n except: return None\n return f_or_none", "def try_or_none(f):\n\n def f_or_none(x):\n try: return f(x)\n except: return None\n\n return f_or_none", "def nonNegativeFloatOrNone(value):\n return None if value == None or value < 0 else float(value)", "def nan_helper(y):\n\n return (np.isnan(y), lambda z: z.to_numpy().nonzero()[0])", "def var_or_atomic_or_blank():\n return var_or_atomic() | next_value('_')", "def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None", "def _nullop(value):\n return value", "def compare_with_none():\n value = {};\n if value is not None:\n print(\"value is not none\")\n else:\n print(\"value is none\")", "def isfalse(variable):\n\n # Return the answer\n return variable in [0, 0.0, False, [], {}, math.nan, \"\", (), None]", "def valueOrDefault(x):\n\tif isNumber(x): return x\n\telse: return x.valueOrDefault()", "def testGuardingIsNotElse(self):\n self.Check(\"\"\"\n from __future__ import google_type_annotations\n from typing import Optional\n def f(x: Optional[str]) -> int:\n if x is None:\n x = 1\n else:\n x = 1\n return x\n \"\"\")", "def valueOrDefault(x):\n if isNumber(x):\n return x\n else:\n return x.valueOrDefault()", "def not_equals_success_func(target, 
result):\n if result is None:\n return False\n return target != result", "def nan_helper(y):\r\n\r\n return np.isnan(y), lambda z: z.nonzero()[0]", "def nan_helper(y):\r\n\r\n return np.isnan(y), lambda z: z.nonzero()[0]" ]
[ "0.59126365", "0.5759972", "0.56993127", "0.55516756", "0.5535529", "0.5529996", "0.536491", "0.53313243", "0.5318258", "0.5313379", "0.53128356", "0.5148102", "0.509741", "0.50536364", "0.50174165", "0.49973387", "0.49730897", "0.49612755", "0.49369153", "0.49357876", "0.49352363", "0.4903247", "0.48824894", "0.48682725", "0.48483664", "0.48415053", "0.48386008", "0.48326454", "0.48270777", "0.48270777" ]
0.74623376
0
Todo: There seems to be some redundancy between ``is_pure_symbolic(x)`` and ``not graph.is_computable(x)``.
def is_pure_symbolic(*var):
    # return 'theano' in sys.modules and builtins.any(isinstance(v, _gettheano().tensor.TensorVariable)
    return 'theano' in sys.modules and builtins.any(isinstance(v, cf.PureSymbolicTypes)
                                                    for v in _expand_args(var))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_pure(self) -> bool:\r\n return self.is_valid and np.all([x[\"operation\"].is_pure for x in self.operations_by_name.values()])", "def is_pure(self):\r\n return isinstance(self, PureOperation)", "def is_symbolic(self: Q) -> bool:\n\n symbolic = False\n\n if (\n hasattr(self.t, \"free_symbols\")\n or hasattr(self.x, \"free_symbols\")\n or hasattr(self.y, \"free_symbols\")\n or hasattr(self.z, \"free_symbols\")\n ):\n symbolic = True\n\n return symbolic", "def is_pure(self):\r\n return True", "def notpexpr(*disallowed_heads):\n return some(lambda x: not (\n isinstance(x, HyExpression) and\n x and\n isinstance(x[0], HySymbol) and\n x[0] in disallowed_heads))", "def is_pure(self) -> bool:\n return self.w==0.0", "def is_quantifier_free(formula):\n assert type(formula) is Formula\n # Task 11.3.1\n if is_constant(formula.root) or is_variable(formula.root) or is_relation(formula.root) or is_equality(formula.root):\n return True\n\n if is_quantifier(formula.root):\n return False\n\n is_first = is_quantifier_free(formula.first)\n if is_binary(formula.root):\n return is_first and is_quantifier_free(formula.second)\n\n return is_first", "def is_quantifier_free(formula: Formula) -> bool:\r\n # Task 11.3.1\r\n\r\n if is_quantifier(formula.root):\r\n return False\r\n\r\n if is_binary(formula.root):\r\n return is_quantifier_free(formula.first) and is_quantifier_free(formula.second)\r\n\r\n if is_unary(formula.root):\r\n return is_quantifier_free(formula.first)\r\n\r\n return True", "def _check(self):\n d = self.degree()\n Sd = self.parent()._sym\n\n if prod(self._g, Sd.one()) != Sd.one():\n raise ValueError(\"the product is not identity\")\n\n if self._connected and not perms_are_connected(self._g, d):\n raise ValueError(\"not connected\")", "def _is_compatible_symbolic_array(a, b):\n if not a.shape == b.shape:\n return False\n a = a.flatten()\n b = b.flatten()\n for t, v in zip(a, b):\n if not is_symbolic(t) and not is_symbolic(v):\n if t != v:\n return False\n return True", "def _eval_is_alt_sym_naive(self, only_sym=False, only_alt=False):\n if only_sym and only_alt:\n raise ValueError(\n \"Both {} and {} cannot be set to True\"\n .format(only_sym, only_alt))\n\n n = self.degree\n sym_order = _factorial(n)\n order = self.order()\n\n if order == sym_order:\n self._is_sym = True\n self._is_alt = False\n if only_alt:\n return False\n return True\n\n elif 2*order == sym_order:\n self._is_sym = False\n self._is_alt = True\n if only_sym:\n return False\n return True\n\n return False", "def is_atomic(formula):\n return isinstance(formula, Symbol) or isinstance(formula, Predicate)", "def _logical_not(x):\n x_ = _static_value(x)\n if x_ is None:\n return math_ops.logical_not(x)\n return constant_op.constant(np.logical_not(x_))", "def is_non_reducing(self):\n return bool(set(self.kind) & set(\"ABC\"))", "def test_not_strongly_connected(self):\n G = DiGraph([(0, 1), (0, 2), (1, 2)])\n assert_false(is_strongly_connected(G))", "def isUnConditional(self) -> bool:\n ...", "def is_pure_electric(self):\n klist = [key for key in self.components if not self.component_is_zero(key)]\n return all([key.startswith('electric') for key in klist])", "def is_contradiction(formula: Formula) -> bool:\n # Task 2.5b\n return not is_satisfiable(formula)", "def _is_zero(x):\r\n if not hasattr(x, 'type'):\r\n return np.all(x == 0.)\r\n if isinstance(x.type, NullType):\r\n return 'no'\r\n if isinstance(x.type, DisconnectedType):\r\n return 'yes'\r\n\r\n no_constant_value = True\r\n try:\r\n constant_value = 
theano.get_scalar_constant_value(x)\r\n no_constant_value = False\r\n except theano.tensor.basic.NotScalarConstantError:\r\n pass\r\n\r\n if no_constant_value:\r\n return 'maybe'\r\n\r\n if constant_value != 0.:\r\n return 'no'\r\n\r\n return 'yes'", "def is_contradiction(formula: Formula) -> bool:\r\n # Task 2.5b\r\n return not is_satisfiable(formula)", "def is_atom_convex(self):\n return False", "def is_atom_convex(self):\n return False", "def logical_not(x, f=None):\n return _cur_framework(x, f=f).logical_not(x)", "def isIsotropic( self ) :\n\n for coefficient in self[1:] :\n if( coefficient != 0. ) : return( False )\n return( True )", "def _do_eq_sympify(self, other):\n for superclass in type(other).__mro__:\n conv = _external_converter.get(superclass)\n if conv is not None:\n return self == conv(other)\n if hasattr(other, '_sympy_'):\n return self == other._sympy_()\n return NotImplemented", "def test_tensors_w_functions_can_be_canonicalized(free_alg):\n dr = free_alg\n dr.set_symm(SymmFunc, Perm([1, 0], NEG), valence=2, set_base_name=False)\n\n p = dr.names\n i, j, k = p.R_dumms[:3]\n r = p.R\n v = p.v\n\n # General anti-symmetric real matrix.\n tensor = dr.sum(\n (i, r), (j, r), SymmFunc(k, i, j) * SymmFunc(i, j) * v[i] * v[j]\n ) + dr.sum(\n (i, r), (j, r), SymmFunc(k, i, j) * SymmFunc(j, i) * v[i] * v[j]\n )\n assert tensor.n_terms == 2\n assert tensor.simplify() == 0", "def test_evaluate_not_expression(self):\n value = self.evaluate_common(\"not false\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Boolean, \"Expected Boolean\")\n self.assertTrue(value.value is True, \"Expected True\")\n value = self.evaluate_common(\"not true\")\n self.assertTrue(value.value is False, \"Expected False\")\n try:\n value = self.evaluate_common(\"not 1\")\n self.fail(\"Integer promotion to Boolean\")\n except odata.EvaluationError:\n pass\n value = self.evaluate_common(\"not null\")\n self.assertTrue(value.value is None, \"Expected NULL\")", "def is_atom_convex(self) -> bool:\n return False", "def is_symmetric(self):\n _is_sym = self._is_sym\n if _is_sym is not None:\n return _is_sym\n\n n = self.degree\n if n >= 8:\n if self.is_transitive():\n _is_alt_sym = self._eval_is_alt_sym_monte_carlo()\n if _is_alt_sym:\n if any(g.is_odd for g in self.generators):\n self._is_sym, self._is_alt = True, False\n return True\n\n self._is_sym, self._is_alt = False, True\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)\n\n self._is_sym, self._is_alt = False, False\n return False\n\n return self._eval_is_alt_sym_naive(only_sym=True)", "def is_commutative(self, s):\n return all(self.operations[s][x][y] == self.operations[s][y][x] \n for x in range(self.cardinality) for y in range(self.cardinality))" ]
[ "0.65525603", "0.6225219", "0.6183948", "0.6024547", "0.57890093", "0.55200416", "0.5471868", "0.5459004", "0.5401552", "0.53722376", "0.5332143", "0.53140587", "0.5311742", "0.5289241", "0.5244963", "0.5174868", "0.51738834", "0.517038", "0.5164459", "0.51191884", "0.5084533", "0.5084533", "0.5078841", "0.50629514", "0.505558", "0.50500774", "0.50481135", "0.5046682", "0.5019202", "0.5014102" ]
0.7302066
0
Return True if `var` is any recognized sparse format.
def issparse(var):
    if 'theano.sparse' in sys.modules:
        return (sp.sparse.issparse(var)
                or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))
    else:
        return sp.sparse.issparse(var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isspsparse(var):\n if 'theano.sparse' in sys.modules:\n return (sp.sparse.issparse(var)\n or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))\n else:\n return sp.sparse.issparse(var)", "def is_sparse(constant) -> bool:\n return sp.issparse(constant)", "def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True", "def _has_sparse_features(\n feature_container: Iterable[schema_pb2.Feature]\n ) -> bool:\n for f in feature_container:\n if isinstance(f, schema_pb2.SparseFeature):\n return True\n if f.type == schema_pb2.STRUCT:\n if f.struct_domain.sparse_feature:\n return True\n return _has_sparse_features(f.struct_domain.feature)\n return False", "def _schema_has_sparse_features(schema: schema_pb2.Schema) -> bool:\n\n def _has_sparse_features(\n feature_container: Iterable[schema_pb2.Feature]\n ) -> bool:\n \"\"\"Helper function used to determine whether there are sparse features.\"\"\"\n for f in feature_container:\n if isinstance(f, schema_pb2.SparseFeature):\n return True\n if f.type == schema_pb2.STRUCT:\n if f.struct_domain.sparse_feature:\n return True\n return _has_sparse_features(f.struct_domain.feature)\n return False\n\n if schema.sparse_feature:\n return True\n return _has_sparse_features(schema.feature)", "def is_sparse(x: Any, backend=None) -> bool:\r\n module = get_module(backend)\r\n return module.is_sparse(x)", "def is_sparse(tensor):\n return isinstance(tensor, sparse_tensor.SparseTensor)", "def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))", "def _check_sparse_format(spmatrix, accept_sparse=True, dtype=None,\n force_all_finite=True, context=\"\"):\n if accept_sparse in [None, False]:\n raise TypeError('%sA sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.' % context)\n if dtype is None:\n dtype = spmatrix.dtype\n\n CHANGE_FORMAT = False\n if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in\n accept_sparse):\n CHANGE_FORMAT = True\n\n if CHANGE_FORMAT:\n msg = (\"%sSparse format not one of recommended [format: %s]. \"\n \"Consider changing one of %r\")\n warnings.warn(msg % (context, spmatrix.format, accept_sparse),\n InputDataWarning)\n\n CHANGE_DTYPE = False\n if dtype != spmatrix.dtype:\n # convert dtype\n CHANGE_DTYPE = True\n\n if CHANGE_DTYPE:\n msg = (\"%sDtype of sparse array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, spmatrix.dtype, dtype), InputDataWarning)\n\n ALL_FINITE = True\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n msg = \"%sCan't check %s sparse matrix for nan or inf.\"\n warnings.warn(msg % (context, spmatrix.format))\n else:\n ALL_FINITE = check_all_finite(spmatrix.data)\n\n if not ALL_FINITE:\n msg = (\"%sNot all elements in array are finite. This may cause \"\n \"estimation problems. 
Consider nan conversion and replacing \"\n \"infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n return CHANGE_DTYPE or CHANGE_FORMAT or not ALL_FINITE", "def is_splitable_var(var: Any) -> bool:\n if isinstance(var, DataSample):\n return True\n if isinstance(var, torch.Tensor):\n return True\n if isinstance(var, np.ndarray):\n return True\n if isinstance(var, abc.Sequence) and not isinstance(var, str):\n return True\n return False", "def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype", "def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True", "def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)", "def is_dense(x: Any, backend=None) -> bool:\r\n\r\n module = get_module(backend)\r\n return module.is_dense(x)", "def isdense(qob):\n return isinstance(qob, np.ndarray)", "def is_sparse(number):\n\n if number == 0:\n return True\n if number == 1:\n # edge case. List explicitly for clarity. Define to be True\n return True\n else:\n bits = bits_list(number)\n # start power_of_2 at 1 so previous_bit index won't be out of list range\n for power_of_2 in range(1, len(bits)):\n current_bit = bits[power_of_2]\n previous_bit = bits[power_of_2 - 1]\n if ((current_bit == 1) and (previous_bit == 1)):\n # number has two consecutive 1s\n return False\n return True", "def issparse(qob):\n return isinstance(qob, sp.spmatrix)", "def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def validVarConstruct(self,thisvar):\r\n validLength = self.validVarConstructLength(thisvar)\r\n if not validLength:\r\n return False, '', '', False\r\n validName, varName = self.validVarConstructName(thisvar[0])\r\n if not validName:\r\n return False, '', '', False \r\n validType, varType, varArray = self.validVarConstructType(thisvar[1])\r\n if not validType:\r\n return False, '', '', False\r\n \r\n return True, varName, varType, varArray", "def test_sparsity_detection(ODE, alg):\n stepper = alg(0, ODE.dt_init, ODE.q_init, ODE.A)\n assert ODE.__name__.startswith('Sparse') == sp.issparse(stepper.I)", "def seg_known(self, segment, normalize=True):\n if normalize:\n segment = FeatureTable.normalize(segment)\n return segment in self.seg_dict", "def has_contig(variant: str) -> bool:\n fields: List[str] = variant.split(\"|\")\n for field in fields:\n field_name_value: List[str] = field.split(\":\")\n field_name: str = field_name_value[0]\n if field_name == \"CTG\":\n field_value: str = field_name_value[1]\n if field_value != \".\":\n return True\n return False", "def _isstrvar(self, index):\n return self._typlist[index] <= 32768", "def _isstrvar(self, index):\n return self._typlist[index] <= 244", "def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n 
self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)", "def _check_for_unsupported_schema_fields(schema):\n if schema.sparse_feature:\n logging.warning('The input schema has sparse features which'\n ' are currently not supported.')", "def _validate_dtype():\n\n test_array = _spsparse.random(5, 5, density=0.5, format=\"csc\", dtype=np.float32, random_state=50)\n test_comparison = test_array.A\n\n csc_ref, precision_flag = _create_mkl_sparse(test_array)\n\n try:\n csr_ref = _convert_to_csr(csc_ref)\n final_array = _export_mkl(csr_ref, precision_flag)\n if not np.allclose(test_comparison, final_array.A):\n raise ValueError(\"Match failed after matrix conversion\")\n _destroy_mkl_handle(csr_ref)\n finally:\n _destroy_mkl_handle(csc_ref)", "def hasFlexibleFields(data):\n\n regexp = '_f[0..9]+$'\n\n # XXX GR the 'and' close shouldn't be necessary, but there are\n # empty flexible fields in some of our mails\n # also bool on iterators doesn't check non emptiness\n\n return int(bool([fid for fid in data\n if re.search(regexp, fid) and data[fid] is not None]))", "def is_dense(self, rel_name):\n return self._declaration[rel_name].dense", "def is_a_spectrum_file(self):\n import re\n\n is_spectrum = self.ndp3 == 4\n regex = re.compile(r'F[0-9]{2} PT2D[0-9]{6}')\n is_spectrum = is_spectrum and \\\n all([regex.match(var) is not None\\\n for var in self.varnames])\n\n return is_spectrum" ]
[ "0.80299467", "0.7115952", "0.6787843", "0.66746426", "0.6657034", "0.6640711", "0.65491956", "0.647349", "0.6378612", "0.5713969", "0.56804806", "0.5675083", "0.5631618", "0.55671525", "0.5554212", "0.5530677", "0.54940146", "0.54143655", "0.54069203", "0.5393739", "0.53715944", "0.5351125", "0.5344182", "0.53063494", "0.53040576", "0.524251", "0.5214008", "0.5209621", "0.5181622", "0.5165097" ]
0.7509615
1
Return True if `var` is sparse with `scipy.sparse` interface. True for scipy.sparse, theano.sparse.
def isspsparse(var):
    if 'theano.sparse' in sys.modules:
        return (sp.sparse.issparse(var)
                or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))
    else:
        return sp.sparse.issparse(var)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_sparse(x: Any, backend=None) -> bool:\r\n module = get_module(backend)\r\n return module.is_sparse(x)", "def issparse(var):\n if 'theano.sparse' in sys.modules:\n return (sp.sparse.issparse(var)\n or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))\n else:\n return sp.sparse.issparse(var)", "def is_sparse(A):\n if isinstance(A, torch.Tensor):\n return A.layout == torch.sparse_coo\n raise TypeError(\"expected Tensor but got %s\" % (type(A).__name__))", "def is_sparse(tensor):\n return isinstance(tensor, sparse_tensor.SparseTensor)", "def is_sparse(constant) -> bool:\n return sp.issparse(constant)", "def _schema_has_sparse_features(schema: schema_pb2.Schema) -> bool:\n\n def _has_sparse_features(\n feature_container: Iterable[schema_pb2.Feature]\n ) -> bool:\n \"\"\"Helper function used to determine whether there are sparse features.\"\"\"\n for f in feature_container:\n if isinstance(f, schema_pb2.SparseFeature):\n return True\n if f.type == schema_pb2.STRUCT:\n if f.struct_domain.sparse_feature:\n return True\n return _has_sparse_features(f.struct_domain.feature)\n return False\n\n if schema.sparse_feature:\n return True\n return _has_sparse_features(schema.feature)", "def _has_sparse_features(\n feature_container: Iterable[schema_pb2.Feature]\n ) -> bool:\n for f in feature_container:\n if isinstance(f, schema_pb2.SparseFeature):\n return True\n if f.type == schema_pb2.STRUCT:\n if f.struct_domain.sparse_feature:\n return True\n return _has_sparse_features(f.struct_domain.feature)\n return False", "def _is_allowed_sparse_format(matrix):\n if _spsparse.isspmatrix(matrix):\n return _spsparse.isspmatrix_csr(matrix) or _spsparse.isspmatrix_csc(matrix) or _spsparse.isspmatrix_bsr(matrix)\n else:\n return True", "def test_return_sparse():\n X = Vectorizer(strategy=\"bow\", return_sparse=True).fit_transform(X_text, y10)\n assert all(pd.api.types.is_sparse(X[c]) for c in X.columns)", "def test_import_type_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert x.dtype == import_data('/tmp/test.sparse').dtype", "def test_check_sparse(self):\n x, x_rand, s = self.create_testdata()\n task = mmRDTR()\n #check that a dense array x is passed thru unchanged\n check = task.check_sparse(x)\n self.assertEqual(np.all(check==x),True)\n #check that a sparse matrix s is converted to a numpy array\n check = task.check_sparse(s)\n self.assertIsInstance(check,np.ndarray)\n self.assertEqual(np.all(check==s.todense()),True)", "def test_csm_sparser(self):\r\n sp_types = {'csc': sp.csc_matrix,\r\n 'csr': sp.csr_matrix}\r\n\r\n for format in ['csc', 'csr']:\r\n for dtype in ['float32', 'float64']:\r\n x = tensor.tensor(dtype=dtype, broadcastable=(False,))\r\n y = tensor.ivector()\r\n z = tensor.ivector()\r\n s = tensor.ivector()\r\n\r\n a = as_sparse_variable(sp_types[format](random_lil((4, 3),\r\n dtype, 1)))\r\n\r\n f = theano.function([x, y, z, s],\r\n tensor.grad(dense_from_sparse(\r\n a * CSM(format)(x, y, z, s)).sum(), x))\r\n\r\n spmat = sp_types[format](random_lil((4, 3), dtype, 3))\r\n\r\n res = f(spmat.data, spmat.indices, spmat.indptr,\r\n numpy.asarray(spmat.shape, 'int32'))\r\n\r\n assert len(spmat.data) == len(res)", "def set_sparsity(self,use_sparse):\n \n if hasattr(self.problem,'sparse_jac'):\n self.use_sparse = use_sparse\n else:\n raise KINSOL_Exception(\"The problem must have implemented a method 'sparse_jac' for sparsity to by used.\")", "def is_sparsity_enabled(cls):\n total,sp100,sp50 = 0,0,0\n for module_name, module, 
p_name, p, mask, pruned in cls.__sparse_parameters:\n total += 1\n mask_sum = mask.sum()\n mask_numel = mask.numel()\n if mask_sum == mask_numel:\n sp100 += 1\n elif mask_sum*2 == mask_numel:\n sp50 += 1\n\n assert (total == sp100 or total == sp50), \"Inconsistent model sparsity\"\n if total == sp100:\n return False\n elif total == sp50:\n return True", "def as_sparse_variable(x, name=None):\r\n\r\n # TODO\r\n # Verify that sp is sufficiently sparse, and raise a\r\n # warning if it is not\r\n\r\n if isinstance(x, gof.Apply):\r\n if len(x.outputs) != 1:\r\n raise ValueError(\"It is ambiguous which output of a \"\r\n \"multi-output Op has to be fetched.\", x)\r\n else:\r\n x = x.outputs[0]\r\n if isinstance(x, gof.Variable):\r\n if not isinstance(x.type, SparseType):\r\n raise TypeError(\"Variable type field must be a SparseType.\", x,\r\n x.type)\r\n return x\r\n try:\r\n return constant(x, name=name)\r\n except TypeError:\r\n raise TypeError(\"Cannot convert %s to SparseType\" % x, type(x))", "def is_dense(x: Any, backend=None) -> bool:\r\n\r\n module = get_module(backend)\r\n return module.is_dense(x)", "def test_import_values_sparse():\n x = sps.csr_matrix(np.random.rand(7, 11))\n export_data('/tmp/test.sparse', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.sparse').toarray())", "def test_sparsity_detection(ODE, alg):\n stepper = alg(0, ODE.dt_init, ODE.q_init, ODE.A)\n assert ODE.__name__.startswith('Sparse') == sp.issparse(stepper.I)", "def test_import_sparse_type_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert x.dtype == import_data('/tmp/test.mat').dtype", "def _check_sparse_format(spmatrix, accept_sparse=True, dtype=None,\n force_all_finite=True, context=\"\"):\n if accept_sparse in [None, False]:\n raise TypeError('%sA sparse matrix was passed, but dense '\n 'data is required. Use X.toarray() to '\n 'convert to a dense numpy array.' % context)\n if dtype is None:\n dtype = spmatrix.dtype\n\n CHANGE_FORMAT = False\n if (isinstance(accept_sparse, (list, tuple)) and spmatrix.format not in\n accept_sparse):\n CHANGE_FORMAT = True\n\n if CHANGE_FORMAT:\n msg = (\"%sSparse format not one of recommended [format: %s]. \"\n \"Consider changing one of %r\")\n warnings.warn(msg % (context, spmatrix.format, accept_sparse),\n InputDataWarning)\n\n CHANGE_DTYPE = False\n if dtype != spmatrix.dtype:\n # convert dtype\n CHANGE_DTYPE = True\n\n if CHANGE_DTYPE:\n msg = (\"%sDtype of sparse array not the expected type [dtype: %s]. \"\n \"Consider changing to %r\")\n warnings.warn(msg % (context, spmatrix.dtype, dtype), InputDataWarning)\n\n ALL_FINITE = True\n if force_all_finite:\n if not hasattr(spmatrix, \"data\"):\n msg = \"%sCan't check %s sparse matrix for nan or inf.\"\n warnings.warn(msg % (context, spmatrix.format))\n else:\n ALL_FINITE = check_all_finite(spmatrix.data)\n\n if not ALL_FINITE:\n msg = (\"%sNot all elements in array are finite. This may cause \"\n \"estimation problems. Consider nan conversion and replacing \"\n \"infinite values.\")\n warnings.warn(msg % context, InputDataWarning)\n\n return CHANGE_DTYPE or CHANGE_FORMAT or not ALL_FINITE", "def is_sparse(number):\n\n if number == 0:\n return True\n if number == 1:\n # edge case. List explicitly for clarity. 
Define to be True\n return True\n else:\n bits = bits_list(number)\n # start power_of_2 at 1 so previous_bit index won't be out of list range\n for power_of_2 in range(1, len(bits)):\n current_bit = bits[power_of_2]\n previous_bit = bits[power_of_2 - 1]\n if ((current_bit == 1) and (previous_bit == 1)):\n # number has two consecutive 1s\n return False\n return True", "def testGetNodeSparseFeature(self):\n op = ops.get_sparse_feature(tf.constant([1, 2, 3, 4], dtype=tf.int64), [0, 1], None, 2)\n with tf.Session() as sess:\n sparse_features = sess.run(op)\n features = [\n sess.run(tf.sparse_tensor_to_dense(sp)) for sp in sparse_features\n ]\n\n self.assertAllEqual(\n [[12341, 56781, 1234, 5678], [12342, 56782, 0, 0], [12343, 56783, 0, 0], [12344, 56784, 0, 0]],\n features[0])\n self.assertAllEqual(\n [[8888, 9999], [8888, 9999], [8888, 9999], [8888, 9999]],\n features[1])", "def use_sparse_routines(self):\n return self._use_sparse_routines", "def get_cvxopt_sparse_intf():\n import cvxpy.interface.cvxopt_interface.sparse_matrix_interface as smi\n return smi.SparseMatrixInterface()", "def _apply_sparse(self, grad, var):\n\n return self._apply_sparse_shared(\n grad.values,\n var,\n grad.indices,\n lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda\n x,\n i,\n v,\n use_locking=self._use_locking))", "def test_sparse_with_dense():\n\n def test_func(df):\n df[\"new column\"] = 1 # Create dense column\n return df\n\n atom = ATOMClassifier(X_text, y10, random_state=1)\n atom.apply(test_func)\n atom.vectorize(strategy=\"BOW\", return_sparse=False)\n assert all(not pd.api.types.is_sparse(atom.X[c]) for c in atom.features)", "def sparse_vars(a, axis=None):\n a_squared = a.copy()\n a_squared.data **= 2\n return a_squared.mean(axis) - np.square(a.mean(axis))", "def _apply_sparse(self, grad, var):\n return self._apply_sparse_shared(\n grad.values,\n var,\n grad.indices,\n lambda x, i, v: state_ops.scatter_add( # pylint: disable=g-long-lambda\n x,\n i,\n v,\n use_locking=self._use_locking))", "def test_import_sparse_values_mat():\n x = sps.csr_matrix(np.random.rand(3, 2))\n export_data('/tmp/test.mat', x)\n assert np.array_equal(x.toarray(), import_data('/tmp/test.mat').toarray())", "def calc_sparsity (data): \n matrix_size = data.shape[0]*data.shape[1] # Number of possible interactions in the matrix\n num_purchases = len(data.nonzero()[0]) # Number of items interacted with\n sparsity = 100*(1 - (num_purchases/matrix_size))\n print('{:.2f} % of the user interaction matrix is sparse'.format(sparsity,2))" ]
[ "0.73790324", "0.72182786", "0.7192387", "0.71083176", "0.706764", "0.65949607", "0.6314302", "0.62896657", "0.62845683", "0.6133798", "0.6107449", "0.6087067", "0.6074301", "0.6030176", "0.60223293", "0.5920868", "0.585102", "0.5810927", "0.57268214", "0.57231104", "0.57087183", "0.56712854", "0.56604356", "0.564207", "0.56231695", "0.5609846", "0.56044024", "0.55981", "0.55721855", "0.5562673" ]
0.86598843
0